Columns: python_code (string, 0 to 1.8M chars), repo_name (7 values), file_path (string, 5 to 99 chars)
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. */ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/errno.h> #include <crypto/aes.h> #include <crypto/internal/des.h> #include <crypto/internal/skcipher.h> #include "cipher.h" static unsigned int aes_sw_max_len = CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN; module_param(aes_sw_max_len, uint, 0644); MODULE_PARM_DESC(aes_sw_max_len, "Only use hardware for AES requests larger than this " "[0=always use hardware; anything <16 breaks AES-GCM; default=" __stringify(CONFIG_CRYPTO_DEV_QCE_SW_MAX_LEN)"]"); static LIST_HEAD(skcipher_algs); static void qce_skcipher_done(void *data) { struct crypto_async_request *async_req = data; struct skcipher_request *req = skcipher_request_cast(async_req); struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); struct qce_device *qce = tmpl->qce; struct qce_result_dump *result_buf = qce->dma.result_buf; enum dma_data_direction dir_src, dir_dst; u32 status; int error; bool diff_dst; diff_dst = (req->src != req->dst) ? true : false; dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; error = qce_dma_terminate_all(&qce->dma); if (error) dev_dbg(qce->dev, "skcipher dma termination error (%d)\n", error); if (diff_dst) dma_unmap_sg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src); dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); sg_free_table(&rctx->dst_tbl); error = qce_check_status(qce, &status); if (error < 0) dev_dbg(qce->dev, "skcipher operation error (%x)\n", status); memcpy(rctx->iv, result_buf->encr_cntr_iv, rctx->ivsize); qce->async_req_done(tmpl->qce, error); } static int qce_skcipher_async_req_handle(struct crypto_async_request *async_req) { struct skcipher_request *req = skcipher_request_cast(async_req); struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req)); struct qce_device *qce = tmpl->qce; enum dma_data_direction dir_src, dir_dst; struct scatterlist *sg; bool diff_dst; gfp_t gfp; int dst_nents, src_nents, ret; rctx->iv = req->iv; rctx->ivsize = crypto_skcipher_ivsize(skcipher); rctx->cryptlen = req->cryptlen; diff_dst = (req->src != req->dst) ? true : false; dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL; dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL; rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen); if (diff_dst) rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen); else rctx->dst_nents = rctx->src_nents; if (rctx->src_nents < 0) { dev_err(qce->dev, "Invalid numbers of src SG.\n"); return rctx->src_nents; } if (rctx->dst_nents < 0) { dev_err(qce->dev, "Invalid numbers of dst SG.\n"); return -rctx->dst_nents; } rctx->dst_nents += 1; gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp); if (ret) return ret; sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); sg = qce_sgtable_add(&rctx->dst_tbl, req->dst, req->cryptlen); if (IS_ERR(sg)) { ret = PTR_ERR(sg); goto error_free; } sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg, QCE_RESULT_BUF_SZ); if (IS_ERR(sg)) { ret = PTR_ERR(sg); goto error_free; } sg_mark_end(sg); rctx->dst_sg = rctx->dst_tbl.sgl; dst_nents = dma_map_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); if (!dst_nents) { ret = -EIO; goto error_free; } if (diff_dst) { src_nents = dma_map_sg(qce->dev, req->src, rctx->src_nents, dir_src); if (!src_nents) { ret = -EIO; goto error_unmap_dst; } rctx->src_sg = req->src; } else { rctx->src_sg = rctx->dst_sg; src_nents = dst_nents - 1; } ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, src_nents, rctx->dst_sg, dst_nents, qce_skcipher_done, async_req); if (ret) goto error_unmap_src; qce_dma_issue_pending(&qce->dma); ret = qce_start(async_req, tmpl->crypto_alg_type); if (ret) goto error_terminate; return 0; error_terminate: qce_dma_terminate_all(&qce->dma); error_unmap_src: if (diff_dst) dma_unmap_sg(qce->dev, req->src, rctx->src_nents, dir_src); error_unmap_dst: dma_unmap_sg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst); error_free: sg_free_table(&rctx->dst_tbl); return ret; } static int qce_skcipher_setkey(struct crypto_skcipher *ablk, const u8 *key, unsigned int keylen) { struct crypto_tfm *tfm = crypto_skcipher_tfm(ablk); struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm); unsigned long flags = to_cipher_tmpl(ablk)->alg_flags; unsigned int __keylen; int ret; if (!key || !keylen) return -EINVAL; /* * AES XTS key1 = key2 not supported by crypto engine. * Revisit to request a fallback cipher in this case. */ if (IS_XTS(flags)) { __keylen = keylen >> 1; if (!memcmp(key, key + __keylen, __keylen)) return -ENOKEY; } else { __keylen = keylen; } switch (__keylen) { case AES_KEYSIZE_128: case AES_KEYSIZE_256: memcpy(ctx->enc_key, key, keylen); break; case AES_KEYSIZE_192: break; default: return -EINVAL; } ret = crypto_skcipher_setkey(ctx->fallback, key, keylen); if (!ret) ctx->enc_keylen = keylen; return ret; } static int qce_des_setkey(struct crypto_skcipher *ablk, const u8 *key, unsigned int keylen) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk); int err; err = verify_skcipher_des_key(ablk, key); if (err) return err; ctx->enc_keylen = keylen; memcpy(ctx->enc_key, key, keylen); return 0; } static int qce_des3_setkey(struct crypto_skcipher *ablk, const u8 *key, unsigned int keylen) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(ablk); u32 _key[6]; int err; err = verify_skcipher_des3_key(ablk, key); if (err) return err; /* * The crypto engine does not support any two keys * being the same for triple des algorithms. The * verify_skcipher_des3_key does not check for all the * below conditions. Return -ENOKEY in case any two keys * are the same. Revisit to see if a fallback cipher * is needed to handle this condition. 
*/ memcpy(_key, key, DES3_EDE_KEY_SIZE); if (!((_key[0] ^ _key[2]) | (_key[1] ^ _key[3])) || !((_key[2] ^ _key[4]) | (_key[3] ^ _key[5])) || !((_key[0] ^ _key[4]) | (_key[1] ^ _key[5]))) return -ENOKEY; ctx->enc_keylen = keylen; memcpy(ctx->enc_key, key, keylen); return 0; } static int qce_skcipher_crypt(struct skcipher_request *req, int encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req); struct qce_alg_template *tmpl = to_cipher_tmpl(tfm); unsigned int blocksize = crypto_skcipher_blocksize(tfm); int keylen; int ret; rctx->flags = tmpl->alg_flags; rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT; keylen = IS_XTS(rctx->flags) ? ctx->enc_keylen >> 1 : ctx->enc_keylen; /* CE does not handle 0 length messages */ if (!req->cryptlen) return 0; /* * ECB and CBC algorithms require message lengths to be * multiples of block size. */ if (IS_ECB(rctx->flags) || IS_CBC(rctx->flags)) if (!IS_ALIGNED(req->cryptlen, blocksize)) return -EINVAL; /* * Conditions for requesting a fallback cipher * AES-192 (not supported by crypto engine (CE)) * AES-XTS request with len <= 512 byte (not recommended to use CE) * AES-XTS request with len > QCE_SECTOR_SIZE and * is not a multiple of it.(Revisit this condition to check if it is * needed in all versions of CE) */ if (IS_AES(rctx->flags) && ((keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_256) || (IS_XTS(rctx->flags) && ((req->cryptlen <= aes_sw_max_len) || (req->cryptlen > QCE_SECTOR_SIZE && req->cryptlen % QCE_SECTOR_SIZE))))) { skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, req->base.complete, req->base.data); skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst, req->cryptlen, req->iv); ret = encrypt ? 
crypto_skcipher_encrypt(&rctx->fallback_req) : crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base); } static int qce_skcipher_encrypt(struct skcipher_request *req) { return qce_skcipher_crypt(req, 1); } static int qce_skcipher_decrypt(struct skcipher_request *req) { return qce_skcipher_crypt(req, 0); } static int qce_skcipher_init(struct crypto_skcipher *tfm) { /* take the size without the fallback skcipher_request at the end */ crypto_skcipher_set_reqsize(tfm, offsetof(struct qce_cipher_reqctx, fallback_req)); return 0; } static int qce_skcipher_init_fallback(struct crypto_skcipher *tfm) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); ctx->fallback = crypto_alloc_skcipher(crypto_tfm_alg_name(&tfm->base), 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) return PTR_ERR(ctx->fallback); crypto_skcipher_set_reqsize(tfm, sizeof(struct qce_cipher_reqctx) + crypto_skcipher_reqsize(ctx->fallback)); return 0; } static void qce_skcipher_exit(struct crypto_skcipher *tfm) { struct qce_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); crypto_free_skcipher(ctx->fallback); } struct qce_skcipher_def { unsigned long flags; const char *name; const char *drv_name; unsigned int blocksize; unsigned int chunksize; unsigned int ivsize; unsigned int min_keysize; unsigned int max_keysize; }; static const struct qce_skcipher_def skcipher_def[] = { { .flags = QCE_ALG_AES | QCE_MODE_ECB, .name = "ecb(aes)", .drv_name = "ecb-aes-qce", .blocksize = AES_BLOCK_SIZE, .ivsize = 0, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, }, { .flags = QCE_ALG_AES | QCE_MODE_CBC, .name = "cbc(aes)", .drv_name = "cbc-aes-qce", .blocksize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, }, { .flags = QCE_ALG_AES | QCE_MODE_CTR, .name = "ctr(aes)", .drv_name = "ctr-aes-qce", .blocksize = 1, .chunksize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, }, { .flags = QCE_ALG_AES | QCE_MODE_XTS, .name = "xts(aes)", .drv_name = "xts-aes-qce", .blocksize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE * 2, .max_keysize = AES_MAX_KEY_SIZE * 2, }, { .flags = QCE_ALG_DES | QCE_MODE_ECB, .name = "ecb(des)", .drv_name = "ecb-des-qce", .blocksize = DES_BLOCK_SIZE, .ivsize = 0, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, }, { .flags = QCE_ALG_DES | QCE_MODE_CBC, .name = "cbc(des)", .drv_name = "cbc-des-qce", .blocksize = DES_BLOCK_SIZE, .ivsize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, }, { .flags = QCE_ALG_3DES | QCE_MODE_ECB, .name = "ecb(des3_ede)", .drv_name = "ecb-3des-qce", .blocksize = DES3_EDE_BLOCK_SIZE, .ivsize = 0, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, }, { .flags = QCE_ALG_3DES | QCE_MODE_CBC, .name = "cbc(des3_ede)", .drv_name = "cbc-3des-qce", .blocksize = DES3_EDE_BLOCK_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, }, }; static int qce_skcipher_register_one(const struct qce_skcipher_def *def, struct qce_device *qce) { struct qce_alg_template *tmpl; struct skcipher_alg *alg; int ret; tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); if (!tmpl) return -ENOMEM; alg = &tmpl->alg.skcipher; snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->drv_name); alg->base.cra_blocksize = 
def->blocksize; alg->chunksize = def->chunksize; alg->ivsize = def->ivsize; alg->min_keysize = def->min_keysize; alg->max_keysize = def->max_keysize; alg->setkey = IS_3DES(def->flags) ? qce_des3_setkey : IS_DES(def->flags) ? qce_des_setkey : qce_skcipher_setkey; alg->encrypt = qce_skcipher_encrypt; alg->decrypt = qce_skcipher_decrypt; alg->base.cra_priority = 300; alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->base.cra_ctxsize = sizeof(struct qce_cipher_ctx); alg->base.cra_alignmask = 0; alg->base.cra_module = THIS_MODULE; if (IS_AES(def->flags)) { alg->base.cra_flags |= CRYPTO_ALG_NEED_FALLBACK; alg->init = qce_skcipher_init_fallback; alg->exit = qce_skcipher_exit; } else { alg->init = qce_skcipher_init; } INIT_LIST_HEAD(&tmpl->entry); tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_SKCIPHER; tmpl->alg_flags = def->flags; tmpl->qce = qce; ret = crypto_register_skcipher(alg); if (ret) { dev_err(qce->dev, "%s registration failed\n", alg->base.cra_name); kfree(tmpl); return ret; } list_add_tail(&tmpl->entry, &skcipher_algs); dev_dbg(qce->dev, "%s is registered\n", alg->base.cra_name); return 0; } static void qce_skcipher_unregister(struct qce_device *qce) { struct qce_alg_template *tmpl, *n; list_for_each_entry_safe(tmpl, n, &skcipher_algs, entry) { crypto_unregister_skcipher(&tmpl->alg.skcipher); list_del(&tmpl->entry); kfree(tmpl); } } static int qce_skcipher_register(struct qce_device *qce) { int ret, i; for (i = 0; i < ARRAY_SIZE(skcipher_def); i++) { ret = qce_skcipher_register_one(&skcipher_def[i], qce); if (ret) goto err; } return 0; err: qce_skcipher_unregister(qce); return ret; } const struct qce_algo_ops skcipher_ops = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .register_algs = qce_skcipher_register, .unregister_algs = qce_skcipher_unregister, .async_req_handle = qce_skcipher_async_req_handle, };
linux-master
drivers/crypto/qce/skcipher.c
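/*
 * Consumer-side sketch (not part of the driver above): exercising the
 * "cbc(aes)" algorithm that qce_skcipher_register_one() registers, through
 * the generic async skcipher API. Whether the QCE hardware, its fallback,
 * or another implementation services the request depends on runtime
 * priorities. The function name and buffer parameters are illustrative
 * assumptions.
 */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   u8 *iv, u8 *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* len must be a multiple of AES_BLOCK_SIZE for cbc(aes) */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* crypto_wait_req() blocks until the async completion fires */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}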
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved. */ #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <crypto/internal/hash.h> #include "common.h" #include "core.h" #include "sha.h" struct qce_sha_saved_state { u8 pending_buf[QCE_SHA_MAX_BLOCKSIZE]; u8 partial_digest[QCE_SHA_MAX_DIGESTSIZE]; __be32 byte_count[2]; unsigned int pending_buflen; unsigned int flags; u64 count; bool first_blk; }; static LIST_HEAD(ahash_algs); static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0 }; static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = { SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7 }; static void qce_ahash_done(void *data) { struct crypto_async_request *async_req = data; struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); struct qce_device *qce = tmpl->qce; struct qce_result_dump *result = qce->dma.result_buf; unsigned int digestsize = crypto_ahash_digestsize(ahash); int error; u32 status; error = qce_dma_terminate_all(&qce->dma); if (error) dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error); dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); memcpy(rctx->digest, result->auth_iv, digestsize); if (req->result && rctx->last_blk) memcpy(req->result, result->auth_iv, digestsize); rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]); rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]); error = qce_check_status(qce, &status); if (error < 0) dev_dbg(qce->dev, "ahash operation error (%x)\n", status); req->src = rctx->src_orig; req->nbytes = rctx->nbytes_orig; rctx->last_blk = false; rctx->first_blk = false; qce->async_req_done(tmpl->qce, error); } static int qce_ahash_async_req_handle(struct crypto_async_request *async_req) { struct ahash_request *req = ahash_request_cast(async_req); struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm); struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm); struct qce_device *qce = tmpl->qce; unsigned long flags = rctx->flags; int ret; if (IS_SHA_HMAC(flags)) { rctx->authkey = ctx->authkey; rctx->authklen = QCE_SHA_HMAC_KEY_SIZE; } else if (IS_CMAC(flags)) { rctx->authkey = ctx->authkey; rctx->authklen = AES_KEYSIZE_128; } rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); if (rctx->src_nents < 0) { dev_err(qce->dev, "Invalid numbers of src SG.\n"); return rctx->src_nents; } ret = dma_map_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); if (!ret) return -EIO; sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ); ret = dma_map_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); if (!ret) { ret = -EIO; goto error_unmap_src; } ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents, &rctx->result_sg, 1, qce_ahash_done, async_req); if (ret) goto error_unmap_dst; qce_dma_issue_pending(&qce->dma); ret = qce_start(async_req, tmpl->crypto_alg_type); if (ret) goto error_terminate; return 0; error_terminate: qce_dma_terminate_all(&qce->dma); error_unmap_dst: dma_unmap_sg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE); error_unmap_src: dma_unmap_sg(qce->dev, 
req->src, rctx->src_nents, DMA_TO_DEVICE); return ret; } static int qce_ahash_init(struct ahash_request *req) { struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); const u32 *std_iv = tmpl->std_iv; memset(rctx, 0, sizeof(*rctx)); rctx->first_blk = true; rctx->last_blk = false; rctx->flags = tmpl->alg_flags; memcpy(rctx->digest, std_iv, sizeof(rctx->digest)); return 0; } static int qce_ahash_export(struct ahash_request *req, void *out) { struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_sha_saved_state *export_state = out; memcpy(export_state->pending_buf, rctx->buf, rctx->buflen); memcpy(export_state->partial_digest, rctx->digest, sizeof(rctx->digest)); export_state->byte_count[0] = rctx->byte_count[0]; export_state->byte_count[1] = rctx->byte_count[1]; export_state->pending_buflen = rctx->buflen; export_state->count = rctx->count; export_state->first_blk = rctx->first_blk; export_state->flags = rctx->flags; return 0; } static int qce_ahash_import(struct ahash_request *req, const void *in) { struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); const struct qce_sha_saved_state *import_state = in; memset(rctx, 0, sizeof(*rctx)); rctx->count = import_state->count; rctx->buflen = import_state->pending_buflen; rctx->first_blk = import_state->first_blk; rctx->flags = import_state->flags; rctx->byte_count[0] = import_state->byte_count[0]; rctx->byte_count[1] = import_state->byte_count[1]; memcpy(rctx->buf, import_state->pending_buf, rctx->buflen); memcpy(rctx->digest, import_state->partial_digest, sizeof(rctx->digest)); return 0; } static int qce_ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; struct scatterlist *sg_last, *sg; unsigned int total, len; unsigned int hash_later; unsigned int nbytes; unsigned int blocksize; blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); rctx->count += req->nbytes; /* check for buffer from previous updates and append it */ total = req->nbytes + rctx->buflen; if (total <= blocksize) { scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src, 0, req->nbytes, 0); rctx->buflen += req->nbytes; return 0; } /* save the original req structure fields */ rctx->src_orig = req->src; rctx->nbytes_orig = req->nbytes; /* * if we have data from previous update copy them on buffer. The old * data will be combined with current request bytes. */ if (rctx->buflen) memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); /* calculate how many bytes will be hashed later */ hash_later = total % blocksize; /* * At this point, there is more than one block size of data. If * the available data to transfer is exactly a multiple of block * size, save the last block to be transferred in qce_ahash_final * (with the last block bit set) if this is indeed the end of data * stream. If not this saved block will be transferred as part of * next update. If this block is not held back and if this is * indeed the end of data stream, the digest obtained will be wrong * since qce_ahash_final will see that rctx->buflen is 0 and return * doing nothing which in turn means that a digest will not be * copied to the destination result buffer. qce_ahash_final cannot * be made to alter this behavior and allowed to proceed if * rctx->buflen is 0 because the crypto engine BAM does not allow * for zero length transfers. 
*/ if (!hash_later) hash_later = blocksize; if (hash_later) { unsigned int src_offset = req->nbytes - hash_later; scatterwalk_map_and_copy(rctx->buf, req->src, src_offset, hash_later, 0); } /* here nbytes is multiple of blocksize */ nbytes = total - hash_later; len = rctx->buflen; sg = sg_last = req->src; while (len < nbytes && sg) { if (len + sg_dma_len(sg) > nbytes) break; len += sg_dma_len(sg); sg_last = sg; sg = sg_next(sg); } if (!sg_last) return -EINVAL; if (rctx->buflen) { sg_init_table(rctx->sg, 2); sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen); sg_chain(rctx->sg, 2, req->src); req->src = rctx->sg; } req->nbytes = nbytes; rctx->buflen = hash_later; return qce->async_req_enqueue(tmpl->qce, &req->base); } static int qce_ahash_final(struct ahash_request *req) { struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; if (!rctx->buflen) { if (tmpl->hash_zero) memcpy(req->result, tmpl->hash_zero, tmpl->alg.ahash.halg.digestsize); return 0; } rctx->last_blk = true; rctx->src_orig = req->src; rctx->nbytes_orig = req->nbytes; memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen); sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen); req->src = rctx->sg; req->nbytes = rctx->buflen; return qce->async_req_enqueue(tmpl->qce, &req->base); } static int qce_ahash_digest(struct ahash_request *req) { struct qce_sha_reqctx *rctx = ahash_request_ctx_dma(req); struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); struct qce_device *qce = tmpl->qce; int ret; ret = qce_ahash_init(req); if (ret) return ret; rctx->src_orig = req->src; rctx->nbytes_orig = req->nbytes; rctx->first_blk = true; rctx->last_blk = true; if (!rctx->nbytes_orig) { if (tmpl->hash_zero) memcpy(req->result, tmpl->hash_zero, tmpl->alg.ahash.halg.digestsize); return 0; } return qce->async_req_enqueue(tmpl->qce, &req->base); } static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { unsigned int digestsize = crypto_ahash_digestsize(tfm); struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base); struct crypto_wait wait; struct ahash_request *req; struct scatterlist sg; unsigned int blocksize; struct crypto_ahash *ahash_tfm; u8 *buf; int ret; const char *alg_name; blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); memset(ctx->authkey, 0, sizeof(ctx->authkey)); if (keylen <= blocksize) { memcpy(ctx->authkey, key, keylen); return 0; } if (digestsize == SHA1_DIGEST_SIZE) alg_name = "sha1-qce"; else if (digestsize == SHA256_DIGEST_SIZE) alg_name = "sha256-qce"; else return -EINVAL; ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0); if (IS_ERR(ahash_tfm)) return PTR_ERR(ahash_tfm); req = ahash_request_alloc(ahash_tfm, GFP_KERNEL); if (!req) { ret = -ENOMEM; goto err_free_ahash; } crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &wait); crypto_ahash_clear_flags(ahash_tfm, ~0); buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_free_req; } memcpy(buf, key, keylen); sg_init_one(&sg, buf, keylen); ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); ret = crypto_wait_req(crypto_ahash_digest(req), &wait); kfree(buf); err_free_req: ahash_request_free(req); err_free_ahash: crypto_free_ahash(ahash_tfm); return ret; } static int qce_ahash_cra_init(struct crypto_tfm *tfm) { struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm); crypto_ahash_set_reqsize_dma(ahash, 
sizeof(struct qce_sha_reqctx)); memset(ctx, 0, sizeof(*ctx)); return 0; } struct qce_ahash_def { unsigned long flags; const char *name; const char *drv_name; unsigned int digestsize; unsigned int blocksize; unsigned int statesize; const u32 *std_iv; }; static const struct qce_ahash_def ahash_def[] = { { .flags = QCE_HASH_SHA1, .name = "sha1", .drv_name = "sha1-qce", .digestsize = SHA1_DIGEST_SIZE, .blocksize = SHA1_BLOCK_SIZE, .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha1, }, { .flags = QCE_HASH_SHA256, .name = "sha256", .drv_name = "sha256-qce", .digestsize = SHA256_DIGEST_SIZE, .blocksize = SHA256_BLOCK_SIZE, .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha256, }, { .flags = QCE_HASH_SHA1_HMAC, .name = "hmac(sha1)", .drv_name = "hmac-sha1-qce", .digestsize = SHA1_DIGEST_SIZE, .blocksize = SHA1_BLOCK_SIZE, .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha1, }, { .flags = QCE_HASH_SHA256_HMAC, .name = "hmac(sha256)", .drv_name = "hmac-sha256-qce", .digestsize = SHA256_DIGEST_SIZE, .blocksize = SHA256_BLOCK_SIZE, .statesize = sizeof(struct qce_sha_saved_state), .std_iv = std_iv_sha256, }, }; static int qce_ahash_register_one(const struct qce_ahash_def *def, struct qce_device *qce) { struct qce_alg_template *tmpl; struct ahash_alg *alg; struct crypto_alg *base; int ret; tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL); if (!tmpl) return -ENOMEM; tmpl->std_iv = def->std_iv; alg = &tmpl->alg.ahash; alg->init = qce_ahash_init; alg->update = qce_ahash_update; alg->final = qce_ahash_final; alg->digest = qce_ahash_digest; alg->export = qce_ahash_export; alg->import = qce_ahash_import; if (IS_SHA_HMAC(def->flags)) alg->setkey = qce_ahash_hmac_setkey; alg->halg.digestsize = def->digestsize; alg->halg.statesize = def->statesize; if (IS_SHA1(def->flags)) tmpl->hash_zero = sha1_zero_message_hash; else if (IS_SHA256(def->flags)) tmpl->hash_zero = sha256_zero_message_hash; base = &alg->halg.base; base->cra_blocksize = def->blocksize; base->cra_priority = 300; base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; base->cra_ctxsize = sizeof(struct qce_sha_ctx); base->cra_alignmask = 0; base->cra_module = THIS_MODULE; base->cra_init = qce_ahash_cra_init; snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->drv_name); INIT_LIST_HEAD(&tmpl->entry); tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH; tmpl->alg_flags = def->flags; tmpl->qce = qce; ret = crypto_register_ahash(alg); if (ret) { dev_err(qce->dev, "%s registration failed\n", base->cra_name); kfree(tmpl); return ret; } list_add_tail(&tmpl->entry, &ahash_algs); dev_dbg(qce->dev, "%s is registered\n", base->cra_name); return 0; } static void qce_ahash_unregister(struct qce_device *qce) { struct qce_alg_template *tmpl, *n; list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) { crypto_unregister_ahash(&tmpl->alg.ahash); list_del(&tmpl->entry); kfree(tmpl); } } static int qce_ahash_register(struct qce_device *qce) { int ret, i; for (i = 0; i < ARRAY_SIZE(ahash_def); i++) { ret = qce_ahash_register_one(&ahash_def[i], qce); if (ret) goto err; } return 0; err: qce_ahash_unregister(qce); return ret; } const struct qce_algo_ops ahash_ops = { .type = CRYPTO_ALG_TYPE_AHASH, .register_algs = qce_ahash_register, .unregister_algs = qce_ahash_unregister, .async_req_handle = qce_ahash_async_req_handle, };
linux-master
drivers/crypto/qce/sha.c
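/*
 * Consumer-side sketch (assumed, not driver code): one hmac(sha256)
 * computation through the async ahash API registered above. Keys longer
 * than the block size are pre-hashed by qce_ahash_hmac_setkey() before
 * being stored in ctx->authkey. Function name and buffers are illustrative.
 */
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_hmac_sha256(const u8 *key, unsigned int keylen,
			       const u8 *msg, unsigned int msglen, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ahash_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, msg, msglen);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, msglen);

	/* digest() = init + update + final in one shot */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}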
// SPDX-License-Identifier: GPL-2.0-only /* * Crypto acceleration support for Rockchip RK3288 * * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd * * Author: Zain Wang <[email protected]> * * Some ideas are from marvell-cesa.c and s5p-sss.c driver. */ #include "rk3288_crypto.h" #include <crypto/engine.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/reset.h> #include <linux/spinlock.h> static struct rockchip_ip rocklist = { .dev_list = LIST_HEAD_INIT(rocklist.dev_list), .lock = __SPIN_LOCK_UNLOCKED(rocklist.lock), }; struct rk_crypto_info *get_rk_crypto(void) { struct rk_crypto_info *first; spin_lock(&rocklist.lock); first = list_first_entry_or_null(&rocklist.dev_list, struct rk_crypto_info, list); list_rotate_left(&rocklist.dev_list); spin_unlock(&rocklist.lock); return first; } static const struct rk_variant rk3288_variant = { .num_clks = 4, .rkclks = { { "sclk", 150000000}, } }; static const struct rk_variant rk3328_variant = { .num_clks = 3, }; static const struct rk_variant rk3399_variant = { .num_clks = 3, }; static int rk_crypto_get_clks(struct rk_crypto_info *dev) { int i, j, err; unsigned long cr; dev->num_clks = devm_clk_bulk_get_all(dev->dev, &dev->clks); if (dev->num_clks < dev->variant->num_clks) { dev_err(dev->dev, "Missing clocks, got %d instead of %d\n", dev->num_clks, dev->variant->num_clks); return -EINVAL; } for (i = 0; i < dev->num_clks; i++) { cr = clk_get_rate(dev->clks[i].clk); for (j = 0; j < ARRAY_SIZE(dev->variant->rkclks); j++) { if (dev->variant->rkclks[j].max == 0) continue; if (strcmp(dev->variant->rkclks[j].name, dev->clks[i].id)) continue; if (cr > dev->variant->rkclks[j].max) { err = clk_set_rate(dev->clks[i].clk, dev->variant->rkclks[j].max); if (err) dev_err(dev->dev, "Fail downclocking %s from %lu to %lu\n", dev->variant->rkclks[j].name, cr, dev->variant->rkclks[j].max); else dev_info(dev->dev, "Downclocking %s from %lu to %lu\n", dev->variant->rkclks[j].name, cr, dev->variant->rkclks[j].max); } } } return 0; } static int rk_crypto_enable_clk(struct rk_crypto_info *dev) { int err; err = clk_bulk_prepare_enable(dev->num_clks, dev->clks); if (err) dev_err(dev->dev, "Could not enable clock clks\n"); return err; } static void rk_crypto_disable_clk(struct rk_crypto_info *dev) { clk_bulk_disable_unprepare(dev->num_clks, dev->clks); } /* * Power management strategy: The device is suspended until a request * is handled. For avoiding suspend/resume yoyo, the autosuspend is set to 2s. 
*/ static int rk_crypto_pm_suspend(struct device *dev) { struct rk_crypto_info *rkdev = dev_get_drvdata(dev); rk_crypto_disable_clk(rkdev); reset_control_assert(rkdev->rst); return 0; } static int rk_crypto_pm_resume(struct device *dev) { struct rk_crypto_info *rkdev = dev_get_drvdata(dev); int ret; ret = rk_crypto_enable_clk(rkdev); if (ret) return ret; reset_control_deassert(rkdev->rst); return 0; } static const struct dev_pm_ops rk_crypto_pm_ops = { SET_RUNTIME_PM_OPS(rk_crypto_pm_suspend, rk_crypto_pm_resume, NULL) }; static int rk_crypto_pm_init(struct rk_crypto_info *rkdev) { int err; pm_runtime_use_autosuspend(rkdev->dev); pm_runtime_set_autosuspend_delay(rkdev->dev, 2000); err = pm_runtime_set_suspended(rkdev->dev); if (err) return err; pm_runtime_enable(rkdev->dev); return err; } static void rk_crypto_pm_exit(struct rk_crypto_info *rkdev) { pm_runtime_disable(rkdev->dev); } static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id) { struct rk_crypto_info *dev = platform_get_drvdata(dev_id); u32 interrupt_status; interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS); CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status); dev->status = 1; if (interrupt_status & 0x0a) { dev_warn(dev->dev, "DMA Error\n"); dev->status = 0; } complete(&dev->complete); return IRQ_HANDLED; } static struct rk_crypto_tmp *rk_cipher_algs[] = { &rk_ecb_aes_alg, &rk_cbc_aes_alg, &rk_ecb_des_alg, &rk_cbc_des_alg, &rk_ecb_des3_ede_alg, &rk_cbc_des3_ede_alg, &rk_ahash_sha1, &rk_ahash_sha256, &rk_ahash_md5, }; static int rk_crypto_debugfs_show(struct seq_file *seq, void *v) { struct rk_crypto_info *dd; unsigned int i; spin_lock(&rocklist.lock); list_for_each_entry(dd, &rocklist.dev_list, list) { seq_printf(seq, "%s %s requests: %lu\n", dev_driver_string(dd->dev), dev_name(dd->dev), dd->nreq); } spin_unlock(&rocklist.lock); for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { if (!rk_cipher_algs[i]->dev) continue; switch (rk_cipher_algs[i]->type) { case CRYPTO_ALG_TYPE_SKCIPHER: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", rk_cipher_algs[i]->alg.skcipher.base.base.cra_driver_name, rk_cipher_algs[i]->alg.skcipher.base.base.cra_name, rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb); seq_printf(seq, "\tfallback due to length: %lu\n", rk_cipher_algs[i]->stat_fb_len); seq_printf(seq, "\tfallback due to alignment: %lu\n", rk_cipher_algs[i]->stat_fb_align); seq_printf(seq, "\tfallback due to SGs: %lu\n", rk_cipher_algs[i]->stat_fb_sgdiff); break; case CRYPTO_ALG_TYPE_AHASH: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", rk_cipher_algs[i]->alg.hash.base.halg.base.cra_driver_name, rk_cipher_algs[i]->alg.hash.base.halg.base.cra_name, rk_cipher_algs[i]->stat_req, rk_cipher_algs[i]->stat_fb); break; } } return 0; } DEFINE_SHOW_ATTRIBUTE(rk_crypto_debugfs); static void register_debugfs(struct rk_crypto_info *crypto_info) { struct dentry *dbgfs_dir __maybe_unused; struct dentry *dbgfs_stats __maybe_unused; /* Ignore error of debugfs */ dbgfs_dir = debugfs_create_dir("rk3288_crypto", NULL); dbgfs_stats = debugfs_create_file("stats", 0444, dbgfs_dir, &rocklist, &rk_crypto_debugfs_fops); #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG rocklist.dbgfs_dir = dbgfs_dir; rocklist.dbgfs_stats = dbgfs_stats; #endif } static int rk_crypto_register(struct rk_crypto_info *crypto_info) { unsigned int i, k; int err = 0; for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { rk_cipher_algs[i]->dev = crypto_info; switch (rk_cipher_algs[i]->type) { case CRYPTO_ALG_TYPE_SKCIPHER: dev_info(crypto_info->dev, "Register %s as %s\n", 
rk_cipher_algs[i]->alg.skcipher.base.base.cra_name, rk_cipher_algs[i]->alg.skcipher.base.base.cra_driver_name); err = crypto_engine_register_skcipher(&rk_cipher_algs[i]->alg.skcipher); break; case CRYPTO_ALG_TYPE_AHASH: dev_info(crypto_info->dev, "Register %s as %s\n", rk_cipher_algs[i]->alg.hash.base.halg.base.cra_name, rk_cipher_algs[i]->alg.hash.base.halg.base.cra_driver_name); err = crypto_engine_register_ahash(&rk_cipher_algs[i]->alg.hash); break; default: dev_err(crypto_info->dev, "unknown algorithm\n"); } if (err) goto err_cipher_algs; } return 0; err_cipher_algs: for (k = 0; k < i; k++) { if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER) crypto_engine_unregister_skcipher(&rk_cipher_algs[k]->alg.skcipher); else crypto_engine_unregister_ahash(&rk_cipher_algs[i]->alg.hash); } return err; } static void rk_crypto_unregister(void) { unsigned int i; for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) { if (rk_cipher_algs[i]->type == CRYPTO_ALG_TYPE_SKCIPHER) crypto_engine_unregister_skcipher(&rk_cipher_algs[i]->alg.skcipher); else crypto_engine_unregister_ahash(&rk_cipher_algs[i]->alg.hash); } } static const struct of_device_id crypto_of_id_table[] = { { .compatible = "rockchip,rk3288-crypto", .data = &rk3288_variant, }, { .compatible = "rockchip,rk3328-crypto", .data = &rk3328_variant, }, { .compatible = "rockchip,rk3399-crypto", .data = &rk3399_variant, }, {} }; MODULE_DEVICE_TABLE(of, crypto_of_id_table); static int rk_crypto_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rk_crypto_info *crypto_info, *first; int err = 0; crypto_info = devm_kzalloc(&pdev->dev, sizeof(*crypto_info), GFP_KERNEL); if (!crypto_info) { err = -ENOMEM; goto err_crypto; } crypto_info->dev = &pdev->dev; platform_set_drvdata(pdev, crypto_info); crypto_info->variant = of_device_get_match_data(&pdev->dev); if (!crypto_info->variant) { dev_err(&pdev->dev, "Missing variant\n"); return -EINVAL; } crypto_info->rst = devm_reset_control_array_get_exclusive(dev); if (IS_ERR(crypto_info->rst)) { err = PTR_ERR(crypto_info->rst); goto err_crypto; } reset_control_assert(crypto_info->rst); usleep_range(10, 20); reset_control_deassert(crypto_info->rst); crypto_info->reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(crypto_info->reg)) { err = PTR_ERR(crypto_info->reg); goto err_crypto; } err = rk_crypto_get_clks(crypto_info); if (err) goto err_crypto; crypto_info->irq = platform_get_irq(pdev, 0); if (crypto_info->irq < 0) { err = crypto_info->irq; goto err_crypto; } err = devm_request_irq(&pdev->dev, crypto_info->irq, rk_crypto_irq_handle, IRQF_SHARED, "rk-crypto", pdev); if (err) { dev_err(&pdev->dev, "irq request failed.\n"); goto err_crypto; } crypto_info->engine = crypto_engine_alloc_init(&pdev->dev, true); crypto_engine_start(crypto_info->engine); init_completion(&crypto_info->complete); err = rk_crypto_pm_init(crypto_info); if (err) goto err_pm; spin_lock(&rocklist.lock); first = list_first_entry_or_null(&rocklist.dev_list, struct rk_crypto_info, list); list_add_tail(&crypto_info->list, &rocklist.dev_list); spin_unlock(&rocklist.lock); if (!first) { err = rk_crypto_register(crypto_info); if (err) { dev_err(dev, "Fail to register crypto algorithms"); goto err_register_alg; } register_debugfs(crypto_info); } return 0; err_register_alg: rk_crypto_pm_exit(crypto_info); err_pm: crypto_engine_exit(crypto_info->engine); err_crypto: dev_err(dev, "Crypto Accelerator not successfully registered\n"); return err; } static int rk_crypto_remove(struct platform_device *pdev) { struct 
rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev); struct rk_crypto_info *first; spin_lock_bh(&rocklist.lock); list_del(&crypto_tmp->list); first = list_first_entry_or_null(&rocklist.dev_list, struct rk_crypto_info, list); spin_unlock_bh(&rocklist.lock); if (!first) { #ifdef CONFIG_CRYPTO_DEV_ROCKCHIP_DEBUG debugfs_remove_recursive(rocklist.dbgfs_dir); #endif rk_crypto_unregister(); } rk_crypto_pm_exit(crypto_tmp); crypto_engine_exit(crypto_tmp->engine); return 0; } static struct platform_driver crypto_driver = { .probe = rk_crypto_probe, .remove = rk_crypto_remove, .driver = { .name = "rk3288-crypto", .pm = &rk_crypto_pm_ops, .of_match_table = crypto_of_id_table, }, }; module_platform_driver(crypto_driver); MODULE_AUTHOR("Zain Wang <[email protected]>"); MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine"); MODULE_LICENSE("GPL");
linux-master
drivers/crypto/rockchip/rk3288_crypto.c
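/*
 * Readable illustration (not new driver code) of the load-balancing scheme
 * implemented by get_rk_crypto() above: every probed crypto block sits on a
 * global list, each request takes the current head, and the list is rotated
 * so successive requests round-robin across devices. Type and function
 * names here are simplified stand-ins for the driver's own.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct rk_device {
	struct list_head list;
	/* ... per-device state ... */
};

static LIST_HEAD(rk_dev_list);
static DEFINE_SPINLOCK(rk_dev_lock);

static struct rk_device *rk_pick_device(void)
{
	struct rk_device *first;

	spin_lock(&rk_dev_lock);
	/* current head services this request ... */
	first = list_first_entry_or_null(&rk_dev_list, struct rk_device, list);
	/* ... and rotating pushes it to the tail for the next caller */
	list_rotate_left(&rk_dev_list);
	spin_unlock(&rk_dev_lock);

	return first;
}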
// SPDX-License-Identifier: GPL-2.0-only /* * Crypto acceleration support for Rockchip RK3288 * * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd * * Author: Zain Wang <[email protected]> * * Some ideas are from marvell/cesa.c and s5p-sss.c driver. */ #include <asm/unaligned.h> #include <crypto/internal/hash.h> #include <linux/device.h> #include <linux/err.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include "rk3288_crypto.h" /* * IC can not process zero message hash, * so we put the fixed hash out when met zero message. */ static bool rk_ahash_need_fallback(struct ahash_request *req) { struct scatterlist *sg; sg = req->src; while (sg) { if (!IS_ALIGNED(sg->offset, sizeof(u32))) { return true; } if (sg->length % 4) { return true; } sg = sg_next(sg); } return false; } static int rk_ahash_digest_fb(struct ahash_request *areq) { struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct rk_ahash_ctx *tfmctx = crypto_ahash_ctx(tfm); struct ahash_alg *alg = crypto_ahash_alg(tfm); struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base); algt->stat_fb++; ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.nbytes = areq->nbytes; rctx->fallback_req.src = areq->src; rctx->fallback_req.result = areq->result; return crypto_ahash_digest(&rctx->fallback_req); } static int zero_message_process(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int rk_digest_size = crypto_ahash_digestsize(tfm); switch (rk_digest_size) { case SHA1_DIGEST_SIZE: memcpy(req->result, sha1_zero_message_hash, rk_digest_size); break; case SHA256_DIGEST_SIZE: memcpy(req->result, sha256_zero_message_hash, rk_digest_size); break; case MD5_DIGEST_SIZE: memcpy(req->result, md5_zero_message_hash, rk_digest_size); break; default: return -EINVAL; } return 0; } static void rk_ahash_reg_init(struct ahash_request *req, struct rk_crypto_info *dev) { struct rk_ahash_rctx *rctx = ahash_request_ctx(req); int reg_status; reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL) | RK_CRYPTO_HASH_FLUSH | _SBF(0xffff, 16); CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status); reg_status = CRYPTO_READ(dev, RK_CRYPTO_CTRL); reg_status &= (~RK_CRYPTO_HASH_FLUSH); reg_status |= _SBF(0xffff, 16); CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, reg_status); memset_io(dev->reg + RK_CRYPTO_HASH_DOUT_0, 0, 32); CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_HRDMA_ERR_ENA | RK_CRYPTO_HRDMA_DONE_ENA); CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, RK_CRYPTO_HRDMA_ERR_INT | RK_CRYPTO_HRDMA_DONE_INT); CRYPTO_WRITE(dev, RK_CRYPTO_HASH_CTRL, rctx->mode | RK_CRYPTO_HASH_SWAP_DO); CRYPTO_WRITE(dev, RK_CRYPTO_CONF, RK_CRYPTO_BYTESWAP_HRFIFO | RK_CRYPTO_BYTESWAP_BRFIFO | RK_CRYPTO_BYTESWAP_BTFIFO); CRYPTO_WRITE(dev, RK_CRYPTO_HASH_MSG_LEN, req->nbytes); } static int rk_ahash_init(struct ahash_request *req) { struct rk_ahash_rctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_ahash_init(&rctx->fallback_req); } static int rk_ahash_update(struct ahash_request *req) { struct rk_ahash_rctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct 
rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.nbytes = req->nbytes; rctx->fallback_req.src = req->src; return crypto_ahash_update(&rctx->fallback_req); } static int rk_ahash_final(struct ahash_request *req) { struct rk_ahash_rctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.result = req->result; return crypto_ahash_final(&rctx->fallback_req); } static int rk_ahash_finup(struct ahash_request *req) { struct rk_ahash_rctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.nbytes = req->nbytes; rctx->fallback_req.src = req->src; rctx->fallback_req.result = req->result; return crypto_ahash_finup(&rctx->fallback_req); } static int rk_ahash_import(struct ahash_request *req, const void *in) { struct rk_ahash_rctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_ahash_import(&rctx->fallback_req, in); } static int rk_ahash_export(struct ahash_request *req, void *out) { struct rk_ahash_rctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_ahash_export(&rctx->fallback_req, out); } static int rk_ahash_digest(struct ahash_request *req) { struct rk_ahash_rctx *rctx = ahash_request_ctx(req); struct rk_crypto_info *dev; struct crypto_engine *engine; if (rk_ahash_need_fallback(req)) return rk_ahash_digest_fb(req); if (!req->nbytes) return zero_message_process(req); dev = get_rk_crypto(); rctx->dev = dev; engine = dev->engine; return crypto_transfer_hash_request_to_engine(engine, req); } static void crypto_ahash_dma_start(struct rk_crypto_info *dev, struct scatterlist *sg) { CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAS, sg_dma_address(sg)); CRYPTO_WRITE(dev, RK_CRYPTO_HRDMAL, sg_dma_len(sg) / 4); CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_HASH_START | (RK_CRYPTO_HASH_START << 16)); } static int rk_hash_prepare(struct crypto_engine *engine, void *breq) { struct ahash_request *areq = container_of(breq, struct ahash_request, base); struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); struct rk_crypto_info *rkc = rctx->dev; int ret; ret = dma_map_sg(rkc->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); if (ret <= 0) return -EINVAL; rctx->nrsg = ret; return 0; } static void rk_hash_unprepare(struct crypto_engine *engine, void *breq) { struct ahash_request *areq = container_of(breq, struct ahash_request, base); struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); struct rk_crypto_info *rkc = rctx->dev; dma_unmap_sg(rkc->dev, areq->src, rctx->nrsg, DMA_TO_DEVICE); } static int rk_hash_run(struct crypto_engine 
*engine, void *breq) { struct ahash_request *areq = container_of(breq, struct ahash_request, base); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct rk_ahash_rctx *rctx = ahash_request_ctx(areq); struct ahash_alg *alg = crypto_ahash_alg(tfm); struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base); struct scatterlist *sg = areq->src; struct rk_crypto_info *rkc = rctx->dev; int err; int i; u32 v; err = pm_runtime_resume_and_get(rkc->dev); if (err) return err; err = rk_hash_prepare(engine, breq); if (err) goto theend; rctx->mode = 0; algt->stat_req++; rkc->nreq++; switch (crypto_ahash_digestsize(tfm)) { case SHA1_DIGEST_SIZE: rctx->mode = RK_CRYPTO_HASH_SHA1; break; case SHA256_DIGEST_SIZE: rctx->mode = RK_CRYPTO_HASH_SHA256; break; case MD5_DIGEST_SIZE: rctx->mode = RK_CRYPTO_HASH_MD5; break; default: err = -EINVAL; goto theend; } rk_ahash_reg_init(areq, rkc); while (sg) { reinit_completion(&rkc->complete); rkc->status = 0; crypto_ahash_dma_start(rkc, sg); wait_for_completion_interruptible_timeout(&rkc->complete, msecs_to_jiffies(2000)); if (!rkc->status) { dev_err(rkc->dev, "DMA timeout\n"); err = -EFAULT; goto theend; } sg = sg_next(sg); } /* * it will take some time to process date after last dma * transmission. * * waiting time is relative with the last date len, * so cannot set a fixed time here. * 10us makes system not call here frequently wasting * efficiency, and make it response quickly when dma * complete. */ readl_poll_timeout(rkc->reg + RK_CRYPTO_HASH_STS, v, v == 0, 10, 1000); for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) { v = readl(rkc->reg + RK_CRYPTO_HASH_DOUT_0 + i * 4); put_unaligned_le32(v, areq->result + i * 4); } theend: pm_runtime_put_autosuspend(rkc->dev); local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); local_bh_enable(); rk_hash_unprepare(engine, breq); return 0; } static int rk_hash_init_tfm(struct crypto_ahash *tfm) { struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); const char *alg_name = crypto_ahash_alg_name(tfm); struct ahash_alg *alg = crypto_ahash_alg(tfm); struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.hash.base); /* for fallback */ tctx->fallback_tfm = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(tctx->fallback_tfm)) { dev_err(algt->dev->dev, "Could not load fallback driver.\n"); return PTR_ERR(tctx->fallback_tfm); } crypto_ahash_set_reqsize(tfm, sizeof(struct rk_ahash_rctx) + crypto_ahash_reqsize(tctx->fallback_tfm)); return 0; } static void rk_hash_exit_tfm(struct crypto_ahash *tfm) { struct rk_ahash_ctx *tctx = crypto_ahash_ctx(tfm); crypto_free_ahash(tctx->fallback_tfm); } struct rk_crypto_tmp rk_ahash_sha1 = { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash.base = { .init = rk_ahash_init, .update = rk_ahash_update, .final = rk_ahash_final, .finup = rk_ahash_finup, .export = rk_ahash_export, .import = rk_ahash_import, .digest = rk_ahash_digest, .init_tfm = rk_hash_init_tfm, .exit_tfm = rk_hash_exit_tfm, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name = "rk-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct rk_ahash_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = rk_hash_run, }, }; struct rk_crypto_tmp rk_ahash_sha256 = { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash.base = { .init = rk_ahash_init, .update = 
rk_ahash_update, .final = rk_ahash_final, .finup = rk_ahash_finup, .export = rk_ahash_export, .import = rk_ahash_import, .digest = rk_ahash_digest, .init_tfm = rk_hash_init_tfm, .exit_tfm = rk_hash_exit_tfm, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "rk-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct rk_ahash_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = rk_hash_run, }, }; struct rk_crypto_tmp rk_ahash_md5 = { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash.base = { .init = rk_ahash_init, .update = rk_ahash_update, .final = rk_ahash_final, .finup = rk_ahash_finup, .export = rk_ahash_export, .import = rk_ahash_import, .digest = rk_ahash_digest, .init_tfm = rk_hash_init_tfm, .exit_tfm = rk_hash_exit_tfm, .halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name = "rk-md5", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct rk_ahash_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = rk_hash_run, }, };
linux-master
drivers/crypto/rockchip/rk3288_crypto_ahash.c
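/*
 * Consumer-side sketch (assumed): requesting this driver's SHA-256
 * implementation explicitly by its cra_driver_name ("rk-sha256"). Note that
 * rk_ahash_need_fallback() silently redirects scatterlists whose offset or
 * length is not 4-byte aligned to the software fallback, so contiguous,
 * word-aligned buffers are what actually reach the hardware path. Function
 * name and parameters are illustrative.
 */
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_rk_sha256(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_ahash("rk-sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return ret;
}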
// SPDX-License-Identifier: GPL-2.0-only /* * Crypto acceleration support for Rockchip RK3288 * * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd * * Author: Zain Wang <[email protected]> * * Some ideas are from marvell-cesa.c and s5p-sss.c driver. */ #include <crypto/engine.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/device.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/string.h> #include "rk3288_crypto.h" #define RK_CRYPTO_DEC BIT(0) static int rk_cipher_need_fallback(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base); struct scatterlist *sgs, *sgd; unsigned int stodo, dtodo, len; unsigned int bs = crypto_skcipher_blocksize(tfm); if (!req->cryptlen) return true; len = req->cryptlen; sgs = req->src; sgd = req->dst; while (sgs && sgd) { if (!IS_ALIGNED(sgs->offset, sizeof(u32))) { algt->stat_fb_align++; return true; } if (!IS_ALIGNED(sgd->offset, sizeof(u32))) { algt->stat_fb_align++; return true; } stodo = min(len, sgs->length); if (stodo % bs) { algt->stat_fb_len++; return true; } dtodo = min(len, sgd->length); if (dtodo % bs) { algt->stat_fb_len++; return true; } if (stodo != dtodo) { algt->stat_fb_sgdiff++; return true; } len -= stodo; sgs = sg_next(sgs); sgd = sg_next(sgd); } return false; } static int rk_cipher_fallback(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct rk_cipher_ctx *op = crypto_skcipher_ctx(tfm); struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base); int err; algt->stat_fb++; skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, areq->base.complete, areq->base.data); skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (rctx->mode & RK_CRYPTO_DEC) err = crypto_skcipher_decrypt(&rctx->fallback_req); else err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } static int rk_cipher_handle_req(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_crypto_info *rkc; struct crypto_engine *engine; if (rk_cipher_need_fallback(req)) return rk_cipher_fallback(req); rkc = get_rk_crypto(); engine = rkc->engine; rctx->dev = rkc; return crypto_transfer_skcipher_request_to_engine(engine, req); } static int rk_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); struct rk_cipher_ctx *ctx = crypto_tfm_ctx(tfm); if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) return -EINVAL; ctx->keylen = keylen; memcpy(ctx->key, key, keylen); return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_des_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); int err; err = verify_skcipher_des_key(cipher, key); if (err) return err; ctx->keylen = keylen; memcpy(ctx->key, key, keylen); return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_tdes_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct rk_cipher_ctx 
*ctx = crypto_skcipher_ctx(cipher); int err; err = verify_skcipher_des3_key(cipher, key); if (err) return err; ctx->keylen = keylen; memcpy(ctx->key, key, keylen); return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int rk_aes_ecb_encrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = RK_CRYPTO_AES_ECB_MODE; return rk_cipher_handle_req(req); } static int rk_aes_ecb_decrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = RK_CRYPTO_AES_ECB_MODE | RK_CRYPTO_DEC; return rk_cipher_handle_req(req); } static int rk_aes_cbc_encrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = RK_CRYPTO_AES_CBC_MODE; return rk_cipher_handle_req(req); } static int rk_aes_cbc_decrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = RK_CRYPTO_AES_CBC_MODE | RK_CRYPTO_DEC; return rk_cipher_handle_req(req); } static int rk_des_ecb_encrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = 0; return rk_cipher_handle_req(req); } static int rk_des_ecb_decrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = RK_CRYPTO_DEC; return rk_cipher_handle_req(req); } static int rk_des_cbc_encrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC; return rk_cipher_handle_req(req); } static int rk_des_cbc_decrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; return rk_cipher_handle_req(req); } static int rk_des3_ede_ecb_encrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = RK_CRYPTO_TDES_SELECT; return rk_cipher_handle_req(req); } static int rk_des3_ede_ecb_decrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_DEC; return rk_cipher_handle_req(req); } static int rk_des3_ede_cbc_encrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC; return rk_cipher_handle_req(req); } static int rk_des3_ede_cbc_decrypt(struct skcipher_request *req) { struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); rctx->mode = RK_CRYPTO_TDES_SELECT | RK_CRYPTO_TDES_CHAINMODE_CBC | RK_CRYPTO_DEC; return rk_cipher_handle_req(req); } static void rk_cipher_hw_init(struct rk_crypto_info *dev, struct skcipher_request *req) { struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); struct rk_cipher_rctx *rctx = skcipher_request_ctx(req); struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); u32 block, conf_reg = 0; block = crypto_tfm_alg_blocksize(tfm); if (block == DES_BLOCK_SIZE) { rctx->mode |= RK_CRYPTO_TDES_FIFO_MODE | RK_CRYPTO_TDES_BYTESWAP_KEY | RK_CRYPTO_TDES_BYTESWAP_IV; CRYPTO_WRITE(dev, RK_CRYPTO_TDES_CTRL, rctx->mode); memcpy_toio(dev->reg + RK_CRYPTO_TDES_KEY1_0, ctx->key, ctx->keylen); conf_reg = RK_CRYPTO_DESSEL; } else { rctx->mode |= RK_CRYPTO_AES_FIFO_MODE | RK_CRYPTO_AES_KEY_CHANGE | RK_CRYPTO_AES_BYTESWAP_KEY | RK_CRYPTO_AES_BYTESWAP_IV; if (ctx->keylen == AES_KEYSIZE_192) rctx->mode |= RK_CRYPTO_AES_192BIT_key; else if 
(ctx->keylen == AES_KEYSIZE_256) rctx->mode |= RK_CRYPTO_AES_256BIT_key; CRYPTO_WRITE(dev, RK_CRYPTO_AES_CTRL, rctx->mode); memcpy_toio(dev->reg + RK_CRYPTO_AES_KEY_0, ctx->key, ctx->keylen); } conf_reg |= RK_CRYPTO_BYTESWAP_BTFIFO | RK_CRYPTO_BYTESWAP_BRFIFO; CRYPTO_WRITE(dev, RK_CRYPTO_CONF, conf_reg); CRYPTO_WRITE(dev, RK_CRYPTO_INTENA, RK_CRYPTO_BCDMA_ERR_ENA | RK_CRYPTO_BCDMA_DONE_ENA); } static void crypto_dma_start(struct rk_crypto_info *dev, struct scatterlist *sgs, struct scatterlist *sgd, unsigned int todo) { CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAS, sg_dma_address(sgs)); CRYPTO_WRITE(dev, RK_CRYPTO_BRDMAL, todo); CRYPTO_WRITE(dev, RK_CRYPTO_BTDMAS, sg_dma_address(sgd)); CRYPTO_WRITE(dev, RK_CRYPTO_CTRL, RK_CRYPTO_BLOCK_START | _SBF(RK_CRYPTO_BLOCK_START, 16)); } static int rk_cipher_run(struct crypto_engine *engine, void *async_req) { struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct rk_cipher_rctx *rctx = skcipher_request_ctx(areq); struct scatterlist *sgs, *sgd; int err = 0; int ivsize = crypto_skcipher_ivsize(tfm); int offset; u8 iv[AES_BLOCK_SIZE]; u8 biv[AES_BLOCK_SIZE]; u8 *ivtouse = areq->iv; unsigned int len = areq->cryptlen; unsigned int todo; struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base); struct rk_crypto_info *rkc = rctx->dev; err = pm_runtime_resume_and_get(rkc->dev); if (err) return err; algt->stat_req++; rkc->nreq++; ivsize = crypto_skcipher_ivsize(tfm); if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { if (rctx->mode & RK_CRYPTO_DEC) { offset = areq->cryptlen - ivsize; scatterwalk_map_and_copy(rctx->backup_iv, areq->src, offset, ivsize, 0); } } sgs = areq->src; sgd = areq->dst; while (sgs && sgd && len) { if (!sgs->length) { sgs = sg_next(sgs); sgd = sg_next(sgd); continue; } if (rctx->mode & RK_CRYPTO_DEC) { /* we backup last block of source to be used as IV at next step */ offset = sgs->length - ivsize; scatterwalk_map_and_copy(biv, sgs, offset, ivsize, 0); } if (sgs == sgd) { err = dma_map_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL); if (err <= 0) { err = -EINVAL; goto theend_iv; } } else { err = dma_map_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); if (err <= 0) { err = -EINVAL; goto theend_iv; } err = dma_map_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE); if (err <= 0) { err = -EINVAL; goto theend_sgs; } } err = 0; rk_cipher_hw_init(rkc, areq); if (ivsize) { if (ivsize == DES_BLOCK_SIZE) memcpy_toio(rkc->reg + RK_CRYPTO_TDES_IV_0, ivtouse, ivsize); else memcpy_toio(rkc->reg + RK_CRYPTO_AES_IV_0, ivtouse, ivsize); } reinit_completion(&rkc->complete); rkc->status = 0; todo = min(sg_dma_len(sgs), len); len -= todo; crypto_dma_start(rkc, sgs, sgd, todo / 4); wait_for_completion_interruptible_timeout(&rkc->complete, msecs_to_jiffies(2000)); if (!rkc->status) { dev_err(rkc->dev, "DMA timeout\n"); err = -EFAULT; goto theend; } if (sgs == sgd) { dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL); } else { dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE); } if (rctx->mode & RK_CRYPTO_DEC) { memcpy(iv, biv, ivsize); ivtouse = iv; } else { offset = sgd->length - ivsize; scatterwalk_map_and_copy(iv, sgd, offset, ivsize, 0); ivtouse = iv; } sgs = sg_next(sgs); sgd = sg_next(sgd); } if (areq->iv && ivsize > 0) { offset = areq->cryptlen - ivsize; if (rctx->mode & RK_CRYPTO_DEC) { memcpy(areq->iv, rctx->backup_iv, ivsize); memzero_explicit(rctx->backup_iv, 
ivsize); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, offset, ivsize, 0); } } theend: pm_runtime_put_autosuspend(rkc->dev); local_bh_disable(); crypto_finalize_skcipher_request(engine, areq, err); local_bh_enable(); return 0; theend_sgs: if (sgs == sgd) { dma_unmap_sg(rkc->dev, sgs, 1, DMA_BIDIRECTIONAL); } else { dma_unmap_sg(rkc->dev, sgs, 1, DMA_TO_DEVICE); dma_unmap_sg(rkc->dev, sgd, 1, DMA_FROM_DEVICE); } theend_iv: return err; } static int rk_cipher_tfm_init(struct crypto_skcipher *tfm) { struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); const char *name = crypto_tfm_alg_name(&tfm->base); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct rk_crypto_tmp *algt = container_of(alg, struct rk_crypto_tmp, alg.skcipher.base); ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback_tfm)) { dev_err(algt->dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(ctx->fallback_tfm)); return PTR_ERR(ctx->fallback_tfm); } tfm->reqsize = sizeof(struct rk_cipher_rctx) + crypto_skcipher_reqsize(ctx->fallback_tfm); return 0; } static void rk_cipher_tfm_exit(struct crypto_skcipher *tfm) { struct rk_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); memzero_explicit(ctx->key, ctx->keylen); crypto_free_skcipher(ctx->fallback_tfm); } struct rk_crypto_tmp rk_ecb_aes_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher.base = { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-aes-rk", .base.cra_priority = 300, .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x0f, .base.cra_module = THIS_MODULE, .init = rk_cipher_tfm_init, .exit = rk_cipher_tfm_exit, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = rk_aes_setkey, .encrypt = rk_aes_ecb_encrypt, .decrypt = rk_aes_ecb_decrypt, }, .alg.skcipher.op = { .do_one_request = rk_cipher_run, }, }; struct rk_crypto_tmp rk_cbc_aes_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher.base = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-rk", .base.cra_priority = 300, .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x0f, .base.cra_module = THIS_MODULE, .init = rk_cipher_tfm_init, .exit = rk_cipher_tfm_exit, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = rk_aes_setkey, .encrypt = rk_aes_cbc_encrypt, .decrypt = rk_aes_cbc_decrypt, }, .alg.skcipher.op = { .do_one_request = rk_cipher_run, }, }; struct rk_crypto_tmp rk_ecb_des_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher.base = { .base.cra_name = "ecb(des)", .base.cra_driver_name = "ecb-des-rk", .base.cra_priority = 300, .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, .base.cra_module = THIS_MODULE, .init = rk_cipher_tfm_init, .exit = rk_cipher_tfm_exit, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = rk_des_setkey, .encrypt = rk_des_ecb_encrypt, .decrypt = rk_des_ecb_decrypt, }, .alg.skcipher.op = { .do_one_request = rk_cipher_run, }, }; struct rk_crypto_tmp rk_cbc_des_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher.base = { .base.cra_name = "cbc(des)", .base.cra_driver_name = "cbc-des-rk", .base.cra_priority = 300, 
.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, .base.cra_module = THIS_MODULE, .init = rk_cipher_tfm_init, .exit = rk_cipher_tfm_exit, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, .setkey = rk_des_setkey, .encrypt = rk_des_cbc_encrypt, .decrypt = rk_des_cbc_decrypt, }, .alg.skcipher.op = { .do_one_request = rk_cipher_run, }, }; struct rk_crypto_tmp rk_ecb_des3_ede_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher.base = { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-des3-ede-rk", .base.cra_priority = 300, .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, .base.cra_module = THIS_MODULE, .init = rk_cipher_tfm_init, .exit = rk_cipher_tfm_exit, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = rk_tdes_setkey, .encrypt = rk_des3_ede_ecb_encrypt, .decrypt = rk_des3_ede_ecb_decrypt, }, .alg.skcipher.op = { .do_one_request = rk_cipher_run, }, }; struct rk_crypto_tmp rk_cbc_des3_ede_alg = { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher.base = { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-des3-ede-rk", .base.cra_priority = 300, .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct rk_cipher_ctx), .base.cra_alignmask = 0x07, .base.cra_module = THIS_MODULE, .init = rk_cipher_tfm_init, .exit = rk_cipher_tfm_exit, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, .setkey = rk_tdes_setkey, .encrypt = rk_des3_ede_cbc_encrypt, .decrypt = rk_des3_ede_cbc_decrypt, }, .alg.skcipher.op = { .do_one_request = rk_cipher_run, }, };
linux-master
drivers/crypto/rockchip/rk3288_crypto_skcipher.c
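A hedged usage sketch, separate from the file above: the driver registers "cbc(aes)" (driver name "cbc-aes-rk") with the generic crypto API, so an in-kernel consumer reaches it through the standard asynchronous skcipher calls shown below. Which implementation the API actually selects depends on registered priorities, and the demo_cbc_aes_once() name, the all-zero key and the zero IV are invented purely for illustration.

/*
 * Illustrative consumer of a "cbc(aes)" skcipher such as the one
 * registered by rk3288_crypto_skcipher.c. Assumption: names, key and IV
 * are placeholders; algorithm selection is up to the crypto core.
 */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int demo_cbc_aes_once(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* placeholder key */
	u8 iv[AES_BLOCK_SIZE] = { 0 };		/* placeholder IV */
	u8 *buf;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_tfm;
	}

	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto free_req;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto free_buf;

	/* Encrypt one block in place; the driver may complete asynchronously. */
	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

free_buf:
	kfree(buf);
free_req:
	skcipher_request_free(req);
free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}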
// SPDX-License-Identifier: GPL-2.0 /* * Cryptographic API. * * Support for StarFive hardware cryptographic engine. * Copyright (c) 2022 StarFive Technology * */ #include <crypto/engine.h> #include "jh7110-cryp.h" #include <linux/clk.h> #include <linux/completion.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/spinlock.h> #define DRIVER_NAME "jh7110-crypto" struct starfive_dev_list { struct list_head dev_list; spinlock_t lock; /* protect dev_list */ }; static struct starfive_dev_list dev_list = { .dev_list = LIST_HEAD_INIT(dev_list.dev_list), .lock = __SPIN_LOCK_UNLOCKED(dev_list.lock), }; struct starfive_cryp_dev *starfive_cryp_find_dev(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = NULL, *tmp; spin_lock_bh(&dev_list.lock); if (!ctx->cryp) { list_for_each_entry(tmp, &dev_list.dev_list, list) { cryp = tmp; break; } ctx->cryp = cryp; } else { cryp = ctx->cryp; } spin_unlock_bh(&dev_list.lock); return cryp; } static u16 side_chan; module_param(side_chan, ushort, 0); MODULE_PARM_DESC(side_chan, "Enable side channel mitigation for AES module.\n" "Enabling this feature will reduce speed performance.\n" " 0 - Disabled\n" " other - Enabled"); static int starfive_dma_init(struct starfive_cryp_dev *cryp) { dma_cap_mask_t mask; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); cryp->tx = dma_request_chan(cryp->dev, "tx"); if (IS_ERR(cryp->tx)) return dev_err_probe(cryp->dev, PTR_ERR(cryp->tx), "Error requesting tx dma channel.\n"); cryp->rx = dma_request_chan(cryp->dev, "rx"); if (IS_ERR(cryp->rx)) { dma_release_channel(cryp->tx); return dev_err_probe(cryp->dev, PTR_ERR(cryp->rx), "Error requesting rx dma channel.\n"); } return 0; } static void starfive_dma_cleanup(struct starfive_cryp_dev *cryp) { dma_release_channel(cryp->tx); dma_release_channel(cryp->rx); } static irqreturn_t starfive_cryp_irq(int irq, void *priv) { u32 status; u32 mask; struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)priv; mask = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); status = readl(cryp->base + STARFIVE_IE_FLAG_OFFSET); if (status & STARFIVE_IE_FLAG_AES_DONE) { mask |= STARFIVE_IE_MASK_AES_DONE; writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET); tasklet_schedule(&cryp->aes_done); } if (status & STARFIVE_IE_FLAG_HASH_DONE) { mask |= STARFIVE_IE_MASK_HASH_DONE; writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET); tasklet_schedule(&cryp->hash_done); } if (status & STARFIVE_IE_FLAG_PKA_DONE) { mask |= STARFIVE_IE_MASK_PKA_DONE; writel(mask, cryp->base + STARFIVE_IE_MASK_OFFSET); complete(&cryp->pka_done); } return IRQ_HANDLED; } static int starfive_cryp_probe(struct platform_device *pdev) { struct starfive_cryp_dev *cryp; struct resource *res; int irq; int ret; cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL); if (!cryp) return -ENOMEM; platform_set_drvdata(pdev, cryp); cryp->dev = &pdev->dev; cryp->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(cryp->base)) return dev_err_probe(&pdev->dev, PTR_ERR(cryp->base), "Error remapping memory for platform device\n"); tasklet_init(&cryp->aes_done, starfive_aes_done_task, (unsigned long)cryp); tasklet_init(&cryp->hash_done, starfive_hash_done_task, (unsigned long)cryp); cryp->phys_base = res->start; cryp->dma_maxburst = 32; cryp->side_chan = side_chan; cryp->hclk = devm_clk_get(&pdev->dev, 
"hclk"); if (IS_ERR(cryp->hclk)) return dev_err_probe(&pdev->dev, PTR_ERR(cryp->hclk), "Error getting hardware reference clock\n"); cryp->ahb = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(cryp->ahb)) return dev_err_probe(&pdev->dev, PTR_ERR(cryp->ahb), "Error getting ahb reference clock\n"); cryp->rst = devm_reset_control_get_shared(cryp->dev, NULL); if (IS_ERR(cryp->rst)) return dev_err_probe(&pdev->dev, PTR_ERR(cryp->rst), "Error getting hardware reset line\n"); init_completion(&cryp->pka_done); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_irq(&pdev->dev, irq, starfive_cryp_irq, 0, pdev->name, (void *)cryp); if (ret) return dev_err_probe(&pdev->dev, irq, "Failed to register interrupt handler\n"); clk_prepare_enable(cryp->hclk); clk_prepare_enable(cryp->ahb); reset_control_deassert(cryp->rst); spin_lock(&dev_list.lock); list_add(&cryp->list, &dev_list.dev_list); spin_unlock(&dev_list.lock); ret = starfive_dma_init(cryp); if (ret) { if (ret == -EPROBE_DEFER) goto err_probe_defer; else goto err_dma_init; } /* Initialize crypto engine */ cryp->engine = crypto_engine_alloc_init(&pdev->dev, 1); if (!cryp->engine) { ret = -ENOMEM; goto err_engine; } ret = crypto_engine_start(cryp->engine); if (ret) goto err_engine_start; ret = starfive_aes_register_algs(); if (ret) goto err_algs_aes; ret = starfive_hash_register_algs(); if (ret) goto err_algs_hash; ret = starfive_rsa_register_algs(); if (ret) goto err_algs_rsa; return 0; err_algs_rsa: starfive_hash_unregister_algs(); err_algs_hash: starfive_aes_unregister_algs(); err_algs_aes: crypto_engine_stop(cryp->engine); err_engine_start: crypto_engine_exit(cryp->engine); err_engine: starfive_dma_cleanup(cryp); err_dma_init: spin_lock(&dev_list.lock); list_del(&cryp->list); spin_unlock(&dev_list.lock); clk_disable_unprepare(cryp->hclk); clk_disable_unprepare(cryp->ahb); reset_control_assert(cryp->rst); tasklet_kill(&cryp->aes_done); tasklet_kill(&cryp->hash_done); err_probe_defer: return ret; } static void starfive_cryp_remove(struct platform_device *pdev) { struct starfive_cryp_dev *cryp = platform_get_drvdata(pdev); starfive_aes_unregister_algs(); starfive_hash_unregister_algs(); starfive_rsa_unregister_algs(); tasklet_kill(&cryp->aes_done); tasklet_kill(&cryp->hash_done); crypto_engine_stop(cryp->engine); crypto_engine_exit(cryp->engine); starfive_dma_cleanup(cryp); spin_lock(&dev_list.lock); list_del(&cryp->list); spin_unlock(&dev_list.lock); clk_disable_unprepare(cryp->hclk); clk_disable_unprepare(cryp->ahb); reset_control_assert(cryp->rst); } static const struct of_device_id starfive_dt_ids[] __maybe_unused = { { .compatible = "starfive,jh7110-crypto", .data = NULL}, {}, }; MODULE_DEVICE_TABLE(of, starfive_dt_ids); static struct platform_driver starfive_cryp_driver = { .probe = starfive_cryp_probe, .remove_new = starfive_cryp_remove, .driver = { .name = DRIVER_NAME, .of_match_table = starfive_dt_ids, }, }; module_platform_driver(starfive_cryp_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("StarFive JH7110 Cryptographic Module");
linux-master
drivers/crypto/starfive/jh7110-cryp.c
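A hedged aside, not part of the file above: starfive_cryp_probe() leans on dev_err_probe() for nearly every resource-acquisition failure. The pattern returns the error code, attaches the message as the deferred-probe reason, and stays quiet for -EPROBE_DEFER so deferred probes do not flood the log. The demo_get_clock() wrapper and its "hclk" name below are only an illustration of that idiom.

/*
 * Illustrative dev_err_probe() idiom as used throughout
 * starfive_cryp_probe(). Assumption: demo_get_clock() is a made-up
 * helper; only the pattern itself is taken from the driver.
 */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_get_clock(struct platform_device *pdev, struct clk **out)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(clk))
		/* Logs (or records, for -EPROBE_DEFER) and returns in one step. */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "Error getting hardware reference clock\n");

	*out = clk;
	return 0;
}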
// SPDX-License-Identifier: GPL-2.0 /* * Hash function and HMAC support for StarFive driver * * Copyright (c) 2022 StarFive Technology * */ #include <crypto/engine.h> #include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> #include "jh7110-cryp.h" #include <linux/amba/pl080.h> #include <linux/clk.h> #include <linux/dma-direct.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #define STARFIVE_HASH_REGS_OFFSET 0x300 #define STARFIVE_HASH_SHACSR (STARFIVE_HASH_REGS_OFFSET + 0x0) #define STARFIVE_HASH_SHAWDR (STARFIVE_HASH_REGS_OFFSET + 0x4) #define STARFIVE_HASH_SHARDR (STARFIVE_HASH_REGS_OFFSET + 0x8) #define STARFIVE_HASH_SHAWSR (STARFIVE_HASH_REGS_OFFSET + 0xC) #define STARFIVE_HASH_SHAWLEN3 (STARFIVE_HASH_REGS_OFFSET + 0x10) #define STARFIVE_HASH_SHAWLEN2 (STARFIVE_HASH_REGS_OFFSET + 0x14) #define STARFIVE_HASH_SHAWLEN1 (STARFIVE_HASH_REGS_OFFSET + 0x18) #define STARFIVE_HASH_SHAWLEN0 (STARFIVE_HASH_REGS_OFFSET + 0x1C) #define STARFIVE_HASH_SHAWKR (STARFIVE_HASH_REGS_OFFSET + 0x20) #define STARFIVE_HASH_SHAWKLEN (STARFIVE_HASH_REGS_OFFSET + 0x24) #define STARFIVE_HASH_BUFLEN SHA512_BLOCK_SIZE #define STARFIVE_HASH_RESET 0x2 static inline int starfive_hash_wait_busy(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; u32 status; return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status, !(status & STARFIVE_HASH_BUSY), 10, 100000); } static inline int starfive_hash_wait_key_done(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; u32 status; return readl_relaxed_poll_timeout(cryp->base + STARFIVE_HASH_SHACSR, status, (status & STARFIVE_HASH_KEY_DONE), 10, 100000); } static int starfive_hash_hmac_key(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_request_ctx *rctx = ctx->rctx; struct starfive_cryp_dev *cryp = ctx->cryp; int klen = ctx->keylen, loop; unsigned int *key = (unsigned int *)ctx->key; unsigned char *cl; writel(ctx->keylen, cryp->base + STARFIVE_HASH_SHAWKLEN); rctx->csr.hash.hmac = 1; rctx->csr.hash.key_flag = 1; writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR); for (loop = 0; loop < klen / sizeof(unsigned int); loop++, key++) writel(*key, cryp->base + STARFIVE_HASH_SHAWKR); if (klen & 0x3) { cl = (unsigned char *)key; for (loop = 0; loop < (klen & 0x3); loop++, cl++) writeb(*cl, cryp->base + STARFIVE_HASH_SHAWKR); } if (starfive_hash_wait_key_done(ctx)) return dev_err_probe(cryp->dev, -ETIMEDOUT, "starfive_hash_wait_key_done error\n"); return 0; } static void starfive_hash_start(void *param) { struct starfive_cryp_ctx *ctx = param; struct starfive_cryp_request_ctx *rctx = ctx->rctx; struct starfive_cryp_dev *cryp = ctx->cryp; union starfive_alg_cr alg_cr; union starfive_hash_csr csr; u32 stat; dma_unmap_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE); alg_cr.v = 0; alg_cr.clear = 1; writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET); csr.v = readl(cryp->base + STARFIVE_HASH_SHACSR); csr.firstb = 0; csr.final = 1; stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); stat &= ~STARFIVE_IE_MASK_HASH_DONE; writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); writel(csr.v, cryp->base + STARFIVE_HASH_SHACSR); } static int starfive_hash_xmit_dma(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_request_ctx *rctx = ctx->rctx; struct starfive_cryp_dev *cryp = ctx->cryp; struct dma_async_tx_descriptor *in_desc; union starfive_alg_cr 
alg_cr; int total_len; int ret; if (!rctx->total) { starfive_hash_start(ctx); return 0; } writel(rctx->total, cryp->base + STARFIVE_DMA_IN_LEN_OFFSET); total_len = rctx->total; total_len = (total_len & 0x3) ? (((total_len >> 2) + 1) << 2) : total_len; sg_dma_len(rctx->in_sg) = total_len; alg_cr.v = 0; alg_cr.start = 1; alg_cr.hash_dma_en = 1; writel(alg_cr.v, cryp->base + STARFIVE_ALG_CR_OFFSET); ret = dma_map_sg(cryp->dev, rctx->in_sg, rctx->in_sg_len, DMA_TO_DEVICE); if (!ret) return dev_err_probe(cryp->dev, -EINVAL, "dma_map_sg() error\n"); cryp->cfg_in.direction = DMA_MEM_TO_DEV; cryp->cfg_in.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cryp->cfg_in.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cryp->cfg_in.src_maxburst = cryp->dma_maxburst; cryp->cfg_in.dst_maxburst = cryp->dma_maxburst; cryp->cfg_in.dst_addr = cryp->phys_base + STARFIVE_ALG_FIFO_OFFSET; dmaengine_slave_config(cryp->tx, &cryp->cfg_in); in_desc = dmaengine_prep_slave_sg(cryp->tx, rctx->in_sg, ret, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!in_desc) return -EINVAL; in_desc->callback = starfive_hash_start; in_desc->callback_param = ctx; dmaengine_submit(in_desc); dma_async_issue_pending(cryp->tx); return 0; } static int starfive_hash_xmit(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_request_ctx *rctx = ctx->rctx; struct starfive_cryp_dev *cryp = ctx->cryp; int ret = 0; rctx->csr.hash.v = 0; rctx->csr.hash.reset = 1; writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR); if (starfive_hash_wait_busy(ctx)) return dev_err_probe(cryp->dev, -ETIMEDOUT, "Error resetting engine.\n"); rctx->csr.hash.v = 0; rctx->csr.hash.mode = ctx->hash_mode; rctx->csr.hash.ie = 1; if (ctx->is_hmac) { ret = starfive_hash_hmac_key(ctx); if (ret) return ret; } else { rctx->csr.hash.start = 1; rctx->csr.hash.firstb = 1; writel(rctx->csr.hash.v, cryp->base + STARFIVE_HASH_SHACSR); } return starfive_hash_xmit_dma(ctx); } static int starfive_hash_copy_hash(struct ahash_request *req) { struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); int count, *data; int mlen; if (!req->result) return 0; mlen = rctx->digsize / sizeof(u32); data = (u32 *)req->result; for (count = 0; count < mlen; count++) data[count] = readl(ctx->cryp->base + STARFIVE_HASH_SHARDR); return 0; } void starfive_hash_done_task(unsigned long param) { struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param; int err = cryp->err; if (!err) err = starfive_hash_copy_hash(cryp->req.hreq); /* Reset to clear hash_done in irq register*/ writel(STARFIVE_HASH_RESET, cryp->base + STARFIVE_HASH_SHACSR); crypto_finalize_hash_request(cryp->engine, cryp->req.hreq, err); } static int starfive_hash_check_aligned(struct scatterlist *sg, size_t total, size_t align) { int len = 0; if (!total) return 0; if (!IS_ALIGNED(total, align)) return -EINVAL; while (sg) { if (!IS_ALIGNED(sg->offset, sizeof(u32))) return -EINVAL; if (!IS_ALIGNED(sg->length, align)) return -EINVAL; len += sg->length; sg = sg_next(sg); } if (len != total) return -EINVAL; return 0; } static int starfive_hash_one_request(struct crypto_engine *engine, void *areq) { struct ahash_request *req = container_of(areq, struct ahash_request, base); struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct starfive_cryp_dev *cryp = ctx->cryp; if (!cryp) return -ENODEV; return starfive_hash_xmit(ctx); } static int starfive_hash_init(struct ahash_request *req) { struct crypto_ahash *tfm = 
crypto_ahash_reqtfm(req); struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk); ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, req->base.complete, req->base.data); ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src, req->result, req->nbytes); return crypto_ahash_init(&rctx->ahash_fbk_req); } static int starfive_hash_update(struct ahash_request *req) { struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk); ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, req->base.complete, req->base.data); ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src, req->result, req->nbytes); return crypto_ahash_update(&rctx->ahash_fbk_req); } static int starfive_hash_final(struct ahash_request *req) { struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk); ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, req->base.complete, req->base.data); ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src, req->result, req->nbytes); return crypto_ahash_final(&rctx->ahash_fbk_req); } static int starfive_hash_finup(struct ahash_request *req) { struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk); ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, req->base.complete, req->base.data); ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src, req->result, req->nbytes); return crypto_ahash_finup(&rctx->ahash_fbk_req); } static int starfive_hash_digest_fb(struct ahash_request *req) { struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk); ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags, req->base.complete, req->base.data); ahash_request_set_crypt(&rctx->ahash_fbk_req, req->src, req->result, req->nbytes); return crypto_ahash_digest(&rctx->ahash_fbk_req); } static int starfive_hash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm); struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); struct starfive_cryp_dev *cryp = ctx->cryp; memset(rctx, 0, sizeof(struct starfive_cryp_request_ctx)); cryp->req.hreq = req; rctx->total = req->nbytes; rctx->in_sg = req->src; rctx->blksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); rctx->digsize = crypto_ahash_digestsize(tfm); rctx->in_sg_len = sg_nents_for_len(rctx->in_sg, rctx->total); ctx->rctx = rctx; if (starfive_hash_check_aligned(rctx->in_sg, rctx->total, rctx->blksize)) return starfive_hash_digest_fb(req); return crypto_transfer_hash_request_to_engine(cryp->engine, req); } static int starfive_hash_export(struct ahash_request *req, void *out) { struct 
starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk); ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, req->base.complete, req->base.data); return crypto_ahash_export(&rctx->ahash_fbk_req, out); } static int starfive_hash_import(struct ahash_request *req, const void *in) { struct starfive_cryp_request_ctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->ahash_fbk_req, ctx->ahash_fbk); ahash_request_set_callback(&rctx->ahash_fbk_req, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP, req->base.complete, req->base.data); return crypto_ahash_import(&rctx->ahash_fbk_req, in); } static int starfive_hash_init_tfm(struct crypto_ahash *hash, const char *alg_name, unsigned int mode) { struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); ctx->cryp = starfive_cryp_find_dev(ctx); if (!ctx->cryp) return -ENODEV; ctx->ahash_fbk = crypto_alloc_ahash(alg_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->ahash_fbk)) return dev_err_probe(ctx->cryp->dev, PTR_ERR(ctx->ahash_fbk), "starfive_hash: Could not load fallback driver.\n"); crypto_ahash_set_statesize(hash, crypto_ahash_statesize(ctx->ahash_fbk)); crypto_ahash_set_reqsize(hash, sizeof(struct starfive_cryp_request_ctx) + crypto_ahash_reqsize(ctx->ahash_fbk)); ctx->keylen = 0; ctx->hash_mode = mode; return 0; } static void starfive_hash_exit_tfm(struct crypto_ahash *hash) { struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); crypto_free_ahash(ctx->ahash_fbk); } static int starfive_hash_long_setkey(struct starfive_cryp_ctx *ctx, const u8 *key, unsigned int keylen, const char *alg_name) { struct crypto_wait wait; struct ahash_request *req; struct scatterlist sg; struct crypto_ahash *ahash_tfm; u8 *buf; int ret; ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0); if (IS_ERR(ahash_tfm)) return PTR_ERR(ahash_tfm); req = ahash_request_alloc(ahash_tfm, GFP_KERNEL); if (!req) { ret = -ENOMEM; goto err_free_ahash; } crypto_init_wait(&wait); ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, crypto_req_done, &wait); crypto_ahash_clear_flags(ahash_tfm, ~0); buf = kzalloc(keylen + STARFIVE_HASH_BUFLEN, GFP_KERNEL); if (!buf) { ret = -ENOMEM; goto err_free_req; } memcpy(buf, key, keylen); sg_init_one(&sg, buf, keylen); ahash_request_set_crypt(req, &sg, ctx->key, keylen); ret = crypto_wait_req(crypto_ahash_digest(req), &wait); kfree(buf); err_free_req: ahash_request_free(req); err_free_ahash: crypto_free_ahash(ahash_tfm); return ret; } static int starfive_hash_setkey(struct crypto_ahash *hash, const u8 *key, unsigned int keylen) { struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); unsigned int digestsize = crypto_ahash_digestsize(hash); unsigned int blocksize = crypto_ahash_blocksize(hash); const char *alg_name; crypto_ahash_setkey(ctx->ahash_fbk, key, keylen); if (keylen <= blocksize) { memcpy(ctx->key, key, keylen); ctx->keylen = keylen; return 0; } ctx->keylen = digestsize; switch (digestsize) { case SHA224_DIGEST_SIZE: alg_name = "sha224-starfive"; break; case SHA256_DIGEST_SIZE: if (ctx->hash_mode == STARFIVE_HASH_SM3) alg_name = "sm3-starfive"; else alg_name = "sha256-starfive"; break; case SHA384_DIGEST_SIZE: alg_name = "sha384-starfive"; break; case SHA512_DIGEST_SIZE: alg_name = "sha512-starfive"; break; default: 
return -EINVAL; } return starfive_hash_long_setkey(ctx, key, keylen, alg_name); } static int starfive_sha224_init_tfm(struct crypto_ahash *hash) { return starfive_hash_init_tfm(hash, "sha224-generic", STARFIVE_HASH_SHA224); } static int starfive_sha256_init_tfm(struct crypto_ahash *hash) { return starfive_hash_init_tfm(hash, "sha256-generic", STARFIVE_HASH_SHA256); } static int starfive_sha384_init_tfm(struct crypto_ahash *hash) { return starfive_hash_init_tfm(hash, "sha384-generic", STARFIVE_HASH_SHA384); } static int starfive_sha512_init_tfm(struct crypto_ahash *hash) { return starfive_hash_init_tfm(hash, "sha512-generic", STARFIVE_HASH_SHA512); } static int starfive_sm3_init_tfm(struct crypto_ahash *hash) { return starfive_hash_init_tfm(hash, "sm3-generic", STARFIVE_HASH_SM3); } static int starfive_hmac_sha224_init_tfm(struct crypto_ahash *hash) { struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); ctx->is_hmac = true; return starfive_hash_init_tfm(hash, "hmac(sha224-generic)", STARFIVE_HASH_SHA224); } static int starfive_hmac_sha256_init_tfm(struct crypto_ahash *hash) { struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); ctx->is_hmac = true; return starfive_hash_init_tfm(hash, "hmac(sha256-generic)", STARFIVE_HASH_SHA256); } static int starfive_hmac_sha384_init_tfm(struct crypto_ahash *hash) { struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); ctx->is_hmac = true; return starfive_hash_init_tfm(hash, "hmac(sha384-generic)", STARFIVE_HASH_SHA384); } static int starfive_hmac_sha512_init_tfm(struct crypto_ahash *hash) { struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); ctx->is_hmac = true; return starfive_hash_init_tfm(hash, "hmac(sha512-generic)", STARFIVE_HASH_SHA512); } static int starfive_hmac_sm3_init_tfm(struct crypto_ahash *hash) { struct starfive_cryp_ctx *ctx = crypto_ahash_ctx(hash); ctx->is_hmac = true; return starfive_hash_init_tfm(hash, "hmac(sm3-generic)", STARFIVE_HASH_SM3); } static struct ahash_engine_alg algs_sha2_sm3[] = { { .base.init = starfive_hash_init, .base.update = starfive_hash_update, .base.final = starfive_hash_final, .base.finup = starfive_hash_finup, .base.digest = starfive_hash_digest, .base.export = starfive_hash_export, .base.import = starfive_hash_import, .base.init_tfm = starfive_sha224_init_tfm, .base.exit_tfm = starfive_hash_exit_tfm, .base.halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha224", .cra_driver_name = "sha224-starfive", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = starfive_hash_one_request, }, }, { .base.init = starfive_hash_init, .base.update = starfive_hash_update, .base.final = starfive_hash_final, .base.finup = starfive_hash_finup, .base.digest = starfive_hash_digest, .base.export = starfive_hash_export, .base.import = starfive_hash_import, .base.init_tfm = starfive_hmac_sha224_init_tfm, .base.exit_tfm = starfive_hash_exit_tfm, .base.setkey = starfive_hash_setkey, .base.halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "hmac(sha224)", .cra_driver_name = "sha224-hmac-starfive", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 3, 
.cra_module = THIS_MODULE, } }, .op = { .do_one_request = starfive_hash_one_request, }, }, { .base.init = starfive_hash_init, .base.update = starfive_hash_update, .base.final = starfive_hash_final, .base.finup = starfive_hash_finup, .base.digest = starfive_hash_digest, .base.export = starfive_hash_export, .base.import = starfive_hash_import, .base.init_tfm = starfive_sha256_init_tfm, .base.exit_tfm = starfive_hash_exit_tfm, .base.halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-starfive", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = starfive_hash_one_request, }, }, { .base.init = starfive_hash_init, .base.update = starfive_hash_update, .base.final = starfive_hash_final, .base.finup = starfive_hash_finup, .base.digest = starfive_hash_digest, .base.export = starfive_hash_export, .base.import = starfive_hash_import, .base.init_tfm = starfive_hmac_sha256_init_tfm, .base.exit_tfm = starfive_hash_exit_tfm, .base.setkey = starfive_hash_setkey, .base.halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "hmac(sha256)", .cra_driver_name = "sha256-hmac-starfive", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = starfive_hash_one_request, }, }, { .base.init = starfive_hash_init, .base.update = starfive_hash_update, .base.final = starfive_hash_final, .base.finup = starfive_hash_finup, .base.digest = starfive_hash_digest, .base.export = starfive_hash_export, .base.import = starfive_hash_import, .base.init_tfm = starfive_sha384_init_tfm, .base.exit_tfm = starfive_hash_exit_tfm, .base.halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), .base = { .cra_name = "sha384", .cra_driver_name = "sha384-starfive", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = starfive_hash_one_request, }, }, { .base.init = starfive_hash_init, .base.update = starfive_hash_update, .base.final = starfive_hash_final, .base.finup = starfive_hash_finup, .base.digest = starfive_hash_digest, .base.export = starfive_hash_export, .base.import = starfive_hash_import, .base.init_tfm = starfive_hmac_sha384_init_tfm, .base.exit_tfm = starfive_hash_exit_tfm, .base.setkey = starfive_hash_setkey, .base.halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), .base = { .cra_name = "hmac(sha384)", .cra_driver_name = "sha384-hmac-starfive", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = starfive_hash_one_request, }, }, { .base.init = starfive_hash_init, .base.update = starfive_hash_update, .base.final = starfive_hash_final, .base.finup = starfive_hash_finup, .base.digest = 
starfive_hash_digest, .base.export = starfive_hash_export, .base.import = starfive_hash_import, .base.init_tfm = starfive_sha512_init_tfm, .base.exit_tfm = starfive_hash_exit_tfm, .base.halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", .cra_driver_name = "sha512-starfive", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = starfive_hash_one_request, }, }, { .base.init = starfive_hash_init, .base.update = starfive_hash_update, .base.final = starfive_hash_final, .base.finup = starfive_hash_finup, .base.digest = starfive_hash_digest, .base.export = starfive_hash_export, .base.import = starfive_hash_import, .base.init_tfm = starfive_hmac_sha512_init_tfm, .base.exit_tfm = starfive_hash_exit_tfm, .base.setkey = starfive_hash_setkey, .base.halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), .base = { .cra_name = "hmac(sha512)", .cra_driver_name = "sha512-hmac-starfive", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = starfive_hash_one_request, }, }, { .base.init = starfive_hash_init, .base.update = starfive_hash_update, .base.final = starfive_hash_final, .base.finup = starfive_hash_finup, .base.digest = starfive_hash_digest, .base.export = starfive_hash_export, .base.import = starfive_hash_import, .base.init_tfm = starfive_sm3_init_tfm, .base.exit_tfm = starfive_hash_exit_tfm, .base.halg = { .digestsize = SM3_DIGEST_SIZE, .statesize = sizeof(struct sm3_state), .base = { .cra_name = "sm3", .cra_driver_name = "sm3-starfive", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SM3_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = starfive_hash_one_request, }, }, { .base.init = starfive_hash_init, .base.update = starfive_hash_update, .base.final = starfive_hash_final, .base.finup = starfive_hash_finup, .base.digest = starfive_hash_digest, .base.export = starfive_hash_export, .base.import = starfive_hash_import, .base.init_tfm = starfive_hmac_sm3_init_tfm, .base.exit_tfm = starfive_hash_exit_tfm, .base.setkey = starfive_hash_setkey, .base.halg = { .digestsize = SM3_DIGEST_SIZE, .statesize = sizeof(struct sm3_state), .base = { .cra_name = "hmac(sm3)", .cra_driver_name = "sm3-hmac-starfive", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SM3_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = starfive_hash_one_request, }, }, }; int starfive_hash_register_algs(void) { return crypto_engine_register_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3)); } void starfive_hash_unregister_algs(void) { crypto_engine_unregister_ahashes(algs_sha2_sm3, ARRAY_SIZE(algs_sha2_sm3)); }
linux-master
drivers/crypto/starfive/jh7110-hash.c
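A hedged sketch of the key-handling rule this file implements, separate from the code above: starfive_hash_setkey() keeps HMAC keys that fit within the block size as-is, while longer keys go through starfive_hash_long_setkey() and are replaced by their digest before being programmed into the engine (the standard RFC 2104 rule). The synchronous shash version below only illustrates that rule; demo_prepare_hmac_key() is a made-up name and the driver itself uses an asynchronous ahash request.

/*
 * Illustrative long-HMAC-key rule, mirroring what
 * starfive_hash_long_setkey() does with the async ahash API.
 * Assumption: demo_prepare_hmac_key() is not driver code; the caller
 * must provide an 'out' buffer of at least SHA256_BLOCK_SIZE bytes.
 */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/types.h>

static int demo_prepare_hmac_key(const u8 *key, unsigned int keylen,
				 u8 *out, unsigned int *outlen)
{
	struct crypto_shash *tfm;
	int err = 0;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	if (keylen <= crypto_shash_blocksize(tfm)) {
		/* Short keys are used directly, as in starfive_hash_setkey(). */
		memcpy(out, key, keylen);
		*outlen = keylen;
	} else {
		/* Long keys are replaced by their digest before use. */
		err = crypto_shash_tfm_digest(tfm, key, keylen, out);
		if (!err)
			*outlen = crypto_shash_digestsize(tfm);
	}

	crypto_free_shash(tfm);
	return err;
}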
// SPDX-License-Identifier: GPL-2.0 /* * StarFive AES acceleration driver * * Copyright (c) 2022 StarFive Technology */ #include <crypto/engine.h> #include <crypto/gcm.h> #include <crypto/internal/aead.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include "jh7110-cryp.h" #include <linux/err.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #define STARFIVE_AES_REGS_OFFSET 0x100 #define STARFIVE_AES_AESDIO0R (STARFIVE_AES_REGS_OFFSET + 0x0) #define STARFIVE_AES_KEY0 (STARFIVE_AES_REGS_OFFSET + 0x4) #define STARFIVE_AES_KEY1 (STARFIVE_AES_REGS_OFFSET + 0x8) #define STARFIVE_AES_KEY2 (STARFIVE_AES_REGS_OFFSET + 0xC) #define STARFIVE_AES_KEY3 (STARFIVE_AES_REGS_OFFSET + 0x10) #define STARFIVE_AES_KEY4 (STARFIVE_AES_REGS_OFFSET + 0x14) #define STARFIVE_AES_KEY5 (STARFIVE_AES_REGS_OFFSET + 0x18) #define STARFIVE_AES_KEY6 (STARFIVE_AES_REGS_OFFSET + 0x1C) #define STARFIVE_AES_KEY7 (STARFIVE_AES_REGS_OFFSET + 0x20) #define STARFIVE_AES_CSR (STARFIVE_AES_REGS_OFFSET + 0x24) #define STARFIVE_AES_IV0 (STARFIVE_AES_REGS_OFFSET + 0x28) #define STARFIVE_AES_IV1 (STARFIVE_AES_REGS_OFFSET + 0x2C) #define STARFIVE_AES_IV2 (STARFIVE_AES_REGS_OFFSET + 0x30) #define STARFIVE_AES_IV3 (STARFIVE_AES_REGS_OFFSET + 0x34) #define STARFIVE_AES_NONCE0 (STARFIVE_AES_REGS_OFFSET + 0x3C) #define STARFIVE_AES_NONCE1 (STARFIVE_AES_REGS_OFFSET + 0x40) #define STARFIVE_AES_NONCE2 (STARFIVE_AES_REGS_OFFSET + 0x44) #define STARFIVE_AES_NONCE3 (STARFIVE_AES_REGS_OFFSET + 0x48) #define STARFIVE_AES_ALEN0 (STARFIVE_AES_REGS_OFFSET + 0x4C) #define STARFIVE_AES_ALEN1 (STARFIVE_AES_REGS_OFFSET + 0x50) #define STARFIVE_AES_MLEN0 (STARFIVE_AES_REGS_OFFSET + 0x54) #define STARFIVE_AES_MLEN1 (STARFIVE_AES_REGS_OFFSET + 0x58) #define STARFIVE_AES_IVLEN (STARFIVE_AES_REGS_OFFSET + 0x5C) #define FLG_MODE_MASK GENMASK(2, 0) #define FLG_ENCRYPT BIT(4) /* Misc */ #define CCM_B0_ADATA 0x40 #define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32)) static inline int starfive_aes_wait_busy(struct starfive_cryp_dev *cryp) { u32 status; return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status, !(status & STARFIVE_AES_BUSY), 10, 100000); } static inline int starfive_aes_wait_keydone(struct starfive_cryp_dev *cryp) { u32 status; return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status, (status & STARFIVE_AES_KEY_DONE), 10, 100000); } static inline int starfive_aes_wait_gcmdone(struct starfive_cryp_dev *cryp) { u32 status; return readl_relaxed_poll_timeout(cryp->base + STARFIVE_AES_CSR, status, (status & STARFIVE_AES_GCM_DONE), 10, 100000); } static inline int is_gcm(struct starfive_cryp_dev *cryp) { return (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_GCM; } static inline int is_encrypt(struct starfive_cryp_dev *cryp) { return cryp->flags & FLG_ENCRYPT; } static void starfive_aes_aead_hw_start(struct starfive_cryp_ctx *ctx, u32 hw_mode) { struct starfive_cryp_dev *cryp = ctx->cryp; unsigned int value; switch (hw_mode) { case STARFIVE_AES_MODE_GCM: value = readl(ctx->cryp->base + STARFIVE_AES_CSR); value |= STARFIVE_AES_GCM_START; writel(value, cryp->base + STARFIVE_AES_CSR); starfive_aes_wait_gcmdone(cryp); break; case STARFIVE_AES_MODE_CCM: value = readl(ctx->cryp->base + STARFIVE_AES_CSR); value |= STARFIVE_AES_CCM_START; writel(value, cryp->base + STARFIVE_AES_CSR); break; } } static inline void starfive_aes_set_ivlen(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; if (is_gcm(cryp)) writel(GCM_AES_IV_SIZE, 
cryp->base + STARFIVE_AES_IVLEN); else writel(AES_BLOCK_SIZE, cryp->base + STARFIVE_AES_IVLEN); } static inline void starfive_aes_set_alen(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; writel(upper_32_bits(cryp->assoclen), cryp->base + STARFIVE_AES_ALEN0); writel(lower_32_bits(cryp->assoclen), cryp->base + STARFIVE_AES_ALEN1); } static inline void starfive_aes_set_mlen(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; writel(upper_32_bits(cryp->total_in), cryp->base + STARFIVE_AES_MLEN0); writel(lower_32_bits(cryp->total_in), cryp->base + STARFIVE_AES_MLEN1); } static inline int starfive_aes_ccm_check_iv(const u8 *iv) { /* 2 <= L <= 8, so 1 <= L' <= 7. */ if (iv[0] < 1 || iv[0] > 7) return -EINVAL; return 0; } static int starfive_aes_write_iv(struct starfive_cryp_ctx *ctx, u32 *iv) { struct starfive_cryp_dev *cryp = ctx->cryp; writel(iv[0], cryp->base + STARFIVE_AES_IV0); writel(iv[1], cryp->base + STARFIVE_AES_IV1); writel(iv[2], cryp->base + STARFIVE_AES_IV2); if (is_gcm(cryp)) { if (starfive_aes_wait_gcmdone(cryp)) return -ETIMEDOUT; return 0; } writel(iv[3], cryp->base + STARFIVE_AES_IV3); return 0; } static inline void starfive_aes_get_iv(struct starfive_cryp_dev *cryp, u32 *iv) { iv[0] = readl(cryp->base + STARFIVE_AES_IV0); iv[1] = readl(cryp->base + STARFIVE_AES_IV1); iv[2] = readl(cryp->base + STARFIVE_AES_IV2); iv[3] = readl(cryp->base + STARFIVE_AES_IV3); } static inline void starfive_aes_write_nonce(struct starfive_cryp_ctx *ctx, u32 *nonce) { struct starfive_cryp_dev *cryp = ctx->cryp; writel(nonce[0], cryp->base + STARFIVE_AES_NONCE0); writel(nonce[1], cryp->base + STARFIVE_AES_NONCE1); writel(nonce[2], cryp->base + STARFIVE_AES_NONCE2); writel(nonce[3], cryp->base + STARFIVE_AES_NONCE3); } static int starfive_aes_write_key(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; u32 *key = (u32 *)ctx->key; if (ctx->keylen >= AES_KEYSIZE_128) { writel(key[0], cryp->base + STARFIVE_AES_KEY0); writel(key[1], cryp->base + STARFIVE_AES_KEY1); writel(key[2], cryp->base + STARFIVE_AES_KEY2); writel(key[3], cryp->base + STARFIVE_AES_KEY3); } if (ctx->keylen >= AES_KEYSIZE_192) { writel(key[4], cryp->base + STARFIVE_AES_KEY4); writel(key[5], cryp->base + STARFIVE_AES_KEY5); } if (ctx->keylen >= AES_KEYSIZE_256) { writel(key[6], cryp->base + STARFIVE_AES_KEY6); writel(key[7], cryp->base + STARFIVE_AES_KEY7); } if (starfive_aes_wait_keydone(cryp)) return -ETIMEDOUT; return 0; } static int starfive_aes_ccm_init(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; u8 iv[AES_BLOCK_SIZE], b0[AES_BLOCK_SIZE]; unsigned int textlen; memcpy(iv, cryp->req.areq->iv, AES_BLOCK_SIZE); memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1); /* Build B0 */ memcpy(b0, iv, AES_BLOCK_SIZE); b0[0] |= (8 * ((cryp->authsize - 2) / 2)); if (cryp->assoclen) b0[0] |= CCM_B0_ADATA; textlen = cryp->total_in; b0[AES_BLOCK_SIZE - 2] = textlen >> 8; b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF; starfive_aes_write_nonce(ctx, (u32 *)b0); return 0; } static int starfive_aes_hw_init(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_request_ctx *rctx = ctx->rctx; struct starfive_cryp_dev *cryp = ctx->cryp; u32 hw_mode; /* reset */ rctx->csr.aes.v = 0; rctx->csr.aes.aesrst = 1; writel(rctx->csr.aes.v, cryp->base + STARFIVE_AES_CSR); /* csr setup */ hw_mode = cryp->flags & FLG_MODE_MASK; rctx->csr.aes.v = 0; switch (ctx->keylen) { case AES_KEYSIZE_128: rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_128; break; case 
AES_KEYSIZE_192: rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_192; break; case AES_KEYSIZE_256: rctx->csr.aes.keymode = STARFIVE_AES_KEYMODE_256; break; } rctx->csr.aes.mode = hw_mode; rctx->csr.aes.cmode = !is_encrypt(cryp); rctx->csr.aes.ie = 1; if (hw_mode == STARFIVE_AES_MODE_CFB || hw_mode == STARFIVE_AES_MODE_OFB) rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_128; else rctx->csr.aes.stmode = STARFIVE_AES_MODE_XFB_1; if (cryp->side_chan) { rctx->csr.aes.delay_aes = 1; rctx->csr.aes.vaes_start = 1; } writel(rctx->csr.aes.v, cryp->base + STARFIVE_AES_CSR); cryp->err = starfive_aes_write_key(ctx); if (cryp->err) return cryp->err; switch (hw_mode) { case STARFIVE_AES_MODE_GCM: starfive_aes_set_alen(ctx); starfive_aes_set_mlen(ctx); starfive_aes_set_ivlen(ctx); starfive_aes_aead_hw_start(ctx, hw_mode); starfive_aes_write_iv(ctx, (void *)cryp->req.areq->iv); break; case STARFIVE_AES_MODE_CCM: starfive_aes_set_alen(ctx); starfive_aes_set_mlen(ctx); starfive_aes_ccm_init(ctx); starfive_aes_aead_hw_start(ctx, hw_mode); break; case STARFIVE_AES_MODE_OFB: case STARFIVE_AES_MODE_CFB: case STARFIVE_AES_MODE_CBC: case STARFIVE_AES_MODE_CTR: starfive_aes_write_iv(ctx, (void *)cryp->req.sreq->iv); break; default: break; } return cryp->err; } static int starfive_aes_read_authtag(struct starfive_cryp_dev *cryp) { int i, start_addr; if (starfive_aes_wait_busy(cryp)) return dev_err_probe(cryp->dev, -ETIMEDOUT, "Timeout waiting for tag generation."); start_addr = STARFIVE_AES_NONCE0; if (is_gcm(cryp)) for (i = 0; i < AES_BLOCK_32; i++, start_addr += 4) cryp->tag_out[i] = readl(cryp->base + start_addr); else for (i = 0; i < AES_BLOCK_32; i++) cryp->tag_out[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R); if (is_encrypt(cryp)) { scatterwalk_copychunks(cryp->tag_out, &cryp->out_walk, cryp->authsize, 1); } else { scatterwalk_copychunks(cryp->tag_in, &cryp->in_walk, cryp->authsize, 0); if (crypto_memneq(cryp->tag_in, cryp->tag_out, cryp->authsize)) return dev_err_probe(cryp->dev, -EBADMSG, "Failed tag verification\n"); } return 0; } static void starfive_aes_finish_req(struct starfive_cryp_dev *cryp) { union starfive_aes_csr csr; int err = cryp->err; if (!err && cryp->authsize) err = starfive_aes_read_authtag(cryp); if (!err && ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC || (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CTR)) starfive_aes_get_iv(cryp, (void *)cryp->req.sreq->iv); /* reset irq flags*/ csr.v = 0; csr.aesrst = 1; writel(csr.v, cryp->base + STARFIVE_AES_CSR); if (cryp->authsize) crypto_finalize_aead_request(cryp->engine, cryp->req.areq, err); else crypto_finalize_skcipher_request(cryp->engine, cryp->req.sreq, err); } void starfive_aes_done_task(unsigned long param) { struct starfive_cryp_dev *cryp = (struct starfive_cryp_dev *)param; u32 block[AES_BLOCK_32]; u32 stat; int i; for (i = 0; i < AES_BLOCK_32; i++) block[i] = readl(cryp->base + STARFIVE_AES_AESDIO0R); scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, AES_BLOCK_SIZE, cryp->total_out), 1); cryp->total_out -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_out); if (!cryp->total_out) { starfive_aes_finish_req(cryp); return; } memset(block, 0, AES_BLOCK_SIZE); scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE, cryp->total_in), 0); cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in); for (i = 0; i < AES_BLOCK_32; i++) writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R); stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); stat &= ~STARFIVE_IE_MASK_AES_DONE; writel(stat, cryp->base + 
STARFIVE_IE_MASK_OFFSET); } static int starfive_aes_gcm_write_adata(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_cryp_request_ctx *rctx = ctx->rctx; u32 *buffer; int total_len, loop; total_len = ALIGN(cryp->assoclen, AES_BLOCK_SIZE) / sizeof(unsigned int); buffer = (u32 *)rctx->adata; for (loop = 0; loop < total_len; loop += 4) { writel(*buffer, cryp->base + STARFIVE_AES_NONCE0); buffer++; writel(*buffer, cryp->base + STARFIVE_AES_NONCE1); buffer++; writel(*buffer, cryp->base + STARFIVE_AES_NONCE2); buffer++; writel(*buffer, cryp->base + STARFIVE_AES_NONCE3); buffer++; } if (starfive_aes_wait_gcmdone(cryp)) return dev_err_probe(cryp->dev, -ETIMEDOUT, "Timeout processing gcm aad block"); return 0; } static int starfive_aes_ccm_write_adata(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_cryp_request_ctx *rctx = ctx->rctx; u32 *buffer; u8 *ci; int total_len, loop; total_len = cryp->assoclen; ci = rctx->adata; writeb(*ci, cryp->base + STARFIVE_AES_AESDIO0R); ci++; writeb(*ci, cryp->base + STARFIVE_AES_AESDIO0R); ci++; total_len -= 2; buffer = (u32 *)ci; for (loop = 0; loop < 3; loop++, buffer++) writel(*buffer, cryp->base + STARFIVE_AES_AESDIO0R); total_len -= 12; while (total_len > 0) { for (loop = 0; loop < AES_BLOCK_32; loop++, buffer++) writel(*buffer, cryp->base + STARFIVE_AES_AESDIO0R); total_len -= AES_BLOCK_SIZE; } if (starfive_aes_wait_busy(cryp)) return dev_err_probe(cryp->dev, -ETIMEDOUT, "Timeout processing ccm aad block"); return 0; } static int starfive_aes_prepare_req(struct skcipher_request *req, struct aead_request *areq) { struct starfive_cryp_ctx *ctx; struct starfive_cryp_request_ctx *rctx; struct starfive_cryp_dev *cryp; if (!req && !areq) return -EINVAL; ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) : crypto_aead_ctx(crypto_aead_reqtfm(areq)); cryp = ctx->cryp; rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq); if (req) { cryp->req.sreq = req; cryp->total_in = req->cryptlen; cryp->total_out = req->cryptlen; cryp->assoclen = 0; cryp->authsize = 0; } else { cryp->req.areq = areq; cryp->assoclen = areq->assoclen; cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq)); if (is_encrypt(cryp)) { cryp->total_in = areq->cryptlen; cryp->total_out = areq->cryptlen; } else { cryp->total_in = areq->cryptlen - cryp->authsize; cryp->total_out = cryp->total_in; } } rctx->in_sg = req ? req->src : areq->src; scatterwalk_start(&cryp->in_walk, rctx->in_sg); rctx->out_sg = req ? req->dst : areq->dst; scatterwalk_start(&cryp->out_walk, rctx->out_sg); if (cryp->assoclen) { rctx->adata = kzalloc(ALIGN(cryp->assoclen, AES_BLOCK_SIZE), GFP_KERNEL); if (!rctx->adata) return dev_err_probe(cryp->dev, -ENOMEM, "Failed to alloc memory for adata"); scatterwalk_copychunks(rctx->adata, &cryp->in_walk, cryp->assoclen, 0); scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->assoclen, 2); } ctx->rctx = rctx; return starfive_aes_hw_init(ctx); } static int starfive_aes_do_one_req(struct crypto_engine *engine, void *areq) { struct skcipher_request *req = container_of(areq, struct skcipher_request, base); struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); struct starfive_cryp_dev *cryp = ctx->cryp; u32 block[AES_BLOCK_32]; u32 stat; int err; int i; err = starfive_aes_prepare_req(req, NULL); if (err) return err; /* * Write first plain/ciphertext block to start the module * then let irq tasklet handle the rest of the data blocks. 
*/ scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE, cryp->total_in), 0); cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in); for (i = 0; i < AES_BLOCK_32; i++) writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R); stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); stat &= ~STARFIVE_IE_MASK_AES_DONE; writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); return 0; } static int starfive_aes_init_tfm(struct crypto_skcipher *tfm) { struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); ctx->cryp = starfive_cryp_find_dev(ctx); if (!ctx->cryp) return -ENODEV; crypto_skcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) + sizeof(struct skcipher_request)); return 0; } static int starfive_aes_aead_do_one_req(struct crypto_engine *engine, void *areq) { struct aead_request *req = container_of(areq, struct aead_request, base); struct starfive_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_cryp_request_ctx *rctx = ctx->rctx; u32 block[AES_BLOCK_32]; u32 stat; int err; int i; err = starfive_aes_prepare_req(NULL, req); if (err) return err; if (!cryp->assoclen) goto write_text; if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CCM) cryp->err = starfive_aes_ccm_write_adata(ctx); else cryp->err = starfive_aes_gcm_write_adata(ctx); kfree(rctx->adata); if (cryp->err) return cryp->err; write_text: if (!cryp->total_in) goto finish_req; /* * Write first plain/ciphertext block to start the module * then let irq tasklet handle the rest of the data blocks. */ scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, AES_BLOCK_SIZE, cryp->total_in), 0); cryp->total_in -= min_t(size_t, AES_BLOCK_SIZE, cryp->total_in); for (i = 0; i < AES_BLOCK_32; i++) writel(block[i], cryp->base + STARFIVE_AES_AESDIO0R); stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); stat &= ~STARFIVE_IE_MASK_AES_DONE; writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); return 0; finish_req: starfive_aes_finish_req(cryp); return 0; } static int starfive_aes_aead_init_tfm(struct crypto_aead *tfm) { struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); struct starfive_cryp_dev *cryp = ctx->cryp; struct crypto_tfm *aead = crypto_aead_tfm(tfm); struct crypto_alg *alg = aead->__crt_alg; ctx->cryp = starfive_cryp_find_dev(ctx); if (!ctx->cryp) return -ENODEV; if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { ctx->aead_fbk = crypto_alloc_aead(alg->cra_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->aead_fbk)) return dev_err_probe(cryp->dev, PTR_ERR(ctx->aead_fbk), "%s() failed to allocate fallback for %s\n", __func__, alg->cra_name); } crypto_aead_set_reqsize(tfm, sizeof(struct starfive_cryp_ctx) + sizeof(struct aead_request)); return 0; } static void starfive_aes_aead_exit_tfm(struct crypto_aead *tfm) { struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->aead_fbk); } static int starfive_aes_crypt(struct skcipher_request *req, unsigned long flags) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); struct starfive_cryp_dev *cryp = ctx->cryp; unsigned int blocksize_align = crypto_skcipher_blocksize(tfm) - 1; cryp->flags = flags; if ((cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_ECB || (cryp->flags & FLG_MODE_MASK) == STARFIVE_AES_MODE_CBC) if (req->cryptlen & blocksize_align) return -EINVAL; return crypto_transfer_skcipher_request_to_engine(cryp->engine, req); } static int starfive_aes_aead_crypt(struct aead_request *req, 
unsigned long flags) { struct starfive_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct starfive_cryp_dev *cryp = ctx->cryp; cryp->flags = flags; /* * HW engine could not perform CCM tag verification on * non-blocksize aligned text, use fallback algo instead */ if (ctx->aead_fbk && !is_encrypt(cryp)) { struct aead_request *subreq = aead_request_ctx(req); aead_request_set_tfm(subreq, ctx->aead_fbk); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); aead_request_set_ad(subreq, req->assoclen); return crypto_aead_decrypt(subreq); } return crypto_transfer_aead_request_to_engine(cryp->engine, req); } static int starfive_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct starfive_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); if (!key || !keylen) return -EINVAL; if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) return -EINVAL; memcpy(ctx->key, key, keylen); ctx->keylen = keylen; return 0; } static int starfive_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); if (!key || !keylen) return -EINVAL; if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) return -EINVAL; memcpy(ctx->key, key, keylen); ctx->keylen = keylen; if (ctx->aead_fbk) return crypto_aead_setkey(ctx->aead_fbk, key, keylen); return 0; } static int starfive_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { return crypto_gcm_check_authsize(authsize); } static int starfive_aes_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { struct starfive_cryp_ctx *ctx = crypto_aead_ctx(tfm); switch (authsize) { case 4: case 6: case 8: case 10: case 12: case 14: case 16: break; default: return -EINVAL; } return crypto_aead_setauthsize(ctx->aead_fbk, authsize); } static int starfive_aes_ecb_encrypt(struct skcipher_request *req) { return starfive_aes_crypt(req, STARFIVE_AES_MODE_ECB | FLG_ENCRYPT); } static int starfive_aes_ecb_decrypt(struct skcipher_request *req) { return starfive_aes_crypt(req, STARFIVE_AES_MODE_ECB); } static int starfive_aes_cbc_encrypt(struct skcipher_request *req) { return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC | FLG_ENCRYPT); } static int starfive_aes_cbc_decrypt(struct skcipher_request *req) { return starfive_aes_crypt(req, STARFIVE_AES_MODE_CBC); } static int starfive_aes_cfb_encrypt(struct skcipher_request *req) { return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB | FLG_ENCRYPT); } static int starfive_aes_cfb_decrypt(struct skcipher_request *req) { return starfive_aes_crypt(req, STARFIVE_AES_MODE_CFB); } static int starfive_aes_ofb_encrypt(struct skcipher_request *req) { return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB | FLG_ENCRYPT); } static int starfive_aes_ofb_decrypt(struct skcipher_request *req) { return starfive_aes_crypt(req, STARFIVE_AES_MODE_OFB); } static int starfive_aes_ctr_encrypt(struct skcipher_request *req) { return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR | FLG_ENCRYPT); } static int starfive_aes_ctr_decrypt(struct skcipher_request *req) { return starfive_aes_crypt(req, STARFIVE_AES_MODE_CTR); } static int starfive_aes_gcm_encrypt(struct aead_request *req) { return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_GCM | FLG_ENCRYPT); } static int starfive_aes_gcm_decrypt(struct aead_request *req) { return starfive_aes_aead_crypt(req, 
STARFIVE_AES_MODE_GCM); } static int starfive_aes_ccm_encrypt(struct aead_request *req) { int ret; ret = starfive_aes_ccm_check_iv(req->iv); if (ret) return ret; return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM | FLG_ENCRYPT); } static int starfive_aes_ccm_decrypt(struct aead_request *req) { int ret; ret = starfive_aes_ccm_check_iv(req->iv); if (ret) return ret; return starfive_aes_aead_crypt(req, STARFIVE_AES_MODE_CCM); } static struct skcipher_engine_alg skcipher_algs[] = { { .base.init = starfive_aes_init_tfm, .base.setkey = starfive_aes_setkey, .base.encrypt = starfive_aes_ecb_encrypt, .base.decrypt = starfive_aes_ecb_decrypt, .base.min_keysize = AES_MIN_KEY_SIZE, .base.max_keysize = AES_MAX_KEY_SIZE, .base.base = { .cra_name = "ecb(aes)", .cra_driver_name = "starfive-ecb-aes", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, .op = { .do_one_request = starfive_aes_do_one_req, }, }, { .base.init = starfive_aes_init_tfm, .base.setkey = starfive_aes_setkey, .base.encrypt = starfive_aes_cbc_encrypt, .base.decrypt = starfive_aes_cbc_decrypt, .base.min_keysize = AES_MIN_KEY_SIZE, .base.max_keysize = AES_MAX_KEY_SIZE, .base.ivsize = AES_BLOCK_SIZE, .base.base = { .cra_name = "cbc(aes)", .cra_driver_name = "starfive-cbc-aes", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, .op = { .do_one_request = starfive_aes_do_one_req, }, }, { .base.init = starfive_aes_init_tfm, .base.setkey = starfive_aes_setkey, .base.encrypt = starfive_aes_ctr_encrypt, .base.decrypt = starfive_aes_ctr_decrypt, .base.min_keysize = AES_MIN_KEY_SIZE, .base.max_keysize = AES_MAX_KEY_SIZE, .base.ivsize = AES_BLOCK_SIZE, .base.base = { .cra_name = "ctr(aes)", .cra_driver_name = "starfive-ctr-aes", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, .op = { .do_one_request = starfive_aes_do_one_req, }, }, { .base.init = starfive_aes_init_tfm, .base.setkey = starfive_aes_setkey, .base.encrypt = starfive_aes_cfb_encrypt, .base.decrypt = starfive_aes_cfb_decrypt, .base.min_keysize = AES_MIN_KEY_SIZE, .base.max_keysize = AES_MAX_KEY_SIZE, .base.ivsize = AES_BLOCK_SIZE, .base.base = { .cra_name = "cfb(aes)", .cra_driver_name = "starfive-cfb-aes", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, .op = { .do_one_request = starfive_aes_do_one_req, }, }, { .base.init = starfive_aes_init_tfm, .base.setkey = starfive_aes_setkey, .base.encrypt = starfive_aes_ofb_encrypt, .base.decrypt = starfive_aes_ofb_decrypt, .base.min_keysize = AES_MIN_KEY_SIZE, .base.max_keysize = AES_MAX_KEY_SIZE, .base.ivsize = AES_BLOCK_SIZE, .base.base = { .cra_name = "ofb(aes)", .cra_driver_name = "starfive-ofb-aes", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, .op = { .do_one_request = starfive_aes_do_one_req, }, }, }; static struct aead_engine_alg aead_algs[] = { { .base.setkey = starfive_aes_aead_setkey, .base.setauthsize = starfive_aes_gcm_setauthsize, .base.encrypt = starfive_aes_gcm_encrypt, .base.decrypt = 
starfive_aes_gcm_decrypt, .base.init = starfive_aes_aead_init_tfm, .base.exit = starfive_aes_aead_exit_tfm, .base.ivsize = GCM_AES_IV_SIZE, .base.maxauthsize = AES_BLOCK_SIZE, .base.base = { .cra_name = "gcm(aes)", .cra_driver_name = "starfive-gcm-aes", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, .op = { .do_one_request = starfive_aes_aead_do_one_req, }, }, { .base.setkey = starfive_aes_aead_setkey, .base.setauthsize = starfive_aes_ccm_setauthsize, .base.encrypt = starfive_aes_ccm_encrypt, .base.decrypt = starfive_aes_ccm_decrypt, .base.init = starfive_aes_aead_init_tfm, .base.exit = starfive_aes_aead_exit_tfm, .base.ivsize = AES_BLOCK_SIZE, .base.maxauthsize = AES_BLOCK_SIZE, .base.base = { .cra_name = "ccm(aes)", .cra_driver_name = "starfive-ccm-aes", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), .cra_alignmask = 0xf, .cra_module = THIS_MODULE, }, .op = { .do_one_request = starfive_aes_aead_do_one_req, }, }, }; int starfive_aes_register_algs(void) { int ret; ret = crypto_engine_register_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); if (ret) return ret; ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); if (ret) crypto_engine_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); return ret; } void starfive_aes_unregister_algs(void) { crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs)); crypto_engine_unregister_skciphers(skcipher_algs, ARRAY_SIZE(skcipher_algs)); }
linux-master
drivers/crypto/starfive/jh7110-aes.c
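/*
 * Illustrative sketch only -- not part of jh7110-aes.c above.  It shows the
 * generic skcipher calling convention a kernel user would go through to reach
 * the "cbc(aes)" implementation this driver registers ("starfive-cbc-aes" is
 * selected by cra_priority, never called directly).  example_cbc_aes_encrypt()
 * and the buf/key/iv parameters are made up for the example; buf must be a
 * DMA-able (e.g. kmalloc'd) buffer and len a multiple of AES_BLOCK_SIZE,
 * matching the blocksize check in starfive_aes_crypt().
 */
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
				      CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* The engine-backed driver completes asynchronously; wait for it. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}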
// SPDX-License-Identifier: GPL-2.0 /* * StarFive Public Key Algo acceleration driver * * Copyright (c) 2022 StarFive Technology */ #include <linux/crypto.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/dma-direct.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/io.h> #include <linux/mod_devicetable.h> #include <crypto/akcipher.h> #include <crypto/algapi.h> #include <crypto/internal/akcipher.h> #include <crypto/internal/rsa.h> #include <crypto/scatterwalk.h> #include "jh7110-cryp.h" #define STARFIVE_PKA_REGS_OFFSET 0x400 #define STARFIVE_PKA_CACR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x0) #define STARFIVE_PKA_CASR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x4) #define STARFIVE_PKA_CAAR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x8) #define STARFIVE_PKA_CAER_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x108) #define STARFIVE_PKA_CANR_OFFSET (STARFIVE_PKA_REGS_OFFSET + 0x208) // R^2 mod N and N0' #define CRYPTO_CMD_PRE 0x0 // A * R mod N ==> A #define CRYPTO_CMD_ARN 0x5 // A * E * R mod N ==> A #define CRYPTO_CMD_AERN 0x6 // A * A * R mod N ==> A #define CRYPTO_CMD_AARN 0x7 #define STARFIVE_RSA_MAX_KEYSZ 256 #define STARFIVE_RSA_RESET 0x2 static inline int starfive_pka_wait_done(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; return wait_for_completion_timeout(&cryp->pka_done, usecs_to_jiffies(100000)); } static inline void starfive_pka_irq_mask_clear(struct starfive_cryp_ctx *ctx) { struct starfive_cryp_dev *cryp = ctx->cryp; u32 stat; stat = readl(cryp->base + STARFIVE_IE_MASK_OFFSET); stat &= ~STARFIVE_IE_MASK_PKA_DONE; writel(stat, cryp->base + STARFIVE_IE_MASK_OFFSET); reinit_completion(&cryp->pka_done); } static void starfive_rsa_free_key(struct starfive_rsa_key *key) { if (key->d) kfree_sensitive(key->d); if (key->e) kfree_sensitive(key->e); if (key->n) kfree_sensitive(key->n); memset(key, 0, sizeof(*key)); } static unsigned int starfive_rsa_get_nbit(u8 *pa, u32 snum, int key_sz) { u32 i; u8 value; i = snum >> 3; value = pa[key_sz - i - 1]; value >>= snum & 0x7; value &= 0x1; return value; } static int starfive_rsa_montgomery_form(struct starfive_cryp_ctx *ctx, u32 *out, u32 *in, u8 mont, u32 *mod, int bit_len) { struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_cryp_request_ctx *rctx = ctx->rctx; int count = rctx->total / sizeof(u32) - 1; int loop; u32 temp; u8 opsize; opsize = (bit_len - 1) >> 5; rctx->csr.pka.v = 0; writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); for (loop = 0; loop <= opsize; loop++) writel(mod[opsize - loop], cryp->base + STARFIVE_PKA_CANR_OFFSET + loop * 4); if (mont) { rctx->csr.pka.v = 0; rctx->csr.pka.cln_done = 1; rctx->csr.pka.opsize = opsize; rctx->csr.pka.exposize = opsize; rctx->csr.pka.cmd = CRYPTO_CMD_PRE; rctx->csr.pka.start = 1; rctx->csr.pka.not_r2 = 1; rctx->csr.pka.ie = 1; starfive_pka_irq_mask_clear(ctx); writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); if (!starfive_pka_wait_done(ctx)) return -ETIMEDOUT; for (loop = 0; loop <= opsize; loop++) writel(in[opsize - loop], cryp->base + STARFIVE_PKA_CAAR_OFFSET + loop * 4); writel(0x1000000, cryp->base + STARFIVE_PKA_CAER_OFFSET); for (loop = 1; loop <= opsize; loop++) writel(0, cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4); rctx->csr.pka.v = 0; rctx->csr.pka.cln_done = 1; rctx->csr.pka.opsize = opsize; rctx->csr.pka.exposize = opsize; rctx->csr.pka.cmd = CRYPTO_CMD_AERN; rctx->csr.pka.start = 1; rctx->csr.pka.ie = 1; starfive_pka_irq_mask_clear(ctx); writel(rctx->csr.pka.v, cryp->base + 
STARFIVE_PKA_CACR_OFFSET); if (!starfive_pka_wait_done(ctx)) return -ETIMEDOUT; } else { rctx->csr.pka.v = 0; rctx->csr.pka.cln_done = 1; rctx->csr.pka.opsize = opsize; rctx->csr.pka.exposize = opsize; rctx->csr.pka.cmd = CRYPTO_CMD_PRE; rctx->csr.pka.start = 1; rctx->csr.pka.pre_expf = 1; rctx->csr.pka.ie = 1; starfive_pka_irq_mask_clear(ctx); writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); if (!starfive_pka_wait_done(ctx)) return -ETIMEDOUT; for (loop = 0; loop <= count; loop++) writel(in[count - loop], cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4); /*pad with 0 up to opsize*/ for (loop = count + 1; loop <= opsize; loop++) writel(0, cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4); rctx->csr.pka.v = 0; rctx->csr.pka.cln_done = 1; rctx->csr.pka.opsize = opsize; rctx->csr.pka.exposize = opsize; rctx->csr.pka.cmd = CRYPTO_CMD_ARN; rctx->csr.pka.start = 1; rctx->csr.pka.ie = 1; starfive_pka_irq_mask_clear(ctx); writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); if (!starfive_pka_wait_done(ctx)) return -ETIMEDOUT; } for (loop = 0; loop <= opsize; loop++) { temp = readl(cryp->base + STARFIVE_PKA_CAAR_OFFSET + 0x4 * loop); out[opsize - loop] = temp; } return 0; } static int starfive_rsa_cpu_start(struct starfive_cryp_ctx *ctx, u32 *result, u8 *de, u32 *n, int key_sz) { struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_cryp_request_ctx *rctx = ctx->rctx; struct starfive_rsa_key *key = &ctx->rsa_key; u32 temp; int ret = 0; int opsize, mlen, loop; unsigned int *mta; opsize = (key_sz - 1) >> 2; mta = kmalloc(key_sz, GFP_KERNEL); if (!mta) return -ENOMEM; ret = starfive_rsa_montgomery_form(ctx, mta, (u32 *)rctx->rsa_data, 0, n, key_sz << 3); if (ret) { dev_err_probe(cryp->dev, ret, "Conversion to Montgomery failed"); goto rsa_err; } for (loop = 0; loop <= opsize; loop++) writel(mta[opsize - loop], cryp->base + STARFIVE_PKA_CAER_OFFSET + loop * 4); for (loop = key->bitlen - 1; loop > 0; loop--) { mlen = starfive_rsa_get_nbit(de, loop - 1, key_sz); rctx->csr.pka.v = 0; rctx->csr.pka.cln_done = 1; rctx->csr.pka.opsize = opsize; rctx->csr.pka.exposize = opsize; rctx->csr.pka.cmd = CRYPTO_CMD_AARN; rctx->csr.pka.start = 1; rctx->csr.pka.ie = 1; starfive_pka_irq_mask_clear(ctx); writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); ret = -ETIMEDOUT; if (!starfive_pka_wait_done(ctx)) goto rsa_err; if (mlen) { rctx->csr.pka.v = 0; rctx->csr.pka.cln_done = 1; rctx->csr.pka.opsize = opsize; rctx->csr.pka.exposize = opsize; rctx->csr.pka.cmd = CRYPTO_CMD_AERN; rctx->csr.pka.start = 1; rctx->csr.pka.ie = 1; starfive_pka_irq_mask_clear(ctx); writel(rctx->csr.pka.v, cryp->base + STARFIVE_PKA_CACR_OFFSET); if (!starfive_pka_wait_done(ctx)) goto rsa_err; } } for (loop = 0; loop <= opsize; loop++) { temp = readl(cryp->base + STARFIVE_PKA_CAAR_OFFSET + 0x4 * loop); result[opsize - loop] = temp; } ret = starfive_rsa_montgomery_form(ctx, result, result, 1, n, key_sz << 3); if (ret) dev_err_probe(cryp->dev, ret, "Conversion from Montgomery failed"); rsa_err: kfree(mta); return ret; } static int starfive_rsa_start(struct starfive_cryp_ctx *ctx, u8 *result, u8 *de, u8 *n, int key_sz) { return starfive_rsa_cpu_start(ctx, (u32 *)result, de, (u32 *)n, key_sz); } static int starfive_rsa_enc_core(struct starfive_cryp_ctx *ctx, int enc) { struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_cryp_request_ctx *rctx = ctx->rctx; struct starfive_rsa_key *key = &ctx->rsa_key; int ret = 0; writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET); rctx->total = 
sg_copy_to_buffer(rctx->in_sg, rctx->nents, rctx->rsa_data, rctx->total); if (enc) { key->bitlen = key->e_bitlen; ret = starfive_rsa_start(ctx, rctx->rsa_data, key->e, key->n, key->key_sz); } else { key->bitlen = key->d_bitlen; ret = starfive_rsa_start(ctx, rctx->rsa_data, key->d, key->n, key->key_sz); } if (ret) goto err_rsa_crypt; sg_copy_buffer(rctx->out_sg, sg_nents(rctx->out_sg), rctx->rsa_data, key->key_sz, 0, 0); err_rsa_crypt: writel(STARFIVE_RSA_RESET, cryp->base + STARFIVE_PKA_CACR_OFFSET); kfree(rctx->rsa_data); return ret; } static int starfive_rsa_enc(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_rsa_key *key = &ctx->rsa_key; struct starfive_cryp_request_ctx *rctx = akcipher_request_ctx(req); int ret; if (!key->key_sz) { akcipher_request_set_tfm(req, ctx->akcipher_fbk); ret = crypto_akcipher_encrypt(req); akcipher_request_set_tfm(req, tfm); return ret; } if (unlikely(!key->n || !key->e)) return -EINVAL; if (req->dst_len < key->key_sz) return dev_err_probe(cryp->dev, -EOVERFLOW, "Output buffer length less than parameter n\n"); rctx->in_sg = req->src; rctx->out_sg = req->dst; rctx->total = req->src_len; rctx->nents = sg_nents(rctx->in_sg); ctx->rctx = rctx; return starfive_rsa_enc_core(ctx, 1); } static int starfive_rsa_dec(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); struct starfive_cryp_dev *cryp = ctx->cryp; struct starfive_rsa_key *key = &ctx->rsa_key; struct starfive_cryp_request_ctx *rctx = akcipher_request_ctx(req); int ret; if (!key->key_sz) { akcipher_request_set_tfm(req, ctx->akcipher_fbk); ret = crypto_akcipher_decrypt(req); akcipher_request_set_tfm(req, tfm); return ret; } if (unlikely(!key->n || !key->d)) return -EINVAL; if (req->dst_len < key->key_sz) return dev_err_probe(cryp->dev, -EOVERFLOW, "Output buffer length less than parameter n\n"); rctx->in_sg = req->src; rctx->out_sg = req->dst; ctx->rctx = rctx; rctx->total = req->src_len; return starfive_rsa_enc_core(ctx, 0); } static int starfive_rsa_set_n(struct starfive_rsa_key *rsa_key, const char *value, size_t vlen) { const char *ptr = value; unsigned int bitslen; int ret; while (!*ptr && vlen) { ptr++; vlen--; } rsa_key->key_sz = vlen; bitslen = rsa_key->key_sz << 3; /* check valid key size */ if (bitslen & 0x1f) return -EINVAL; ret = -ENOMEM; rsa_key->n = kmemdup(ptr, rsa_key->key_sz, GFP_KERNEL); if (!rsa_key->n) goto err; return 0; err: rsa_key->key_sz = 0; rsa_key->n = NULL; starfive_rsa_free_key(rsa_key); return ret; } static int starfive_rsa_set_e(struct starfive_rsa_key *rsa_key, const char *value, size_t vlen) { const char *ptr = value; unsigned char pt; int loop; while (!*ptr && vlen) { ptr++; vlen--; } pt = *ptr; if (!rsa_key->key_sz || !vlen || vlen > rsa_key->key_sz) { rsa_key->e = NULL; return -EINVAL; } rsa_key->e = kzalloc(rsa_key->key_sz, GFP_KERNEL); if (!rsa_key->e) return -ENOMEM; for (loop = 8; loop > 0; loop--) { if (pt >> (loop - 1)) break; } rsa_key->e_bitlen = (vlen - 1) * 8 + loop; memcpy(rsa_key->e + (rsa_key->key_sz - vlen), ptr, vlen); return 0; } static int starfive_rsa_set_d(struct starfive_rsa_key *rsa_key, const char *value, size_t vlen) { const char *ptr = value; unsigned char pt; int loop; int ret; while (!*ptr && vlen) { ptr++; vlen--; } pt = *ptr; ret = -EINVAL; if (!rsa_key->key_sz || !vlen || vlen > rsa_key->key_sz) 
goto err; ret = -ENOMEM; rsa_key->d = kzalloc(rsa_key->key_sz, GFP_KERNEL); if (!rsa_key->d) goto err; for (loop = 8; loop > 0; loop--) { if (pt >> (loop - 1)) break; } rsa_key->d_bitlen = (vlen - 1) * 8 + loop; memcpy(rsa_key->d + (rsa_key->key_sz - vlen), ptr, vlen); return 0; err: rsa_key->d = NULL; return ret; } static int starfive_rsa_setkey(struct crypto_akcipher *tfm, const void *key, unsigned int keylen, bool private) { struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); struct rsa_key raw_key = {NULL}; struct starfive_rsa_key *rsa_key = &ctx->rsa_key; int ret; if (private) ret = rsa_parse_priv_key(&raw_key, key, keylen); else ret = rsa_parse_pub_key(&raw_key, key, keylen); if (ret < 0) goto err; starfive_rsa_free_key(rsa_key); /* Use fallback for mod > 256 + 1 byte prefix */ if (raw_key.n_sz > STARFIVE_RSA_MAX_KEYSZ + 1) return 0; ret = starfive_rsa_set_n(rsa_key, raw_key.n, raw_key.n_sz); if (ret) return ret; ret = starfive_rsa_set_e(rsa_key, raw_key.e, raw_key.e_sz); if (ret) goto err; if (private) { ret = starfive_rsa_set_d(rsa_key, raw_key.d, raw_key.d_sz); if (ret) goto err; } if (!rsa_key->n || !rsa_key->e) { ret = -EINVAL; goto err; } if (private && !rsa_key->d) { ret = -EINVAL; goto err; } return 0; err: starfive_rsa_free_key(rsa_key); return ret; } static int starfive_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); int ret; ret = crypto_akcipher_set_pub_key(ctx->akcipher_fbk, key, keylen); if (ret) return ret; return starfive_rsa_setkey(tfm, key, keylen, false); } static int starfive_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); int ret; ret = crypto_akcipher_set_priv_key(ctx->akcipher_fbk, key, keylen); if (ret) return ret; return starfive_rsa_setkey(tfm, key, keylen, true); } static unsigned int starfive_rsa_max_size(struct crypto_akcipher *tfm) { struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); if (ctx->rsa_key.key_sz) return ctx->rsa_key.key_sz; return crypto_akcipher_maxsize(ctx->akcipher_fbk); } static int starfive_rsa_init_tfm(struct crypto_akcipher *tfm) { struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); ctx->akcipher_fbk = crypto_alloc_akcipher("rsa-generic", 0, 0); if (IS_ERR(ctx->akcipher_fbk)) return PTR_ERR(ctx->akcipher_fbk); ctx->cryp = starfive_cryp_find_dev(ctx); if (!ctx->cryp) { crypto_free_akcipher(ctx->akcipher_fbk); return -ENODEV; } akcipher_set_reqsize(tfm, sizeof(struct starfive_cryp_request_ctx) + sizeof(struct crypto_akcipher) + 32); return 0; } static void starfive_rsa_exit_tfm(struct crypto_akcipher *tfm) { struct starfive_cryp_ctx *ctx = akcipher_tfm_ctx(tfm); struct starfive_rsa_key *key = (struct starfive_rsa_key *)&ctx->rsa_key; crypto_free_akcipher(ctx->akcipher_fbk); starfive_rsa_free_key(key); } static struct akcipher_alg starfive_rsa = { .encrypt = starfive_rsa_enc, .decrypt = starfive_rsa_dec, .sign = starfive_rsa_dec, .verify = starfive_rsa_enc, .set_pub_key = starfive_rsa_set_pub_key, .set_priv_key = starfive_rsa_set_priv_key, .max_size = starfive_rsa_max_size, .init = starfive_rsa_init_tfm, .exit = starfive_rsa_exit_tfm, .base = { .cra_name = "rsa", .cra_driver_name = "starfive-rsa", .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER | CRYPTO_ALG_NEED_FALLBACK, .cra_priority = 3000, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct starfive_cryp_ctx), }, }; int starfive_rsa_register_algs(void) { return 
crypto_register_akcipher(&starfive_rsa);
}

void starfive_rsa_unregister_algs(void)
{
	crypto_unregister_akcipher(&starfive_rsa);
}
linux-master
drivers/crypto/starfive/jh7110-rsa.c
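/*
 * Illustrative sketch only -- not part of jh7110-rsa.c.  It mirrors, in plain
 * C, the control flow starfive_rsa_cpu_start() drives in hardware: a
 * left-to-right square-and-multiply exponentiation in which every iteration
 * squares the accumulator (the CRYPTO_CMD_AARN PKA command) and multiplies in
 * the base only when the current exponent bit is set (CRYPTO_CMD_AERN), with
 * the operands held as Montgomery residues in the CAAR/CAER registers.
 * toy_modexp() is made up and, lacking a 128-bit intermediate, is only
 * numerically valid for mod < 2^32; it is shown for the structure, not as a
 * usable RSA primitive.
 */
#include <linux/types.h>

static u64 toy_modexp(u64 base, u64 exp, u64 mod)
{
	u64 acc = 1;
	int bit;

	for (bit = 63; bit >= 0; bit--) {
		acc = (acc * acc) % mod;		/* square   (AARN) */
		if (exp & (1ULL << bit))
			acc = (acc * base) % mod;	/* multiply (AERN) */
	}

	return acc;
}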
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Platform Security Processor (PSP) interface * * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. * * Author: Brijesh Singh <[email protected]> */ #include <linux/kernel.h> #include <linux/irqreturn.h> #include "sp-dev.h" #include "psp-dev.h" #include "sev-dev.h" #include "tee-dev.h" #include "platform-access.h" #include "dbc.h" struct psp_device *psp_master; static struct psp_device *psp_alloc_struct(struct sp_device *sp) { struct device *dev = sp->dev; struct psp_device *psp; psp = devm_kzalloc(dev, sizeof(*psp), GFP_KERNEL); if (!psp) return NULL; psp->dev = dev; psp->sp = sp; snprintf(psp->name, sizeof(psp->name), "psp-%u", sp->ord); return psp; } static irqreturn_t psp_irq_handler(int irq, void *data) { struct psp_device *psp = data; unsigned int status; /* Read the interrupt status: */ status = ioread32(psp->io_regs + psp->vdata->intsts_reg); /* Clear the interrupt status by writing the same value we read. */ iowrite32(status, psp->io_regs + psp->vdata->intsts_reg); /* invoke subdevice interrupt handlers */ if (status) { if (psp->sev_irq_handler) psp->sev_irq_handler(irq, psp->sev_irq_data, status); } return IRQ_HANDLED; } static unsigned int psp_get_capability(struct psp_device *psp) { unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg); /* * Check for a access to the registers. If this read returns * 0xffffffff, it's likely that the system is running a broken * BIOS which disallows access to the device. Stop here and * fail the PSP initialization (but not the load, as the CCP * could get properly initialized). */ if (val == 0xffffffff) { dev_notice(psp->dev, "psp: unable to access the device: you might be running a broken BIOS.\n"); return -ENODEV; } psp->capability = val; /* Detect if TSME and SME are both enabled */ if (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING && psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET) && cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) dev_notice(psp->dev, "psp: Both TSME and SME are active, SME is unnecessary when TSME is active.\n"); return 0; } static int psp_check_sev_support(struct psp_device *psp) { /* Check if device supports SEV feature */ if (!(psp->capability & PSP_CAPABILITY_SEV)) { dev_dbg(psp->dev, "psp does not support SEV\n"); return -ENODEV; } return 0; } static int psp_check_tee_support(struct psp_device *psp) { /* Check if device supports TEE feature */ if (!(psp->capability & PSP_CAPABILITY_TEE)) { dev_dbg(psp->dev, "psp does not support TEE\n"); return -ENODEV; } return 0; } static void psp_init_platform_access(struct psp_device *psp) { int ret; ret = platform_access_dev_init(psp); if (ret) { dev_warn(psp->dev, "platform access init failed: %d\n", ret); return; } /* dbc must come after platform access as it tests the feature */ ret = dbc_dev_init(psp); if (ret) dev_warn(psp->dev, "failed to init dynamic boost control: %d\n", ret); } static int psp_init(struct psp_device *psp) { int ret; if (!psp_check_sev_support(psp)) { ret = sev_dev_init(psp); if (ret) return ret; } if (!psp_check_tee_support(psp)) { ret = tee_dev_init(psp); if (ret) return ret; } if (psp->vdata->platform_access) psp_init_platform_access(psp); return 0; } int psp_dev_init(struct sp_device *sp) { struct device *dev = sp->dev; struct psp_device *psp; int ret; ret = -ENOMEM; psp = psp_alloc_struct(sp); if (!psp) goto e_err; sp->psp_data = psp; psp->vdata = (struct psp_vdata *)sp->dev_vdata->psp_vdata; if (!psp->vdata) { ret = -ENODEV; dev_err(dev, "missing 
driver data\n"); goto e_err; } psp->io_regs = sp->io_map; ret = psp_get_capability(psp); if (ret) goto e_disable; /* Disable and clear interrupts until ready */ iowrite32(0, psp->io_regs + psp->vdata->inten_reg); iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg); /* Request an irq */ ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); if (ret) { dev_err(dev, "psp: unable to allocate an IRQ\n"); goto e_err; } /* master device must be set for platform access */ if (psp->sp->set_psp_master_device) psp->sp->set_psp_master_device(psp->sp); ret = psp_init(psp); if (ret) goto e_irq; /* Enable interrupt */ iowrite32(-1, psp->io_regs + psp->vdata->inten_reg); dev_notice(dev, "psp enabled\n"); return 0; e_irq: if (sp->clear_psp_master_device) sp->clear_psp_master_device(sp); sp_free_psp_irq(psp->sp, psp); e_err: sp->psp_data = NULL; dev_notice(dev, "psp initialization failed\n"); return ret; e_disable: sp->psp_data = NULL; return ret; } void psp_dev_destroy(struct sp_device *sp) { struct psp_device *psp = sp->psp_data; if (!psp) return; sev_dev_destroy(psp); tee_dev_destroy(psp); dbc_dev_destroy(psp); platform_access_dev_destroy(psp); sp_free_psp_irq(sp, psp); if (sp->clear_psp_master_device) sp->clear_psp_master_device(sp); } void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler, void *data) { psp->sev_irq_data = data; psp->sev_irq_handler = handler; } void psp_clear_sev_irq_handler(struct psp_device *psp) { psp_set_sev_irq_handler(psp, NULL, NULL); } struct psp_device *psp_get_master_device(void) { struct sp_device *sp = sp_get_psp_master_device(); return sp ? sp->psp_data : NULL; } void psp_pci_init(void) { psp_master = psp_get_master_device(); if (!psp_master) return; sev_pci_init(); } void psp_pci_exit(void) { if (!psp_master) return; sev_pci_exit(); }
linux-master
drivers/crypto/ccp/psp-dev.c
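/*
 * Illustrative sketch only -- not part of psp-dev.c.  It shows how a PSP
 * sub-device hooks into the shared interrupt that psp_irq_handler() fans out,
 * in the same way sev-dev.c does.  my_sub_irq_handler(), my_data and
 * example_hook_psp_irq() are made up; the handler receives the already-read
 * and already-acknowledged intsts_reg value as 'status'.
 */
#include <linux/printk.h>
#include "psp-dev.h"

static void my_sub_irq_handler(int irq, void *data, unsigned int status)
{
	pr_debug("psp sub-device irq, status %#x\n", status);
}

static void example_hook_psp_irq(struct psp_device *psp, void *my_data)
{
	psp_set_sev_irq_handler(psp, my_sub_irq_handler, my_data);

	/* The sub-device detaches with psp_clear_sev_irq_handler(psp) on teardown. */
}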
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) RSA crypto API support * * Copyright (C) 2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook <[email protected]> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <crypto/algapi.h> #include <crypto/internal/rsa.h> #include <crypto/internal/akcipher.h> #include <crypto/akcipher.h> #include <crypto/scatterwalk.h> #include "ccp-crypto.h" static inline struct akcipher_request *akcipher_request_cast( struct crypto_async_request *req) { return container_of(req, struct akcipher_request, base); } static inline int ccp_copy_and_save_keypart(u8 **kpbuf, unsigned int *kplen, const u8 *buf, size_t sz) { int nskip; for (nskip = 0; nskip < sz; nskip++) if (buf[nskip]) break; *kplen = sz - nskip; *kpbuf = kmemdup(buf + nskip, *kplen, GFP_KERNEL); if (!*kpbuf) return -ENOMEM; return 0; } static int ccp_rsa_complete(struct crypto_async_request *async_req, int ret) { struct akcipher_request *req = akcipher_request_cast(async_req); struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx_dma(req); if (ret) return ret; req->dst_len = rctx->cmd.u.rsa.key_size >> 3; return 0; } static unsigned int ccp_rsa_maxsize(struct crypto_akcipher *tfm) { struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm); return ctx->u.rsa.n_len; } static int ccp_rsa_crypt(struct akcipher_request *req, bool encrypt) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct ccp_rsa_req_ctx *rctx = akcipher_request_ctx_dma(req); int ret = 0; memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_RSA; rctx->cmd.u.rsa.key_size = ctx->u.rsa.key_len; /* in bits */ if (encrypt) { rctx->cmd.u.rsa.exp = &ctx->u.rsa.e_sg; rctx->cmd.u.rsa.exp_len = ctx->u.rsa.e_len; } else { rctx->cmd.u.rsa.exp = &ctx->u.rsa.d_sg; rctx->cmd.u.rsa.exp_len = ctx->u.rsa.d_len; } rctx->cmd.u.rsa.mod = &ctx->u.rsa.n_sg; rctx->cmd.u.rsa.mod_len = ctx->u.rsa.n_len; rctx->cmd.u.rsa.src = req->src; rctx->cmd.u.rsa.src_len = req->src_len; rctx->cmd.u.rsa.dst = req->dst; ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); return ret; } static int ccp_rsa_encrypt(struct akcipher_request *req) { return ccp_rsa_crypt(req, true); } static int ccp_rsa_decrypt(struct akcipher_request *req) { return ccp_rsa_crypt(req, false); } static int ccp_check_key_length(unsigned int len) { /* In bits */ if (len < 8 || len > 4096) return -EINVAL; return 0; } static void ccp_rsa_free_key_bufs(struct ccp_ctx *ctx) { /* Clean up old key data */ kfree_sensitive(ctx->u.rsa.e_buf); ctx->u.rsa.e_buf = NULL; ctx->u.rsa.e_len = 0; kfree_sensitive(ctx->u.rsa.n_buf); ctx->u.rsa.n_buf = NULL; ctx->u.rsa.n_len = 0; kfree_sensitive(ctx->u.rsa.d_buf); ctx->u.rsa.d_buf = NULL; ctx->u.rsa.d_len = 0; } static int ccp_rsa_setkey(struct crypto_akcipher *tfm, const void *key, unsigned int keylen, bool private) { struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct rsa_key raw_key; int ret; ccp_rsa_free_key_bufs(ctx); memset(&raw_key, 0, sizeof(raw_key)); /* Code borrowed from crypto/rsa.c */ if (private) ret = rsa_parse_priv_key(&raw_key, key, keylen); else ret = rsa_parse_pub_key(&raw_key, key, keylen); if (ret) goto n_key; ret = ccp_copy_and_save_keypart(&ctx->u.rsa.n_buf, &ctx->u.rsa.n_len, raw_key.n, raw_key.n_sz); if (ret) goto key_err; sg_init_one(&ctx->u.rsa.n_sg, ctx->u.rsa.n_buf, ctx->u.rsa.n_len); ctx->u.rsa.key_len = ctx->u.rsa.n_len << 3; 
/* convert to bits */ if (ccp_check_key_length(ctx->u.rsa.key_len)) { ret = -EINVAL; goto key_err; } ret = ccp_copy_and_save_keypart(&ctx->u.rsa.e_buf, &ctx->u.rsa.e_len, raw_key.e, raw_key.e_sz); if (ret) goto key_err; sg_init_one(&ctx->u.rsa.e_sg, ctx->u.rsa.e_buf, ctx->u.rsa.e_len); if (private) { ret = ccp_copy_and_save_keypart(&ctx->u.rsa.d_buf, &ctx->u.rsa.d_len, raw_key.d, raw_key.d_sz); if (ret) goto key_err; sg_init_one(&ctx->u.rsa.d_sg, ctx->u.rsa.d_buf, ctx->u.rsa.d_len); } return 0; key_err: ccp_rsa_free_key_bufs(ctx); n_key: return ret; } static int ccp_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { return ccp_rsa_setkey(tfm, key, keylen, true); } static int ccp_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { return ccp_rsa_setkey(tfm, key, keylen, false); } static int ccp_rsa_init_tfm(struct crypto_akcipher *tfm) { struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm); akcipher_set_reqsize_dma(tfm, sizeof(struct ccp_rsa_req_ctx)); ctx->complete = ccp_rsa_complete; return 0; } static void ccp_rsa_exit_tfm(struct crypto_akcipher *tfm) { struct ccp_ctx *ctx = akcipher_tfm_ctx_dma(tfm); ccp_rsa_free_key_bufs(ctx); } static struct akcipher_alg ccp_rsa_defaults = { .encrypt = ccp_rsa_encrypt, .decrypt = ccp_rsa_decrypt, .set_pub_key = ccp_rsa_setpubkey, .set_priv_key = ccp_rsa_setprivkey, .max_size = ccp_rsa_maxsize, .init = ccp_rsa_init_tfm, .exit = ccp_rsa_exit_tfm, .base = { .cra_name = "rsa", .cra_driver_name = "rsa-ccp", .cra_priority = CCP_CRA_PRIORITY, .cra_module = THIS_MODULE, .cra_ctxsize = 2 * sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING, }, }; struct ccp_rsa_def { unsigned int version; const char *name; const char *driver_name; unsigned int reqsize; struct akcipher_alg *alg_defaults; }; static struct ccp_rsa_def rsa_algs[] = { { .version = CCP_VERSION(3, 0), .name = "rsa", .driver_name = "rsa-ccp", .reqsize = sizeof(struct ccp_rsa_req_ctx), .alg_defaults = &ccp_rsa_defaults, } }; static int ccp_register_rsa_alg(struct list_head *head, const struct ccp_rsa_def *def) { struct ccp_crypto_akcipher_alg *ccp_alg; struct akcipher_alg *alg; int ret; ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); if (!ccp_alg) return -ENOMEM; INIT_LIST_HEAD(&ccp_alg->entry); alg = &ccp_alg->alg; *alg = *def->alg_defaults; snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->driver_name); ret = crypto_register_akcipher(alg); if (ret) { pr_err("%s akcipher algorithm registration error (%d)\n", alg->base.cra_name, ret); kfree(ccp_alg); return ret; } list_add(&ccp_alg->entry, head); return 0; } int ccp_register_rsa_algs(struct list_head *head) { int i, ret; unsigned int ccpversion = ccp_version(); /* Register the RSA algorithm in standard mode * This works for CCP v3 and later */ for (i = 0; i < ARRAY_SIZE(rsa_algs); i++) { if (rsa_algs[i].version > ccpversion) continue; ret = ccp_register_rsa_alg(head, &rsa_algs[i]); if (ret) return ret; } return 0; }
linux-master
drivers/crypto/ccp/ccp-crypto-rsa.c
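/*
 * Illustrative sketch only -- not part of ccp-crypto-rsa.c.  It shows the
 * generic akcipher calling convention a kernel user would go through to reach
 * "rsa-ccp" (selection is by cra_priority, not by driver name).  The key blob
 * is the BER-encoded RSA public key expected by rsa_parse_pub_key(); the
 * der_key/msg/out buffer names and example_rsa_pub_encrypt() are made up.
 */
#include <crypto/akcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int example_rsa_pub_encrypt(const void *der_key, unsigned int der_len,
				   u8 *msg, unsigned int msg_len,
				   u8 *out, unsigned int out_len)
{
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct scatterlist src, dst;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, der_key, der_len);
	if (ret)
		goto out_tfm;

	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&src, msg, msg_len);
	sg_init_one(&dst, out, out_len);	/* out_len >= crypto_akcipher_maxsize(tfm) */
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	akcipher_request_set_crypt(req, &src, &dst, msg_len, out_len);

	ret = crypto_wait_req(crypto_akcipher_encrypt(req), &wait);

	akcipher_request_free(req);
out_tfm:
	crypto_free_akcipher(tfm);
	return ret;
}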
// SPDX-License-Identifier: MIT /* * AMD Trusted Execution Environment (TEE) interface * * Author: Rijo Thomas <[email protected]> * Author: Devaraj Rangasamy <[email protected]> * * Copyright (C) 2019,2021 Advanced Micro Devices, Inc. */ #include <linux/bitfield.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/gfp.h> #include <linux/psp.h> #include <linux/psp-tee.h> #include "psp-dev.h" #include "tee-dev.h" static bool psp_dead; static int tee_alloc_ring(struct psp_tee_device *tee, int ring_size) { struct ring_buf_manager *rb_mgr = &tee->rb_mgr; void *start_addr; if (!ring_size) return -EINVAL; /* We need actual physical address instead of DMA address, since * Trusted OS running on AMD Secure Processor will map this region */ start_addr = (void *)__get_free_pages(GFP_KERNEL, get_order(ring_size)); if (!start_addr) return -ENOMEM; memset(start_addr, 0x0, ring_size); rb_mgr->ring_start = start_addr; rb_mgr->ring_size = ring_size; rb_mgr->ring_pa = __psp_pa(start_addr); mutex_init(&rb_mgr->mutex); return 0; } static void tee_free_ring(struct psp_tee_device *tee) { struct ring_buf_manager *rb_mgr = &tee->rb_mgr; if (!rb_mgr->ring_start) return; free_pages((unsigned long)rb_mgr->ring_start, get_order(rb_mgr->ring_size)); rb_mgr->ring_start = NULL; rb_mgr->ring_size = 0; rb_mgr->ring_pa = 0; mutex_destroy(&rb_mgr->mutex); } static int tee_wait_cmd_poll(struct psp_tee_device *tee, unsigned int timeout, unsigned int *reg) { /* ~10ms sleep per loop => nloop = timeout * 100 */ int nloop = timeout * 100; while (--nloop) { *reg = ioread32(tee->io_regs + tee->vdata->cmdresp_reg); if (FIELD_GET(PSP_CMDRESP_RESP, *reg)) return 0; usleep_range(10000, 10100); } dev_err(tee->dev, "tee: command timed out, disabling PSP\n"); psp_dead = true; return -ETIMEDOUT; } static struct tee_init_ring_cmd *tee_alloc_cmd_buffer(struct psp_tee_device *tee) { struct tee_init_ring_cmd *cmd; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return NULL; cmd->hi_addr = upper_32_bits(tee->rb_mgr.ring_pa); cmd->low_addr = lower_32_bits(tee->rb_mgr.ring_pa); cmd->size = tee->rb_mgr.ring_size; dev_dbg(tee->dev, "tee: ring address: high = 0x%x low = 0x%x size = %u\n", cmd->hi_addr, cmd->low_addr, cmd->size); return cmd; } static inline void tee_free_cmd_buffer(struct tee_init_ring_cmd *cmd) { kfree(cmd); } static int tee_init_ring(struct psp_tee_device *tee) { int ring_size = MAX_RING_BUFFER_ENTRIES * sizeof(struct tee_ring_cmd); struct tee_init_ring_cmd *cmd; phys_addr_t cmd_buffer; unsigned int reg; int ret; BUILD_BUG_ON(sizeof(struct tee_ring_cmd) != 1024); ret = tee_alloc_ring(tee, ring_size); if (ret) { dev_err(tee->dev, "tee: ring allocation failed %d\n", ret); return ret; } tee->rb_mgr.wptr = 0; cmd = tee_alloc_cmd_buffer(tee); if (!cmd) { tee_free_ring(tee); return -ENOMEM; } cmd_buffer = __psp_pa((void *)cmd); /* Send command buffer details to Trusted OS by writing to * CPU-PSP message registers */ iowrite32(lower_32_bits(cmd_buffer), tee->io_regs + tee->vdata->cmdbuff_addr_lo_reg); iowrite32(upper_32_bits(cmd_buffer), tee->io_regs + tee->vdata->cmdbuff_addr_hi_reg); iowrite32(TEE_RING_INIT_CMD, tee->io_regs + tee->vdata->cmdresp_reg); ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg); if (ret) { dev_err(tee->dev, "tee: ring init command timed out\n"); tee_free_ring(tee); goto free_buf; } if (FIELD_GET(PSP_CMDRESP_STS, reg)) { dev_err(tee->dev, "tee: ring init command failed (%#010lx)\n", FIELD_GET(PSP_CMDRESP_STS, reg)); tee_free_ring(tee); ret = 
-EIO; } free_buf: tee_free_cmd_buffer(cmd); return ret; } static void tee_destroy_ring(struct psp_tee_device *tee) { unsigned int reg; int ret; if (!tee->rb_mgr.ring_start) return; if (psp_dead) goto free_ring; iowrite32(TEE_RING_DESTROY_CMD, tee->io_regs + tee->vdata->cmdresp_reg); ret = tee_wait_cmd_poll(tee, TEE_DEFAULT_TIMEOUT, &reg); if (ret) { dev_err(tee->dev, "tee: ring destroy command timed out\n"); } else if (FIELD_GET(PSP_CMDRESP_STS, reg)) { dev_err(tee->dev, "tee: ring destroy command failed (%#010lx)\n", FIELD_GET(PSP_CMDRESP_STS, reg)); } free_ring: tee_free_ring(tee); } int tee_dev_init(struct psp_device *psp) { struct device *dev = psp->dev; struct psp_tee_device *tee; int ret; ret = -ENOMEM; tee = devm_kzalloc(dev, sizeof(*tee), GFP_KERNEL); if (!tee) goto e_err; psp->tee_data = tee; tee->dev = dev; tee->psp = psp; tee->io_regs = psp->io_regs; tee->vdata = (struct tee_vdata *)psp->vdata->tee; if (!tee->vdata) { ret = -ENODEV; dev_err(dev, "tee: missing driver data\n"); goto e_err; } ret = tee_init_ring(tee); if (ret) { dev_err(dev, "tee: failed to init ring buffer\n"); goto e_err; } dev_notice(dev, "tee enabled\n"); return 0; e_err: psp->tee_data = NULL; dev_notice(dev, "tee initialization failed\n"); return ret; } void tee_dev_destroy(struct psp_device *psp) { struct psp_tee_device *tee = psp->tee_data; if (!tee) return; tee_destroy_ring(tee); } static int tee_submit_cmd(struct psp_tee_device *tee, enum tee_cmd_id cmd_id, void *buf, size_t len, struct tee_ring_cmd **resp) { struct tee_ring_cmd *cmd; int nloop = 1000, ret = 0; u32 rptr; *resp = NULL; mutex_lock(&tee->rb_mgr.mutex); /* Loop until empty entry found in ring buffer */ do { /* Get pointer to ring buffer command entry */ cmd = (struct tee_ring_cmd *) (tee->rb_mgr.ring_start + tee->rb_mgr.wptr); rptr = ioread32(tee->io_regs + tee->vdata->ring_rptr_reg); /* Check if ring buffer is full or command entry is waiting * for response from TEE */ if (!(tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr || cmd->flag == CMD_WAITING_FOR_RESPONSE)) break; dev_dbg(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u\n", rptr, tee->rb_mgr.wptr); /* Wait if ring buffer is full or TEE is processing data */ mutex_unlock(&tee->rb_mgr.mutex); schedule_timeout_interruptible(msecs_to_jiffies(10)); mutex_lock(&tee->rb_mgr.mutex); } while (--nloop); if (!nloop && (tee->rb_mgr.wptr + sizeof(struct tee_ring_cmd) == rptr || cmd->flag == CMD_WAITING_FOR_RESPONSE)) { dev_err(tee->dev, "tee: ring buffer full. rptr = %u wptr = %u response flag %u\n", rptr, tee->rb_mgr.wptr, cmd->flag); ret = -EBUSY; goto unlock; } /* Do not submit command if PSP got disabled while processing any * command in another thread */ if (psp_dead) { ret = -EBUSY; goto unlock; } /* Write command data into ring buffer */ cmd->cmd_id = cmd_id; cmd->cmd_state = TEE_CMD_STATE_INIT; memset(&cmd->buf[0], 0, sizeof(cmd->buf)); memcpy(&cmd->buf[0], buf, len); /* Indicate driver is waiting for response */ cmd->flag = CMD_WAITING_FOR_RESPONSE; /* Update local copy of write pointer */ tee->rb_mgr.wptr += sizeof(struct tee_ring_cmd); if (tee->rb_mgr.wptr >= tee->rb_mgr.ring_size) tee->rb_mgr.wptr = 0; /* Trigger interrupt to Trusted OS */ iowrite32(tee->rb_mgr.wptr, tee->io_regs + tee->vdata->ring_wptr_reg); /* The response is provided by Trusted OS in same * location as submitted data entry within ring buffer. 
*/ *resp = cmd; unlock: mutex_unlock(&tee->rb_mgr.mutex); return ret; } static int tee_wait_cmd_completion(struct psp_tee_device *tee, struct tee_ring_cmd *resp, unsigned int timeout) { /* ~1ms sleep per loop => nloop = timeout * 1000 */ int nloop = timeout * 1000; while (--nloop) { if (resp->cmd_state == TEE_CMD_STATE_COMPLETED) return 0; usleep_range(1000, 1100); } dev_err(tee->dev, "tee: command 0x%x timed out, disabling PSP\n", resp->cmd_id); psp_dead = true; return -ETIMEDOUT; } int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len, u32 *status) { struct psp_device *psp = psp_get_master_device(); struct psp_tee_device *tee; struct tee_ring_cmd *resp; int ret; if (!buf || !status || !len || len > sizeof(resp->buf)) return -EINVAL; *status = 0; if (!psp || !psp->tee_data) return -ENODEV; if (psp_dead) return -EBUSY; tee = psp->tee_data; ret = tee_submit_cmd(tee, cmd_id, buf, len, &resp); if (ret) return ret; ret = tee_wait_cmd_completion(tee, resp, TEE_DEFAULT_TIMEOUT); if (ret) { resp->flag = CMD_RESPONSE_TIMEDOUT; return ret; } memcpy(buf, &resp->buf[0], len); *status = resp->status; resp->flag = CMD_RESPONSE_COPIED; return 0; } EXPORT_SYMBOL(psp_tee_process_cmd); int psp_check_tee_status(void) { struct psp_device *psp = psp_get_master_device(); if (!psp || !psp->tee_data) return -ENODEV; return 0; } EXPORT_SYMBOL(psp_check_tee_status);
linux-master
drivers/crypto/ccp/tee-dev.c
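/*
 * Illustrative sketch only -- not part of tee-dev.c.  It shows the calling
 * convention for the exported psp_tee_process_cmd() ring interface above; the
 * command ID and payload layout are defined by the caller (e.g. the AMD-TEE
 * driver), so example_send_tee_cmd() and its parameters are made up.
 */
#include <linux/psp-tee.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_send_tee_cmd(enum tee_cmd_id cmd_id, void *payload, size_t len)
{
	u32 status;
	int ret;

	if (psp_check_tee_status())
		return -ENODEV;		/* no PSP with TEE support present */

	/* len must not exceed the per-entry buffer (struct tee_ring_cmd::buf). */
	ret = psp_tee_process_cmd(cmd_id, payload, len, &status);
	if (ret)
		return ret;		/* ring full, timeout, or PSP marked dead */

	/* A non-zero 'status' is the Trusted OS error code for this command. */
	return status ? -EIO : 0;
}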
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Secure Processor driver * * Copyright (C) 2017-2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <[email protected]> * Author: Gary R Hook <[email protected]> * Author: Brijesh Singh <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/spinlock_types.h> #include <linux/types.h> #include <linux/ccp.h> #include "ccp-dev.h" #include "sp-dev.h" MODULE_AUTHOR("Tom Lendacky <[email protected]>"); MODULE_AUTHOR("Gary R Hook <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.1.0"); MODULE_DESCRIPTION("AMD Secure Processor driver"); /* List of SPs, SP count, read-write access lock, and access functions * * Lock structure: get sp_unit_lock for reading whenever we need to * examine the SP list. */ static DEFINE_RWLOCK(sp_unit_lock); static LIST_HEAD(sp_units); /* Ever-increasing value to produce unique unit numbers */ static atomic_t sp_ordinal; static void sp_add_device(struct sp_device *sp) { unsigned long flags; write_lock_irqsave(&sp_unit_lock, flags); list_add_tail(&sp->entry, &sp_units); write_unlock_irqrestore(&sp_unit_lock, flags); } static void sp_del_device(struct sp_device *sp) { unsigned long flags; write_lock_irqsave(&sp_unit_lock, flags); list_del(&sp->entry); write_unlock_irqrestore(&sp_unit_lock, flags); } static irqreturn_t sp_irq_handler(int irq, void *data) { struct sp_device *sp = data; if (sp->ccp_irq_handler) sp->ccp_irq_handler(irq, sp->ccp_irq_data); if (sp->psp_irq_handler) sp->psp_irq_handler(irq, sp->psp_irq_data); return IRQ_HANDLED; } int sp_request_ccp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data) { int ret; if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) { /* Need a common routine to manage all interrupts */ sp->ccp_irq_data = data; sp->ccp_irq_handler = handler; if (!sp->irq_registered) { ret = request_irq(sp->ccp_irq, sp_irq_handler, 0, sp->name, sp); if (ret) return ret; sp->irq_registered = true; } } else { /* Each sub-device can manage it's own interrupt */ ret = request_irq(sp->ccp_irq, handler, 0, name, data); if (ret) return ret; } return 0; } int sp_request_psp_irq(struct sp_device *sp, irq_handler_t handler, const char *name, void *data) { int ret; if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) { /* Need a common routine to manage all interrupts */ sp->psp_irq_data = data; sp->psp_irq_handler = handler; if (!sp->irq_registered) { ret = request_irq(sp->psp_irq, sp_irq_handler, 0, sp->name, sp); if (ret) return ret; sp->irq_registered = true; } } else { /* Each sub-device can manage it's own interrupt */ ret = request_irq(sp->psp_irq, handler, 0, name, data); if (ret) return ret; } return 0; } void sp_free_ccp_irq(struct sp_device *sp, void *data) { if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->psp_vdata) { /* Using common routine to manage all interrupts */ if (!sp->psp_irq_handler) { /* Nothing else using it, so free it */ free_irq(sp->ccp_irq, sp); sp->irq_registered = false; } sp->ccp_irq_handler = NULL; sp->ccp_irq_data = NULL; } else { /* Each sub-device can manage it's own interrupt */ free_irq(sp->ccp_irq, data); } } void sp_free_psp_irq(struct sp_device *sp, void *data) { if ((sp->psp_irq == sp->ccp_irq) && sp->dev_vdata->ccp_vdata) { /* Using common routine to manage all interrupts */ if (!sp->ccp_irq_handler) { /* Nothing else using it, so free it */ 
free_irq(sp->psp_irq, sp); sp->irq_registered = false; } sp->psp_irq_handler = NULL; sp->psp_irq_data = NULL; } else { /* Each sub-device can manage it's own interrupt */ free_irq(sp->psp_irq, data); } } /** * sp_alloc_struct - allocate and initialize the sp_device struct * * @dev: device struct of the SP */ struct sp_device *sp_alloc_struct(struct device *dev) { struct sp_device *sp; sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL); if (!sp) return NULL; sp->dev = dev; sp->ord = atomic_inc_return(&sp_ordinal); snprintf(sp->name, SP_MAX_NAME_LEN, "sp-%u", sp->ord); return sp; } int sp_init(struct sp_device *sp) { sp_add_device(sp); if (sp->dev_vdata->ccp_vdata) ccp_dev_init(sp); if (sp->dev_vdata->psp_vdata) psp_dev_init(sp); return 0; } void sp_destroy(struct sp_device *sp) { if (sp->dev_vdata->ccp_vdata) ccp_dev_destroy(sp); if (sp->dev_vdata->psp_vdata) psp_dev_destroy(sp); sp_del_device(sp); } int sp_suspend(struct sp_device *sp) { if (sp->dev_vdata->ccp_vdata) { ccp_dev_suspend(sp); } return 0; } int sp_resume(struct sp_device *sp) { if (sp->dev_vdata->ccp_vdata) { ccp_dev_resume(sp); } return 0; } struct sp_device *sp_get_psp_master_device(void) { struct sp_device *i, *ret = NULL; unsigned long flags; write_lock_irqsave(&sp_unit_lock, flags); if (list_empty(&sp_units)) goto unlock; list_for_each_entry(i, &sp_units, entry) { if (i->psp_data && i->get_psp_master_device) { ret = i->get_psp_master_device(); break; } } unlock: write_unlock_irqrestore(&sp_unit_lock, flags); return ret; } static int __init sp_mod_init(void) { #ifdef CONFIG_X86 int ret; ret = sp_pci_init(); if (ret) return ret; #ifdef CONFIG_CRYPTO_DEV_SP_PSP psp_pci_init(); #endif return 0; #endif #ifdef CONFIG_ARM64 int ret; ret = sp_platform_init(); if (ret) return ret; return 0; #endif return -ENODEV; } static void __exit sp_mod_exit(void) { #ifdef CONFIG_X86 #ifdef CONFIG_CRYPTO_DEV_SP_PSP psp_pci_exit(); #endif sp_pci_exit(); #endif #ifdef CONFIG_ARM64 sp_platform_exit(); #endif } module_init(sp_mod_init); module_exit(sp_mod_exit);
linux-master
drivers/crypto/ccp/sp-dev.c
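/*
 * Illustrative sketch only -- not part of sp-dev.c.  It outlines the bring-up
 * order a bus front-end (modelled loosely on the PCI/platform glue, which is
 * not shown here) follows with the helpers above: allocate the sp_device,
 * fill in the vdata, register mapping and IRQ lines, then let sp_init()
 * create the CCP and/or PSP sub-devices.  example_sp_probe() and its
 * parameters are made up; field usage beyond dev_vdata, io_map, psp_irq and
 * ccp_irq is an assumption.
 */
#include <linux/device.h>
#include <linux/io.h>
#include "sp-dev.h"

static int example_sp_probe(struct device *dev,
			    const struct sp_dev_vdata *vdata,
			    void __iomem *iomap, unsigned int irq)
{
	struct sp_device *sp;

	sp = sp_alloc_struct(dev);
	if (!sp)
		return -ENOMEM;

	sp->dev_vdata = vdata;
	sp->io_map = iomap;
	sp->psp_irq = irq;
	sp->ccp_irq = irq;	/* shared line: sp_irq_handler() fans out to both */

	return sp_init(sp);	/* registers the CCP and/or PSP sub-devices */
}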
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) driver * * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <[email protected]> * Author: Gary R Hook <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/ccp.h> #include "ccp-dev.h" static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count) { int start; struct ccp_device *ccp = cmd_q->ccp; for (;;) { mutex_lock(&ccp->sb_mutex); start = (u32)bitmap_find_next_zero_area(ccp->sb, ccp->sb_count, ccp->sb_start, count, 0); if (start <= ccp->sb_count) { bitmap_set(ccp->sb, start, count); mutex_unlock(&ccp->sb_mutex); break; } ccp->sb_avail = 0; mutex_unlock(&ccp->sb_mutex); /* Wait for KSB entries to become available */ if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) return 0; } return KSB_START + start; } static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start, unsigned int count) { struct ccp_device *ccp = cmd_q->ccp; if (!start) return; mutex_lock(&ccp->sb_mutex); bitmap_clear(ccp->sb, start - KSB_START, count); ccp->sb_avail = 1; mutex_unlock(&ccp->sb_mutex); wake_up_interruptible_all(&ccp->sb_queue); } static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q) { return CMD_Q_DEPTH(ioread32(cmd_q->reg_status)); } static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count) { struct ccp_cmd_queue *cmd_q = op->cmd_q; struct ccp_device *ccp = cmd_q->ccp; void __iomem *cr_addr; u32 cr0, cmd; unsigned int i; int ret = 0; /* We could read a status register to see how many free slots * are actually available, but reading that register resets it * and you could lose some error information. */ cmd_q->free_slots--; cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT) | (op->jobid << REQ0_JOBID_SHIFT) | REQ0_WAIT_FOR_WRITE; if (op->soc) cr0 |= REQ0_STOP_ON_COMPLETE | REQ0_INT_ON_COMPLETE; if (op->ioc || !cmd_q->free_slots) cr0 |= REQ0_INT_ON_COMPLETE; /* Start at CMD_REQ1 */ cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR; mutex_lock(&ccp->req_mutex); /* Write CMD_REQ1 through CMD_REQx first */ for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR) iowrite32(*(cr + i), cr_addr); /* Tell the CCP to start */ wmb(); iowrite32(cr0, ccp->io_regs + CMD_REQ0); mutex_unlock(&ccp->req_mutex); if (cr0 & REQ0_INT_ON_COMPLETE) { /* Wait for the job to complete */ ret = wait_event_interruptible(cmd_q->int_queue, cmd_q->int_rcvd); if (ret || cmd_q->cmd_error) { /* On error delete all related jobs from the queue */ cmd = (cmd_q->id << DEL_Q_ID_SHIFT) | op->jobid; if (cmd_q->cmd_error) ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); if (!ret) ret = -EIO; } else if (op->soc) { /* Delete just head job from the queue on SoC */ cmd = DEL_Q_ACTIVE | (cmd_q->id << DEL_Q_ID_SHIFT) | op->jobid; iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB); } cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status); cmd_q->int_rcvd = 0; } return ret; } static int ccp_perform_aes(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT) | (op->u.aes.type << REQ1_AES_TYPE_SHIFT) | (op->u.aes.mode << REQ1_AES_MODE_SHIFT) | (op->u.aes.action << REQ1_AES_ACTION_SHIFT) | (op->sb_key << REQ1_KEY_KSB_SHIFT); cr[1] = op->src.u.dma.length - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | 
ccp_addr_hi(&op->src.u.dma); cr[4] = ccp_addr_lo(&op->dst.u.dma); cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | ccp_addr_hi(&op->dst.u.dma); if (op->u.aes.mode == CCP_AES_MODE_CFB) cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT); if (op->eom) cr[0] |= REQ1_EOM; if (op->init) cr[0] |= REQ1_INIT; return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static int ccp_perform_xts_aes(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT) | (op->u.xts.action << REQ1_AES_ACTION_SHIFT) | (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT) | (op->sb_key << REQ1_KEY_KSB_SHIFT); cr[1] = op->src.u.dma.length - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); cr[4] = ccp_addr_lo(&op->dst.u.dma); cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | ccp_addr_hi(&op->dst.u.dma); if (op->eom) cr[0] |= REQ1_EOM; if (op->init) cr[0] |= REQ1_INIT; return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static int ccp_perform_sha(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT) | (op->u.sha.type << REQ1_SHA_TYPE_SHIFT) | REQ1_INIT; cr[1] = op->src.u.dma.length - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); if (op->eom) { cr[0] |= REQ1_EOM; cr[4] = lower_32_bits(op->u.sha.msg_bits); cr[5] = upper_32_bits(op->u.sha.msg_bits); } else { cr[4] = 0; cr[5] = 0; } return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static int ccp_perform_rsa(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT) | (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT) | (op->sb_key << REQ1_KEY_KSB_SHIFT) | REQ1_EOM; cr[1] = op->u.rsa.input_len - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT) | (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); cr[4] = ccp_addr_lo(&op->dst.u.dma); cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | ccp_addr_hi(&op->dst.u.dma); return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static int ccp_perform_passthru(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT) | (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT) | (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT); if (op->src.type == CCP_MEMTYPE_SYSTEM) cr[1] = op->src.u.dma.length - 1; else cr[1] = op->dst.u.dma.length - 1; if (op->src.type == CCP_MEMTYPE_SYSTEM) { cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) cr[3] |= (op->sb_key << REQ4_KSB_SHIFT); } else { cr[2] = op->src.u.sb * CCP_SB_BYTES; cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT); } if (op->dst.type == CCP_MEMTYPE_SYSTEM) { cr[4] = ccp_addr_lo(&op->dst.u.dma); cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | ccp_addr_hi(&op->dst.u.dma); } else { cr[4] = op->dst.u.sb * CCP_SB_BYTES; cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT); } if (op->eom) cr[0] |= REQ1_EOM; return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static int ccp_perform_ecc(struct ccp_op *op) { u32 cr[6]; /* Fill out the register contents for REQ1 through REQ6 */ cr[0] = REQ1_ECC_AFFINE_CONVERT | (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT) | 
(op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT) | REQ1_EOM; cr[1] = op->src.u.dma.length - 1; cr[2] = ccp_addr_lo(&op->src.u.dma); cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT) | ccp_addr_hi(&op->src.u.dma); cr[4] = ccp_addr_lo(&op->dst.u.dma); cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT) | ccp_addr_hi(&op->dst.u.dma); return ccp_do_cmd(op, cr, ARRAY_SIZE(cr)); } static void ccp_disable_queue_interrupts(struct ccp_device *ccp) { iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG); } static void ccp_enable_queue_interrupts(struct ccp_device *ccp) { iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG); } static void ccp_irq_bh(unsigned long data) { struct ccp_device *ccp = (struct ccp_device *)data; struct ccp_cmd_queue *cmd_q; u32 q_int, status; unsigned int i; status = ioread32(ccp->io_regs + IRQ_STATUS_REG); for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; q_int = status & (cmd_q->int_ok | cmd_q->int_err); if (q_int) { cmd_q->int_status = status; cmd_q->q_status = ioread32(cmd_q->reg_status); cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); /* On error, only save the first error value */ if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); cmd_q->int_rcvd = 1; /* Acknowledge the interrupt and wake the kthread */ iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG); wake_up_interruptible(&cmd_q->int_queue); } } ccp_enable_queue_interrupts(ccp); } static irqreturn_t ccp_irq_handler(int irq, void *data) { struct ccp_device *ccp = (struct ccp_device *)data; ccp_disable_queue_interrupts(ccp); if (ccp->use_tasklet) tasklet_schedule(&ccp->irq_tasklet); else ccp_irq_bh((unsigned long)ccp); return IRQ_HANDLED; } static int ccp_init(struct ccp_device *ccp) { struct device *dev = ccp->dev; struct ccp_cmd_queue *cmd_q; struct dma_pool *dma_pool; char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; unsigned int qmr, i; int ret; /* Find available queues */ ccp->qim = 0; qmr = ioread32(ccp->io_regs + Q_MASK_REG); for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { if (!(qmr & (1 << i))) continue; /* Allocate a dma pool for this queue */ snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d", ccp->name, i); dma_pool = dma_pool_create(dma_pool_name, dev, CCP_DMAPOOL_MAX_SIZE, CCP_DMAPOOL_ALIGN, 0); if (!dma_pool) { dev_err(dev, "unable to allocate dma pool\n"); ret = -ENOMEM; goto e_pool; } cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; ccp->cmd_q_count++; cmd_q->ccp = ccp; cmd_q->id = i; cmd_q->dma_pool = dma_pool; /* Reserve 2 KSB regions for the queue */ cmd_q->sb_key = KSB_START + ccp->sb_start++; cmd_q->sb_ctx = KSB_START + ccp->sb_start++; ccp->sb_count -= 2; /* Preset some register values and masks that are queue * number dependent */ cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE + (CMD_Q_STATUS_INCR * i); cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE + (CMD_Q_STATUS_INCR * i); cmd_q->int_ok = 1 << (i * 2); cmd_q->int_err = 1 << ((i * 2) + 1); cmd_q->free_slots = ccp_get_free_slots(cmd_q); init_waitqueue_head(&cmd_q->int_queue); /* Build queue interrupt mask (two interrupts per queue) */ ccp->qim |= cmd_q->int_ok | cmd_q->int_err; #ifdef CONFIG_ARM64 /* For arm64 set the recommended queue cache settings */ iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE + (CMD_Q_CACHE_INC * i)); #endif dev_dbg(dev, "queue #%u available\n", i); } if (ccp->cmd_q_count == 0) { dev_notice(dev, "no command queues available\n"); ret = -EIO; goto e_pool; } dev_notice(dev, "%u command queues available\n", 
ccp->cmd_q_count); /* Disable and clear interrupts until ready */ ccp_disable_queue_interrupts(ccp); for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; ioread32(cmd_q->reg_int_status); ioread32(cmd_q->reg_status); } iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG); /* Request an irq */ ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp); if (ret) { dev_err(dev, "unable to allocate an IRQ\n"); goto e_pool; } /* Initialize the ISR tasklet? */ if (ccp->use_tasklet) tasklet_init(&ccp->irq_tasklet, ccp_irq_bh, (unsigned long)ccp); dev_dbg(dev, "Starting threads...\n"); /* Create a kthread for each queue */ for (i = 0; i < ccp->cmd_q_count; i++) { struct task_struct *kthread; cmd_q = &ccp->cmd_q[i]; kthread = kthread_run(ccp_cmd_queue_thread, cmd_q, "%s-q%u", ccp->name, cmd_q->id); if (IS_ERR(kthread)) { dev_err(dev, "error creating queue thread (%ld)\n", PTR_ERR(kthread)); ret = PTR_ERR(kthread); goto e_kthread; } cmd_q->kthread = kthread; } dev_dbg(dev, "Enabling interrupts...\n"); /* Enable interrupts */ ccp_enable_queue_interrupts(ccp); dev_dbg(dev, "Registering device...\n"); ccp_add_device(ccp); ret = ccp_register_rng(ccp); if (ret) goto e_kthread; /* Register the DMA engine support */ ret = ccp_dmaengine_register(ccp); if (ret) goto e_hwrng; return 0; e_hwrng: ccp_unregister_rng(ccp); e_kthread: for (i = 0; i < ccp->cmd_q_count; i++) if (ccp->cmd_q[i].kthread) kthread_stop(ccp->cmd_q[i].kthread); sp_free_ccp_irq(ccp->sp, ccp); e_pool: for (i = 0; i < ccp->cmd_q_count; i++) dma_pool_destroy(ccp->cmd_q[i].dma_pool); return ret; } static void ccp_destroy(struct ccp_device *ccp) { struct ccp_cmd_queue *cmd_q; struct ccp_cmd *cmd; unsigned int i; /* Unregister the DMA engine */ ccp_dmaengine_unregister(ccp); /* Unregister the RNG */ ccp_unregister_rng(ccp); /* Remove this device from the list of available units */ ccp_del_device(ccp); /* Disable and clear interrupts */ ccp_disable_queue_interrupts(ccp); for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; ioread32(cmd_q->reg_int_status); ioread32(cmd_q->reg_status); } iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG); /* Stop the queue kthreads */ for (i = 0; i < ccp->cmd_q_count; i++) if (ccp->cmd_q[i].kthread) kthread_stop(ccp->cmd_q[i].kthread); sp_free_ccp_irq(ccp->sp, ccp); for (i = 0; i < ccp->cmd_q_count; i++) dma_pool_destroy(ccp->cmd_q[i].dma_pool); /* Flush the cmd and backlog queue */ while (!list_empty(&ccp->cmd)) { /* Invoke the callback directly with an error code */ cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); list_del(&cmd->entry); cmd->callback(cmd->data, -ENODEV); } while (!list_empty(&ccp->backlog)) { /* Invoke the callback directly with an error code */ cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); list_del(&cmd->entry); cmd->callback(cmd->data, -ENODEV); } } static const struct ccp_actions ccp3_actions = { .aes = ccp_perform_aes, .xts_aes = ccp_perform_xts_aes, .des3 = NULL, .sha = ccp_perform_sha, .rsa = ccp_perform_rsa, .passthru = ccp_perform_passthru, .ecc = ccp_perform_ecc, .sballoc = ccp_alloc_ksb, .sbfree = ccp_free_ksb, .init = ccp_init, .destroy = ccp_destroy, .get_free_slots = ccp_get_free_slots, .irqhandler = ccp_irq_handler, }; const struct ccp_vdata ccpv3_platform = { .version = CCP_VERSION(3, 0), .setup = NULL, .perform = &ccp3_actions, .offset = 0, .rsamax = CCP_RSA_MAX_WIDTH, }; const struct ccp_vdata ccpv3 = { .version = CCP_VERSION(3, 0), .setup = NULL, .perform = &ccp3_actions, .offset = 0x20000, .rsamax = CCP_RSA_MAX_WIDTH, };
linux-master
drivers/crypto/ccp/ccp-dev-v3.c
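The queue discovery in ccp_init() above gives every available command queue two interrupt bits (an "ok" bit and an "err" bit) and folds them into a global mask written to IRQ_MASK_REG. The following is a small stand-alone user-space sketch of that bit arithmetic only, not driver code; the MAX_HW_QUEUES value of 5 is an assumption made here for illustration.

#include <stdio.h>

#define MAX_HW_QUEUES 5	/* assumed value, for illustration only */

/*
 * Mirrors the mask building in ccp_init(): every queue advertised in the
 * Q_MASK register owns two IRQ status bits, "ok" at bit 2*i and "err" at
 * bit 2*i + 1; qim is the OR of all of them.
 */
static unsigned int build_irq_mask(unsigned int qmr)
{
	unsigned int qim = 0;
	unsigned int i;

	for (i = 0; i < MAX_HW_QUEUES; i++) {
		unsigned int int_ok, int_err;

		if (!(qmr & (1u << i)))
			continue;

		int_ok  = 1u << (i * 2);
		int_err = 1u << (i * 2 + 1);
		qim |= int_ok | int_err;
		printf("queue %u: ok=0x%08x err=0x%08x\n", i, int_ok, int_err);
	}

	return qim;
}

int main(void)
{
	/* e.g. the hardware reports queues 0 and 2 available (qmr = 0b101) */
	printf("qim=0x%08x\n", build_irq_mask(0x5));
	return 0;
}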
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) AES CMAC crypto API support * * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <[email protected]> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <crypto/algapi.h> #include <crypto/aes.h> #include <crypto/hash.h> #include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> #include "ccp-crypto.h" static int ccp_aes_cmac_complete(struct crypto_async_request *async_req, int ret) { struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req); unsigned int digest_size = crypto_ahash_digestsize(tfm); if (ret) goto e_free; if (rctx->hash_rem) { /* Save remaining data to buffer */ unsigned int offset = rctx->nbytes - rctx->hash_rem; scatterwalk_map_and_copy(rctx->buf, rctx->src, offset, rctx->hash_rem, 0); rctx->buf_count = rctx->hash_rem; } else { rctx->buf_count = 0; } /* Update result area if supplied */ if (req->result && rctx->final) memcpy(req->result, rctx->iv, digest_size); e_free: sg_free_table(&rctx->data_sg); return ret; } static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes, unsigned int final) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req); struct scatterlist *sg, *cmac_key_sg = NULL; unsigned int block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); unsigned int need_pad, sg_count; gfp_t gfp; u64 len; int ret; if (!ctx->u.aes.key_len) return -EINVAL; if (nbytes) rctx->null_msg = 0; len = (u64)rctx->buf_count + (u64)nbytes; if (!final && (len <= block_size)) { scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src, 0, nbytes, 0); rctx->buf_count += nbytes; return 0; } rctx->src = req->src; rctx->nbytes = nbytes; rctx->final = final; rctx->hash_rem = final ? 0 : len & (block_size - 1); rctx->hash_cnt = len - rctx->hash_rem; if (!final && !rctx->hash_rem) { /* CCP can't do zero length final, so keep some data around */ rctx->hash_cnt -= block_size; rctx->hash_rem = block_size; } if (final && (rctx->null_msg || (len & (block_size - 1)))) need_pad = 1; else need_pad = 0; sg_init_one(&rctx->iv_sg, rctx->iv, sizeof(rctx->iv)); /* Build the data scatterlist table - allocate enough entries for all * possible data pieces (buffer, input data, padding) */ sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2; gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp); if (ret) return ret; sg = NULL; if (rctx->buf_count) { sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg); if (!sg) { ret = -EINVAL; goto e_free; } } if (nbytes) { sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src); if (!sg) { ret = -EINVAL; goto e_free; } } if (need_pad) { int pad_length = block_size - (len & (block_size - 1)); rctx->hash_cnt += pad_length; memset(rctx->pad, 0, sizeof(rctx->pad)); rctx->pad[0] = 0x80; sg_init_one(&rctx->pad_sg, rctx->pad, pad_length); sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->pad_sg); if (!sg) { ret = -EINVAL; goto e_free; } } if (sg) { sg_mark_end(sg); sg = rctx->data_sg.sgl; } /* Initialize the K1/K2 scatterlist */ if (final) cmac_key_sg = (need_pad) ? 
&ctx->u.aes.k2_sg : &ctx->u.aes.k1_sg; memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_AES; rctx->cmd.u.aes.type = ctx->u.aes.type; rctx->cmd.u.aes.mode = ctx->u.aes.mode; rctx->cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT; rctx->cmd.u.aes.key = &ctx->u.aes.key_sg; rctx->cmd.u.aes.key_len = ctx->u.aes.key_len; rctx->cmd.u.aes.iv = &rctx->iv_sg; rctx->cmd.u.aes.iv_len = AES_BLOCK_SIZE; rctx->cmd.u.aes.src = sg; rctx->cmd.u.aes.src_len = rctx->hash_cnt; rctx->cmd.u.aes.dst = NULL; rctx->cmd.u.aes.cmac_key = cmac_key_sg; rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len; rctx->cmd.u.aes.cmac_final = final; ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); return ret; e_free: sg_free_table(&rctx->data_sg); return ret; } static int ccp_aes_cmac_init(struct ahash_request *req) { struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req); memset(rctx, 0, sizeof(*rctx)); rctx->null_msg = 1; return 0; } static int ccp_aes_cmac_update(struct ahash_request *req) { return ccp_do_cmac_update(req, req->nbytes, 0); } static int ccp_aes_cmac_final(struct ahash_request *req) { return ccp_do_cmac_update(req, 0, 1); } static int ccp_aes_cmac_finup(struct ahash_request *req) { return ccp_do_cmac_update(req, req->nbytes, 1); } static int ccp_aes_cmac_digest(struct ahash_request *req) { int ret; ret = ccp_aes_cmac_init(req); if (ret) return ret; return ccp_aes_cmac_finup(req); } static int ccp_aes_cmac_export(struct ahash_request *req, void *out) { struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req); struct ccp_aes_cmac_exp_ctx state; /* Don't let anything leak to 'out' */ memset(&state, 0, sizeof(state)); state.null_msg = rctx->null_msg; memcpy(state.iv, rctx->iv, sizeof(state.iv)); state.buf_count = rctx->buf_count; memcpy(state.buf, rctx->buf, sizeof(state.buf)); /* 'out' may not be aligned so memcpy from local variable */ memcpy(out, &state, sizeof(state)); return 0; } static int ccp_aes_cmac_import(struct ahash_request *req, const void *in) { struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx_dma(req); struct ccp_aes_cmac_exp_ctx state; /* 'in' may not be aligned so memcpy to local variable */ memcpy(&state, in, sizeof(state)); memset(rctx, 0, sizeof(*rctx)); rctx->null_msg = state.null_msg; memcpy(rctx->iv, state.iv, sizeof(rctx->iv)); rctx->buf_count = state.buf_count; memcpy(rctx->buf, state.buf, sizeof(rctx->buf)); return 0; } static int ccp_aes_cmac_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int key_len) { struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm)); u64 k0_hi, k0_lo, k1_hi, k1_lo, k2_hi, k2_lo; u64 rb_hi = 0x00, rb_lo = 0x87; struct crypto_aes_ctx aes; __be64 *gk; int ret; switch (key_len) { case AES_KEYSIZE_128: ctx->u.aes.type = CCP_AES_TYPE_128; break; case AES_KEYSIZE_192: ctx->u.aes.type = CCP_AES_TYPE_192; break; case AES_KEYSIZE_256: ctx->u.aes.type = CCP_AES_TYPE_256; break; default: return -EINVAL; } ctx->u.aes.mode = alg->mode; /* Set to zero until complete */ ctx->u.aes.key_len = 0; /* Set the key for the AES cipher used to generate the keys */ ret = aes_expandkey(&aes, key, key_len); if (ret) return ret; /* Encrypt a block of zeroes - use key area in context */ memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key)); aes_encrypt(&aes, ctx->u.aes.key, ctx->u.aes.key); memzero_explicit(&aes, sizeof(aes)); /* Generate K1 and K2 */ k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key)); k0_lo = be64_to_cpu(*((__be64 
*)ctx->u.aes.key + 1)); k1_hi = (k0_hi << 1) | (k0_lo >> 63); k1_lo = k0_lo << 1; if (ctx->u.aes.key[0] & 0x80) { k1_hi ^= rb_hi; k1_lo ^= rb_lo; } gk = (__be64 *)ctx->u.aes.k1; *gk = cpu_to_be64(k1_hi); gk++; *gk = cpu_to_be64(k1_lo); k2_hi = (k1_hi << 1) | (k1_lo >> 63); k2_lo = k1_lo << 1; if (ctx->u.aes.k1[0] & 0x80) { k2_hi ^= rb_hi; k2_lo ^= rb_lo; } gk = (__be64 *)ctx->u.aes.k2; *gk = cpu_to_be64(k2_hi); gk++; *gk = cpu_to_be64(k2_lo); ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1); sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1)); sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2)); /* Save the supplied key */ memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key)); memcpy(ctx->u.aes.key, key, key_len); ctx->u.aes.key_len = key_len; sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); return ret; } static int ccp_aes_cmac_cra_init(struct crypto_tfm *tfm) { struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm); struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); ctx->complete = ccp_aes_cmac_complete; ctx->u.aes.key_len = 0; crypto_ahash_set_reqsize_dma(ahash, sizeof(struct ccp_aes_cmac_req_ctx)); return 0; } int ccp_register_aes_cmac_algs(struct list_head *head) { struct ccp_crypto_ahash_alg *ccp_alg; struct ahash_alg *alg; struct hash_alg_common *halg; struct crypto_alg *base; int ret; ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); if (!ccp_alg) return -ENOMEM; INIT_LIST_HEAD(&ccp_alg->entry); ccp_alg->mode = CCP_AES_MODE_CMAC; alg = &ccp_alg->alg; alg->init = ccp_aes_cmac_init; alg->update = ccp_aes_cmac_update; alg->final = ccp_aes_cmac_final; alg->finup = ccp_aes_cmac_finup; alg->digest = ccp_aes_cmac_digest; alg->export = ccp_aes_cmac_export; alg->import = ccp_aes_cmac_import; alg->setkey = ccp_aes_cmac_setkey; halg = &alg->halg; halg->digestsize = AES_BLOCK_SIZE; halg->statesize = sizeof(struct ccp_aes_cmac_exp_ctx); base = &halg->base; snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "cmac(aes)"); snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "cmac-aes-ccp"); base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; base->cra_blocksize = AES_BLOCK_SIZE; base->cra_ctxsize = sizeof(struct ccp_ctx) + crypto_dma_padding(); base->cra_priority = CCP_CRA_PRIORITY; base->cra_init = ccp_aes_cmac_cra_init; base->cra_module = THIS_MODULE; ret = crypto_register_ahash(alg); if (ret) { pr_err("%s ahash algorithm registration error (%d)\n", base->cra_name, ret); kfree(ccp_alg); return ret; } list_add(&ccp_alg->entry, head); return 0; }
linux-master
drivers/crypto/ccp/ccp-crypto-aes-cmac.c
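The K1/K2 derivation in ccp_aes_cmac_setkey() above is a GF(2^128) doubling carried out on two 64-bit halves of L = AES-ECB(key, 0^128). The stand-alone sketch below mirrors that arithmetic only; the AES encryption of the zero block is assumed to have been done elsewhere, and the sample value of L is arbitrary.

#include <stdio.h>
#include <stdint.h>

/*
 * One "doubling" step as done in ccp_aes_cmac_setkey(): treat the 128-bit
 * value as two 64-bit halves in big-endian order (hi holds the most
 * significant bits), shift left by one bit and, if the bit that fell off
 * the top was set, XOR in the CMAC constant Rb (0x...87).
 */
static void cmac_double(uint64_t *hi, uint64_t *lo)
{
	const uint64_t rb_hi = 0x00, rb_lo = 0x87;
	int msb_was_set = (*hi >> 63) & 1;
	uint64_t new_hi = (*hi << 1) | (*lo >> 63);
	uint64_t new_lo = *lo << 1;

	if (msb_was_set) {
		new_hi ^= rb_hi;
		new_lo ^= rb_lo;
	}

	*hi = new_hi;
	*lo = new_lo;
}

int main(void)
{
	/* L would be AES-ECB(key, 0^128); an arbitrary value is used here */
	uint64_t hi = 0x8000000000000000ULL, lo = 0;

	cmac_double(&hi, &lo);	/* K1 = double(L)  */
	printf("K1 = %016llx%016llx\n", (unsigned long long)hi, (unsigned long long)lo);
	cmac_double(&hi, &lo);	/* K2 = double(K1) */
	printf("K2 = %016llx%016llx\n", (unsigned long long)hi, (unsigned long long)lo);
	return 0;
}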
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Secure Encrypted Virtualization (SEV) interface * * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. * * Author: Brijesh Singh <[email protected]> */ #include <linux/bitfield.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/spinlock_types.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/ccp.h> #include <linux/firmware.h> #include <linux/gfp.h> #include <linux/cpufeature.h> #include <linux/fs.h> #include <linux/fs_struct.h> #include <linux/psp.h> #include <asm/smp.h> #include <asm/cacheflush.h> #include "psp-dev.h" #include "sev-dev.h" #define DEVICE_NAME "sev" #define SEV_FW_FILE "amd/sev.fw" #define SEV_FW_NAME_SIZE 64 static DEFINE_MUTEX(sev_cmd_mutex); static struct sev_misc_dev *misc_dev; static int psp_cmd_timeout = 100; module_param(psp_cmd_timeout, int, 0644); MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands"); static int psp_probe_timeout = 5; module_param(psp_probe_timeout, int, 0644); MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe"); static char *init_ex_path; module_param(init_ex_path, charp, 0444); MODULE_PARM_DESC(init_ex_path, " Path for INIT_EX data; if set try INIT_EX"); static bool psp_init_on_probe = true; module_param(psp_init_on_probe, bool, 0444); MODULE_PARM_DESC(psp_init_on_probe, " if true, the PSP will be initialized on module init. Else the PSP will be initialized on the first command requiring it"); MODULE_FIRMWARE("amd/amd_sev_fam17h_model0xh.sbin"); /* 1st gen EPYC */ MODULE_FIRMWARE("amd/amd_sev_fam17h_model3xh.sbin"); /* 2nd gen EPYC */ MODULE_FIRMWARE("amd/amd_sev_fam19h_model0xh.sbin"); /* 3rd gen EPYC */ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */ static bool psp_dead; static int psp_timeout; /* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator * to allocate the memory, which will return aligned memory for the specified * allocation order. */ #define SEV_ES_TMR_SIZE (1024 * 1024) static void *sev_es_tmr; /* INIT_EX NV Storage: * The NV Storage is a 32Kb area and must be 4Kb page aligned. Use the page * allocator to allocate the memory, which will return aligned memory for the * specified allocation order. 
*/ #define NV_LENGTH (32 * 1024) static void *sev_init_ex_buffer; static inline bool sev_version_greater_or_equal(u8 maj, u8 min) { struct sev_device *sev = psp_master->sev_data; if (sev->api_major > maj) return true; if (sev->api_major == maj && sev->api_minor >= min) return true; return false; } static void sev_irq_handler(int irq, void *data, unsigned int status) { struct sev_device *sev = data; int reg; /* Check if it is command completion: */ if (!(status & SEV_CMD_COMPLETE)) return; /* Check if it is SEV command completion: */ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); if (FIELD_GET(PSP_CMDRESP_RESP, reg)) { sev->int_rcvd = 1; wake_up(&sev->int_queue); } } static int sev_wait_cmd_ioc(struct sev_device *sev, unsigned int *reg, unsigned int timeout) { int ret; ret = wait_event_timeout(sev->int_queue, sev->int_rcvd, timeout * HZ); if (!ret) return -ETIMEDOUT; *reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); return 0; } static int sev_cmd_buffer_len(int cmd) { switch (cmd) { case SEV_CMD_INIT: return sizeof(struct sev_data_init); case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex); case SEV_CMD_PLATFORM_STATUS: return sizeof(struct sev_user_data_status); case SEV_CMD_PEK_CSR: return sizeof(struct sev_data_pek_csr); case SEV_CMD_PEK_CERT_IMPORT: return sizeof(struct sev_data_pek_cert_import); case SEV_CMD_PDH_CERT_EXPORT: return sizeof(struct sev_data_pdh_cert_export); case SEV_CMD_LAUNCH_START: return sizeof(struct sev_data_launch_start); case SEV_CMD_LAUNCH_UPDATE_DATA: return sizeof(struct sev_data_launch_update_data); case SEV_CMD_LAUNCH_UPDATE_VMSA: return sizeof(struct sev_data_launch_update_vmsa); case SEV_CMD_LAUNCH_FINISH: return sizeof(struct sev_data_launch_finish); case SEV_CMD_LAUNCH_MEASURE: return sizeof(struct sev_data_launch_measure); case SEV_CMD_ACTIVATE: return sizeof(struct sev_data_activate); case SEV_CMD_DEACTIVATE: return sizeof(struct sev_data_deactivate); case SEV_CMD_DECOMMISSION: return sizeof(struct sev_data_decommission); case SEV_CMD_GUEST_STATUS: return sizeof(struct sev_data_guest_status); case SEV_CMD_DBG_DECRYPT: return sizeof(struct sev_data_dbg); case SEV_CMD_DBG_ENCRYPT: return sizeof(struct sev_data_dbg); case SEV_CMD_SEND_START: return sizeof(struct sev_data_send_start); case SEV_CMD_SEND_UPDATE_DATA: return sizeof(struct sev_data_send_update_data); case SEV_CMD_SEND_UPDATE_VMSA: return sizeof(struct sev_data_send_update_vmsa); case SEV_CMD_SEND_FINISH: return sizeof(struct sev_data_send_finish); case SEV_CMD_RECEIVE_START: return sizeof(struct sev_data_receive_start); case SEV_CMD_RECEIVE_FINISH: return sizeof(struct sev_data_receive_finish); case SEV_CMD_RECEIVE_UPDATE_DATA: return sizeof(struct sev_data_receive_update_data); case SEV_CMD_RECEIVE_UPDATE_VMSA: return sizeof(struct sev_data_receive_update_vmsa); case SEV_CMD_LAUNCH_UPDATE_SECRET: return sizeof(struct sev_data_launch_secret); case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware); case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id); case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report); case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel); default: return 0; } return 0; } static void *sev_fw_alloc(unsigned long len) { struct page *page; page = alloc_pages(GFP_KERNEL, get_order(len)); if (!page) return NULL; return page_address(page); } static struct file *open_file_as_root(const char *filename, int flags, umode_t mode) { struct file *fp; struct path root; struct cred *cred; 
const struct cred *old_cred; task_lock(&init_task); get_fs_root(init_task.fs, &root); task_unlock(&init_task); cred = prepare_creds(); if (!cred) return ERR_PTR(-ENOMEM); cred->fsuid = GLOBAL_ROOT_UID; old_cred = override_creds(cred); fp = file_open_root(&root, filename, flags, mode); path_put(&root); revert_creds(old_cred); return fp; } static int sev_read_init_ex_file(void) { struct sev_device *sev = psp_master->sev_data; struct file *fp; ssize_t nread; lockdep_assert_held(&sev_cmd_mutex); if (!sev_init_ex_buffer) return -EOPNOTSUPP; fp = open_file_as_root(init_ex_path, O_RDONLY, 0); if (IS_ERR(fp)) { int ret = PTR_ERR(fp); if (ret == -ENOENT) { dev_info(sev->dev, "SEV: %s does not exist and will be created later.\n", init_ex_path); ret = 0; } else { dev_err(sev->dev, "SEV: could not open %s for read, error %d\n", init_ex_path, ret); } return ret; } nread = kernel_read(fp, sev_init_ex_buffer, NV_LENGTH, NULL); if (nread != NV_LENGTH) { dev_info(sev->dev, "SEV: could not read %u bytes to non volatile memory area, ret %ld\n", NV_LENGTH, nread); } dev_dbg(sev->dev, "SEV: read %ld bytes from NV file\n", nread); filp_close(fp, NULL); return 0; } static int sev_write_init_ex_file(void) { struct sev_device *sev = psp_master->sev_data; struct file *fp; loff_t offset = 0; ssize_t nwrite; lockdep_assert_held(&sev_cmd_mutex); if (!sev_init_ex_buffer) return 0; fp = open_file_as_root(init_ex_path, O_CREAT | O_WRONLY, 0600); if (IS_ERR(fp)) { int ret = PTR_ERR(fp); dev_err(sev->dev, "SEV: could not open file for write, error %d\n", ret); return ret; } nwrite = kernel_write(fp, sev_init_ex_buffer, NV_LENGTH, &offset); vfs_fsync(fp, 0); filp_close(fp, NULL); if (nwrite != NV_LENGTH) { dev_err(sev->dev, "SEV: failed to write %u bytes to non volatile memory area, ret %ld\n", NV_LENGTH, nwrite); return -EIO; } dev_dbg(sev->dev, "SEV: write successful to NV file\n"); return 0; } static int sev_write_init_ex_file_if_required(int cmd_id) { lockdep_assert_held(&sev_cmd_mutex); if (!sev_init_ex_buffer) return 0; /* * Only a few platform commands modify the SPI/NV area, but none of the * non-platform commands do. Only INIT(_EX), PLATFORM_RESET, PEK_GEN, * PEK_CERT_IMPORT, and PDH_GEN do. */ switch (cmd_id) { case SEV_CMD_FACTORY_RESET: case SEV_CMD_INIT_EX: case SEV_CMD_PDH_GEN: case SEV_CMD_PEK_CERT_IMPORT: case SEV_CMD_PEK_GEN: break; default: return 0; } return sev_write_init_ex_file(); } static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) { struct psp_device *psp = psp_master; struct sev_device *sev; unsigned int phys_lsb, phys_msb; unsigned int reg, ret = 0; int buf_len; if (!psp || !psp->sev_data) return -ENODEV; if (psp_dead) return -EBUSY; sev = psp->sev_data; buf_len = sev_cmd_buffer_len(cmd); if (WARN_ON_ONCE(!data != !buf_len)) return -EINVAL; /* * Copy the incoming data to driver's scratch buffer as __pa() will not * work for some memory, e.g. vmalloc'd addresses, and @data may not be * physically contiguous. */ if (data) memcpy(sev->cmd_buf, data, buf_len); /* Get the physical address of the command buffer */ phys_lsb = data ? lower_32_bits(__psp_pa(sev->cmd_buf)) : 0; phys_msb = data ? 
upper_32_bits(__psp_pa(sev->cmd_buf)) : 0; dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", cmd, phys_msb, phys_lsb, psp_timeout); print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, buf_len, false); iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); sev->int_rcvd = 0; reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); /* wait for command completion */ ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout); if (ret) { if (psp_ret) *psp_ret = 0; dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); psp_dead = true; return ret; } psp_timeout = psp_cmd_timeout; if (psp_ret) *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); if (FIELD_GET(PSP_CMDRESP_STS, reg)) { dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); ret = -EIO; } else { ret = sev_write_init_ex_file_if_required(cmd); } print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, buf_len, false); /* * Copy potential output from the PSP back to data. Do this even on * failure in case the caller wants to glean something from the error. */ if (data) memcpy(data, sev->cmd_buf, buf_len); return ret; } static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; mutex_lock(&sev_cmd_mutex); rc = __sev_do_cmd_locked(cmd, data, psp_ret); mutex_unlock(&sev_cmd_mutex); return rc; } static int __sev_init_locked(int *error) { struct sev_data_init data; memset(&data, 0, sizeof(data)); if (sev_es_tmr) { /* * Do not include the encryption mask on the physical * address of the TMR (firmware should clear it anyway). */ data.tmr_address = __pa(sev_es_tmr); data.flags |= SEV_INIT_FLAGS_SEV_ES; data.tmr_len = SEV_ES_TMR_SIZE; } return __sev_do_cmd_locked(SEV_CMD_INIT, &data, error); } static int __sev_init_ex_locked(int *error) { struct sev_data_init_ex data; memset(&data, 0, sizeof(data)); data.length = sizeof(data); data.nv_address = __psp_pa(sev_init_ex_buffer); data.nv_len = NV_LENGTH; if (sev_es_tmr) { /* * Do not include the encryption mask on the physical * address of the TMR (firmware should clear it anyway). */ data.tmr_address = __pa(sev_es_tmr); data.flags |= SEV_INIT_FLAGS_SEV_ES; data.tmr_len = SEV_ES_TMR_SIZE; } return __sev_do_cmd_locked(SEV_CMD_INIT_EX, &data, error); } static inline int __sev_do_init_locked(int *psp_ret) { if (sev_init_ex_buffer) return __sev_init_ex_locked(psp_ret); else return __sev_init_locked(psp_ret); } static int __sev_platform_init_locked(int *error) { int rc = 0, psp_ret = SEV_RET_NO_FW_CALL; struct psp_device *psp = psp_master; struct sev_device *sev; if (!psp || !psp->sev_data) return -ENODEV; sev = psp->sev_data; if (sev->state == SEV_STATE_INIT) return 0; if (sev_init_ex_buffer) { rc = sev_read_init_ex_file(); if (rc) return rc; } rc = __sev_do_init_locked(&psp_ret); if (rc && psp_ret == SEV_RET_SECURE_DATA_INVALID) { /* * Initialization command returned an integrity check failure * status code, meaning that firmware load and validation of SEV * related persistent data has failed. Retrying the * initialization function should succeed by replacing the state * with a reset state. */ dev_err(sev->dev, "SEV: retrying INIT command because of SECURE_DATA_INVALID error. 
Retrying once to reset PSP SEV state."); rc = __sev_do_init_locked(&psp_ret); } if (error) *error = psp_ret; if (rc) return rc; sev->state = SEV_STATE_INIT; /* Prepare for first SEV guest launch after INIT */ wbinvd_on_all_cpus(); rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error); if (rc) return rc; dev_dbg(sev->dev, "SEV firmware initialized\n"); dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, sev->api_minor, sev->build); return 0; } int sev_platform_init(int *error) { int rc; mutex_lock(&sev_cmd_mutex); rc = __sev_platform_init_locked(error); mutex_unlock(&sev_cmd_mutex); return rc; } EXPORT_SYMBOL_GPL(sev_platform_init); static int __sev_platform_shutdown_locked(int *error) { struct sev_device *sev = psp_master->sev_data; int ret; if (!sev || sev->state == SEV_STATE_UNINIT) return 0; ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); if (ret) return ret; sev->state = SEV_STATE_UNINIT; dev_dbg(sev->dev, "SEV firmware shutdown\n"); return ret; } static int sev_platform_shutdown(int *error) { int rc; mutex_lock(&sev_cmd_mutex); rc = __sev_platform_shutdown_locked(NULL); mutex_unlock(&sev_cmd_mutex); return rc; } static int sev_get_platform_state(int *state, int *error) { struct sev_user_data_status data; int rc; rc = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, error); if (rc) return rc; *state = data.state; return rc; } static int sev_ioctl_do_reset(struct sev_issue_cmd *argp, bool writable) { int state, rc; if (!writable) return -EPERM; /* * The SEV spec requires that FACTORY_RESET must be issued in * UNINIT state. Before we go further lets check if any guest is * active. * * If FW is in WORKING state then deny the request otherwise issue * SHUTDOWN command do INIT -> UNINIT before issuing the FACTORY_RESET. * */ rc = sev_get_platform_state(&state, &argp->error); if (rc) return rc; if (state == SEV_STATE_WORKING) return -EBUSY; if (state == SEV_STATE_INIT) { rc = __sev_platform_shutdown_locked(&argp->error); if (rc) return rc; } return __sev_do_cmd_locked(SEV_CMD_FACTORY_RESET, NULL, &argp->error); } static int sev_ioctl_do_platform_status(struct sev_issue_cmd *argp) { struct sev_user_data_status data; int ret; memset(&data, 0, sizeof(data)); ret = __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, &argp->error); if (ret) return ret; if (copy_to_user((void __user *)argp->data, &data, sizeof(data))) ret = -EFAULT; return ret; } static int sev_ioctl_do_pek_pdh_gen(int cmd, struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; int rc; if (!writable) return -EPERM; if (sev->state == SEV_STATE_UNINIT) { rc = __sev_platform_init_locked(&argp->error); if (rc) return rc; } return __sev_do_cmd_locked(cmd, NULL, &argp->error); } static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_pek_csr input; struct sev_data_pek_csr data; void __user *input_address; void *blob = NULL; int ret; if (!writable) return -EPERM; if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT; memset(&data, 0, sizeof(data)); /* userspace wants to query CSR length */ if (!input.address || !input.length) goto cmd; /* allocate a physically contiguous buffer to store the CSR blob */ input_address = (void __user *)input.address; if (input.length > SEV_FW_BLOB_MAX_SIZE) return -EFAULT; blob = kzalloc(input.length, GFP_KERNEL); if (!blob) return -ENOMEM; data.address = __psp_pa(blob); data.len = input.length; cmd: if (sev->state == 
SEV_STATE_UNINIT) { ret = __sev_platform_init_locked(&argp->error); if (ret) goto e_free_blob; } ret = __sev_do_cmd_locked(SEV_CMD_PEK_CSR, &data, &argp->error); /* If we query the CSR length, FW responded with expected data. */ input.length = data.len; if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { ret = -EFAULT; goto e_free_blob; } if (blob) { if (copy_to_user(input_address, blob, input.length)) ret = -EFAULT; } e_free_blob: kfree(blob); return ret; } void *psp_copy_user_blob(u64 uaddr, u32 len) { if (!uaddr || !len) return ERR_PTR(-EINVAL); /* verify that blob length does not exceed our limit */ if (len > SEV_FW_BLOB_MAX_SIZE) return ERR_PTR(-EINVAL); return memdup_user((void __user *)uaddr, len); } EXPORT_SYMBOL_GPL(psp_copy_user_blob); static int sev_get_api_version(void) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_status status; int error = 0, ret; ret = sev_platform_status(&status, &error); if (ret) { dev_err(sev->dev, "SEV: failed to get status. Error: %#x\n", error); return 1; } sev->api_major = status.api_major; sev->api_minor = status.api_minor; sev->build = status.build; sev->state = status.state; return 0; } static int sev_get_firmware(struct device *dev, const struct firmware **firmware) { char fw_name_specific[SEV_FW_NAME_SIZE]; char fw_name_subset[SEV_FW_NAME_SIZE]; snprintf(fw_name_specific, sizeof(fw_name_specific), "amd/amd_sev_fam%.2xh_model%.2xh.sbin", boot_cpu_data.x86, boot_cpu_data.x86_model); snprintf(fw_name_subset, sizeof(fw_name_subset), "amd/amd_sev_fam%.2xh_model%.1xxh.sbin", boot_cpu_data.x86, (boot_cpu_data.x86_model & 0xf0) >> 4); /* Check for SEV FW for a particular model. * Ex. amd_sev_fam17h_model00h.sbin for Family 17h Model 00h * * or * * Check for SEV FW common to a subset of models. * Ex. amd_sev_fam17h_model0xh.sbin for * Family 17h Model 00h -- Family 17h Model 0Fh * * or * * Fall-back to using generic name: sev.fw */ if ((firmware_request_nowarn(firmware, fw_name_specific, dev) >= 0) || (firmware_request_nowarn(firmware, fw_name_subset, dev) >= 0) || (firmware_request_nowarn(firmware, SEV_FW_FILE, dev) >= 0)) return 0; return -ENOENT; } /* Don't fail if SEV FW couldn't be updated. Continue with existing SEV FW */ static int sev_update_firmware(struct device *dev) { struct sev_data_download_firmware *data; const struct firmware *firmware; int ret, error, order; struct page *p; u64 data_size; if (!sev_version_greater_or_equal(0, 15)) { dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); return -1; } if (sev_get_firmware(dev, &firmware) == -ENOENT) { dev_dbg(dev, "No SEV firmware file present\n"); return -1; } /* * SEV FW expects the physical address given to it to be 32 * byte aligned. Memory allocated has structure placed at the * beginning followed by the firmware being passed to the SEV * FW. Allocate enough memory for data structure + alignment * padding + SEV FW. */ data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); order = get_order(firmware->size + data_size); p = alloc_pages(GFP_KERNEL, order); if (!p) { ret = -1; goto fw_err; } /* * Copy firmware data to a kernel allocated contiguous * memory region. */ data = page_address(p); memcpy(page_address(p) + data_size, firmware->data, firmware->size); data->address = __psp_pa(page_address(p) + data_size); data->len = firmware->size; ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); /* * A quirk for fixing the committed TCB version, when upgrading from * earlier firmware version than 1.50. 
*/ if (!ret && !sev_version_greater_or_equal(1, 50)) ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); if (ret) dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); else dev_info(dev, "SEV firmware update successful\n"); __free_pages(p, order); fw_err: release_firmware(firmware); return ret; } static int sev_ioctl_do_pek_import(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_pek_cert_import input; struct sev_data_pek_cert_import data; void *pek_blob, *oca_blob; int ret; if (!writable) return -EPERM; if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT; /* copy PEK certificate blobs from userspace */ pek_blob = psp_copy_user_blob(input.pek_cert_address, input.pek_cert_len); if (IS_ERR(pek_blob)) return PTR_ERR(pek_blob); data.reserved = 0; data.pek_cert_address = __psp_pa(pek_blob); data.pek_cert_len = input.pek_cert_len; /* copy PEK certificate blobs from userspace */ oca_blob = psp_copy_user_blob(input.oca_cert_address, input.oca_cert_len); if (IS_ERR(oca_blob)) { ret = PTR_ERR(oca_blob); goto e_free_pek; } data.oca_cert_address = __psp_pa(oca_blob); data.oca_cert_len = input.oca_cert_len; /* If platform is not in INIT state then transition it to INIT */ if (sev->state != SEV_STATE_INIT) { ret = __sev_platform_init_locked(&argp->error); if (ret) goto e_free_oca; } ret = __sev_do_cmd_locked(SEV_CMD_PEK_CERT_IMPORT, &data, &argp->error); e_free_oca: kfree(oca_blob); e_free_pek: kfree(pek_blob); return ret; } static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp) { struct sev_user_data_get_id2 input; struct sev_data_get_id data; void __user *input_address; void *id_blob = NULL; int ret; /* SEV GET_ID is available from SEV API v0.16 and up */ if (!sev_version_greater_or_equal(0, 16)) return -ENOTSUPP; if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT; input_address = (void __user *)input.address; if (input.address && input.length) { /* * The length of the ID shouldn't be assumed by software since * it may change in the future. The allocation size is limited * to 1 << (PAGE_SHIFT + MAX_ORDER) by the page allocator. * If the allocation fails, simply return ENOMEM rather than * warning in the kernel log. */ id_blob = kzalloc(input.length, GFP_KERNEL | __GFP_NOWARN); if (!id_blob) return -ENOMEM; data.address = __psp_pa(id_blob); data.len = input.length; } else { data.address = 0; data.len = 0; } ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, &data, &argp->error); /* * Firmware will return the length of the ID value (either the minimum * required length or the actual length written), return it to the user. */ input.length = data.len; if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { ret = -EFAULT; goto e_free; } if (id_blob) { if (copy_to_user(input_address, id_blob, data.len)) { ret = -EFAULT; goto e_free; } } e_free: kfree(id_blob); return ret; } static int sev_ioctl_do_get_id(struct sev_issue_cmd *argp) { struct sev_data_get_id *data; u64 data_size, user_size; void *id_blob, *mem; int ret; /* SEV GET_ID available from SEV API v0.16 and up */ if (!sev_version_greater_or_equal(0, 16)) return -ENOTSUPP; /* SEV FW expects the buffer it fills with the ID to be * 8-byte aligned. Memory allocated should be enough to * hold data structure + alignment padding + memory * where SEV FW writes the ID. 
*/ data_size = ALIGN(sizeof(struct sev_data_get_id), 8); user_size = sizeof(struct sev_user_data_get_id); mem = kzalloc(data_size + user_size, GFP_KERNEL); if (!mem) return -ENOMEM; data = mem; id_blob = mem + data_size; data->address = __psp_pa(id_blob); data->len = user_size; ret = __sev_do_cmd_locked(SEV_CMD_GET_ID, data, &argp->error); if (!ret) { if (copy_to_user((void __user *)argp->data, id_blob, data->len)) ret = -EFAULT; } kfree(mem); return ret; } static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) { struct sev_device *sev = psp_master->sev_data; struct sev_user_data_pdh_cert_export input; void *pdh_blob = NULL, *cert_blob = NULL; struct sev_data_pdh_cert_export data; void __user *input_cert_chain_address; void __user *input_pdh_cert_address; int ret; /* If platform is not in INIT state then transition it to INIT. */ if (sev->state != SEV_STATE_INIT) { if (!writable) return -EPERM; ret = __sev_platform_init_locked(&argp->error); if (ret) return ret; } if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) return -EFAULT; memset(&data, 0, sizeof(data)); /* Userspace wants to query the certificate length. */ if (!input.pdh_cert_address || !input.pdh_cert_len || !input.cert_chain_address) goto cmd; input_pdh_cert_address = (void __user *)input.pdh_cert_address; input_cert_chain_address = (void __user *)input.cert_chain_address; /* Allocate a physically contiguous buffer to store the PDH blob. */ if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) return -EFAULT; /* Allocate a physically contiguous buffer to store the cert chain blob. */ if (input.cert_chain_len > SEV_FW_BLOB_MAX_SIZE) return -EFAULT; pdh_blob = kzalloc(input.pdh_cert_len, GFP_KERNEL); if (!pdh_blob) return -ENOMEM; data.pdh_cert_address = __psp_pa(pdh_blob); data.pdh_cert_len = input.pdh_cert_len; cert_blob = kzalloc(input.cert_chain_len, GFP_KERNEL); if (!cert_blob) { ret = -ENOMEM; goto e_free_pdh; } data.cert_chain_address = __psp_pa(cert_blob); data.cert_chain_len = input.cert_chain_len; cmd: ret = __sev_do_cmd_locked(SEV_CMD_PDH_CERT_EXPORT, &data, &argp->error); /* If we query the length, FW responded with expected data. 
*/ input.cert_chain_len = data.cert_chain_len; input.pdh_cert_len = data.pdh_cert_len; if (copy_to_user((void __user *)argp->data, &input, sizeof(input))) { ret = -EFAULT; goto e_free_cert; } if (pdh_blob) { if (copy_to_user(input_pdh_cert_address, pdh_blob, input.pdh_cert_len)) { ret = -EFAULT; goto e_free_cert; } } if (cert_blob) { if (copy_to_user(input_cert_chain_address, cert_blob, input.cert_chain_len)) ret = -EFAULT; } e_free_cert: kfree(cert_blob); e_free_pdh: kfree(pdh_blob); return ret; } static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) { void __user *argp = (void __user *)arg; struct sev_issue_cmd input; int ret = -EFAULT; bool writable = file->f_mode & FMODE_WRITE; if (!psp_master || !psp_master->sev_data) return -ENODEV; if (ioctl != SEV_ISSUE_CMD) return -EINVAL; if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) return -EFAULT; if (input.cmd > SEV_MAX) return -EINVAL; mutex_lock(&sev_cmd_mutex); switch (input.cmd) { case SEV_FACTORY_RESET: ret = sev_ioctl_do_reset(&input, writable); break; case SEV_PLATFORM_STATUS: ret = sev_ioctl_do_platform_status(&input); break; case SEV_PEK_GEN: ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PEK_GEN, &input, writable); break; case SEV_PDH_GEN: ret = sev_ioctl_do_pek_pdh_gen(SEV_CMD_PDH_GEN, &input, writable); break; case SEV_PEK_CSR: ret = sev_ioctl_do_pek_csr(&input, writable); break; case SEV_PEK_CERT_IMPORT: ret = sev_ioctl_do_pek_import(&input, writable); break; case SEV_PDH_CERT_EXPORT: ret = sev_ioctl_do_pdh_export(&input, writable); break; case SEV_GET_ID: pr_warn_once("SEV_GET_ID command is deprecated, use SEV_GET_ID2\n"); ret = sev_ioctl_do_get_id(&input); break; case SEV_GET_ID2: ret = sev_ioctl_do_get_id2(&input); break; default: ret = -EINVAL; goto out; } if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: mutex_unlock(&sev_cmd_mutex); return ret; } static const struct file_operations sev_fops = { .owner = THIS_MODULE, .unlocked_ioctl = sev_ioctl, }; int sev_platform_status(struct sev_user_data_status *data, int *error) { return sev_do_cmd(SEV_CMD_PLATFORM_STATUS, data, error); } EXPORT_SYMBOL_GPL(sev_platform_status); int sev_guest_deactivate(struct sev_data_deactivate *data, int *error) { return sev_do_cmd(SEV_CMD_DEACTIVATE, data, error); } EXPORT_SYMBOL_GPL(sev_guest_deactivate); int sev_guest_activate(struct sev_data_activate *data, int *error) { return sev_do_cmd(SEV_CMD_ACTIVATE, data, error); } EXPORT_SYMBOL_GPL(sev_guest_activate); int sev_guest_decommission(struct sev_data_decommission *data, int *error) { return sev_do_cmd(SEV_CMD_DECOMMISSION, data, error); } EXPORT_SYMBOL_GPL(sev_guest_decommission); int sev_guest_df_flush(int *error) { return sev_do_cmd(SEV_CMD_DF_FLUSH, NULL, error); } EXPORT_SYMBOL_GPL(sev_guest_df_flush); static void sev_exit(struct kref *ref) { misc_deregister(&misc_dev->misc); kfree(misc_dev); misc_dev = NULL; } static int sev_misc_init(struct sev_device *sev) { struct device *dev = sev->dev; int ret; /* * SEV feature support can be detected on multiple devices but the SEV * FW commands must be issued on the master. During probe, we do not * know the master hence we create /dev/sev on the first device probe. * sev_do_cmd() finds the right master device to which to issue the * command to the firmware. 
*/ if (!misc_dev) { struct miscdevice *misc; misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL); if (!misc_dev) return -ENOMEM; misc = &misc_dev->misc; misc->minor = MISC_DYNAMIC_MINOR; misc->name = DEVICE_NAME; misc->fops = &sev_fops; ret = misc_register(misc); if (ret) return ret; kref_init(&misc_dev->refcount); } else { kref_get(&misc_dev->refcount); } init_waitqueue_head(&sev->int_queue); sev->misc = misc_dev; dev_dbg(dev, "registered SEV device\n"); return 0; } int sev_dev_init(struct psp_device *psp) { struct device *dev = psp->dev; struct sev_device *sev; int ret = -ENOMEM; if (!boot_cpu_has(X86_FEATURE_SEV)) { dev_info_once(dev, "SEV: memory encryption not enabled by BIOS\n"); return 0; } sev = devm_kzalloc(dev, sizeof(*sev), GFP_KERNEL); if (!sev) goto e_err; sev->cmd_buf = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0); if (!sev->cmd_buf) goto e_sev; psp->sev_data = sev; sev->dev = dev; sev->psp = psp; sev->io_regs = psp->io_regs; sev->vdata = (struct sev_vdata *)psp->vdata->sev; if (!sev->vdata) { ret = -ENODEV; dev_err(dev, "sev: missing driver data\n"); goto e_buf; } psp_set_sev_irq_handler(psp, sev_irq_handler, sev); ret = sev_misc_init(sev); if (ret) goto e_irq; dev_notice(dev, "sev enabled\n"); return 0; e_irq: psp_clear_sev_irq_handler(psp); e_buf: devm_free_pages(dev, (unsigned long)sev->cmd_buf); e_sev: devm_kfree(dev, sev); e_err: psp->sev_data = NULL; dev_notice(dev, "sev initialization failed\n"); return ret; } static void sev_firmware_shutdown(struct sev_device *sev) { sev_platform_shutdown(NULL); if (sev_es_tmr) { /* The TMR area was encrypted, flush it from the cache */ wbinvd_on_all_cpus(); free_pages((unsigned long)sev_es_tmr, get_order(SEV_ES_TMR_SIZE)); sev_es_tmr = NULL; } if (sev_init_ex_buffer) { free_pages((unsigned long)sev_init_ex_buffer, get_order(NV_LENGTH)); sev_init_ex_buffer = NULL; } } void sev_dev_destroy(struct psp_device *psp) { struct sev_device *sev = psp->sev_data; if (!sev) return; sev_firmware_shutdown(sev); if (sev->misc) kref_put(&misc_dev->refcount, sev_exit); psp_clear_sev_irq_handler(psp); } int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, void *data, int *error) { if (!filep || filep->f_op != &sev_fops) return -EBADF; return sev_do_cmd(cmd, data, error); } EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; int error, rc; if (!sev) return; psp_timeout = psp_probe_timeout; if (sev_get_api_version()) goto err; if (sev_update_firmware(sev->dev) == 0) sev_get_api_version(); /* If an init_ex_path is provided rely on INIT_EX for PSP initialization * instead of INIT. */ if (init_ex_path) { sev_init_ex_buffer = sev_fw_alloc(NV_LENGTH); if (!sev_init_ex_buffer) { dev_err(sev->dev, "SEV: INIT_EX NV memory allocation failed\n"); goto err; } } /* Obtain the TMR memory area for SEV-ES use */ sev_es_tmr = sev_fw_alloc(SEV_ES_TMR_SIZE); if (sev_es_tmr) /* Must flush the cache before giving it to the firmware */ clflush_cache_range(sev_es_tmr, SEV_ES_TMR_SIZE); else dev_warn(sev->dev, "SEV: TMR allocation failed, SEV-ES support unavailable\n"); if (!psp_init_on_probe) return; /* Initialize the platform */ rc = sev_platform_init(&error); if (rc) dev_err(sev->dev, "SEV: failed to INIT error %#x, rc %d\n", error, rc); return; err: psp_master->sev_data = NULL; } void sev_pci_exit(void) { struct sev_device *sev = psp_master->sev_data; if (!sev) return; sev_firmware_shutdown(sev); }
linux-master
drivers/crypto/ccp/sev-dev.c
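As a usage illustration for the /dev/sev character device handled by sev_ioctl() above, here is a hedged user-space sketch that issues SEV_PLATFORM_STATUS through the SEV_ISSUE_CMD ioctl. The header name <linux/psp-sev.h> and the struct field names are taken from the kernel uapi as commonly shipped and should be checked against the installed headers; the program is not part of the driver.

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/psp-sev.h>	/* SEV_ISSUE_CMD, struct sev_issue_cmd (uapi) */

int main(void)
{
	struct sev_user_data_status status = {};
	struct sev_issue_cmd input = {};
	int fd, rc;

	/* A read-only open is enough for status queries (no writable check) */
	fd = open("/dev/sev", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/sev");
		return 1;
	}

	input.cmd = SEV_PLATFORM_STATUS;
	input.data = (__u64)(uintptr_t)&status;

	rc = ioctl(fd, SEV_ISSUE_CMD, &input);
	if (rc)
		fprintf(stderr, "ioctl failed, firmware error %#x\n", input.error);
	else
		printf("SEV API %u.%u build %u, state %u\n",
		       status.api_major, status.api_minor,
		       status.build, status.state);

	close(fd);
	return rc ? 1 : 0;
}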
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) DES3 crypto API support * * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook <[email protected]> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <crypto/algapi.h> #include <crypto/scatterwalk.h> #include <crypto/internal/des.h> #include "ccp-crypto.h" static int ccp_des3_complete(struct crypto_async_request *async_req, int ret) { struct skcipher_request *req = skcipher_request_cast(async_req); struct ccp_ctx *ctx = crypto_skcipher_ctx_dma( crypto_skcipher_reqtfm(req)); struct ccp_des3_req_ctx *rctx = skcipher_request_ctx_dma(req); if (ret) return ret; if (ctx->u.des3.mode != CCP_DES3_MODE_ECB) memcpy(req->iv, rctx->iv, DES3_EDE_BLOCK_SIZE); return 0; } static int ccp_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) { struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm); struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); int err; err = verify_skcipher_des3_key(tfm, key); if (err) return err; /* It's not clear that there is any support for a keysize of 112. * If needed, the caller should make K1 == K3 */ ctx->u.des3.type = CCP_DES3_TYPE_168; ctx->u.des3.mode = alg->mode; ctx->u.des3.key_len = key_len; memcpy(ctx->u.des3.key, key, key_len); sg_init_one(&ctx->u.des3.key_sg, ctx->u.des3.key, key_len); return 0; } static int ccp_des3_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); struct ccp_des3_req_ctx *rctx = skcipher_request_ctx_dma(req); struct scatterlist *iv_sg = NULL; unsigned int iv_len = 0; if (!ctx->u.des3.key_len) return -EINVAL; if (((ctx->u.des3.mode == CCP_DES3_MODE_ECB) || (ctx->u.des3.mode == CCP_DES3_MODE_CBC)) && (req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) return -EINVAL; if (ctx->u.des3.mode != CCP_DES3_MODE_ECB) { if (!req->iv) return -EINVAL; memcpy(rctx->iv, req->iv, DES3_EDE_BLOCK_SIZE); iv_sg = &rctx->iv_sg; iv_len = DES3_EDE_BLOCK_SIZE; sg_init_one(iv_sg, rctx->iv, iv_len); } memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_DES3; rctx->cmd.u.des3.type = ctx->u.des3.type; rctx->cmd.u.des3.mode = ctx->u.des3.mode; rctx->cmd.u.des3.action = (encrypt) ? 
CCP_DES3_ACTION_ENCRYPT : CCP_DES3_ACTION_DECRYPT; rctx->cmd.u.des3.key = &ctx->u.des3.key_sg; rctx->cmd.u.des3.key_len = ctx->u.des3.key_len; rctx->cmd.u.des3.iv = iv_sg; rctx->cmd.u.des3.iv_len = iv_len; rctx->cmd.u.des3.src = req->src; rctx->cmd.u.des3.src_len = req->cryptlen; rctx->cmd.u.des3.dst = req->dst; return ccp_crypto_enqueue_request(&req->base, &rctx->cmd); } static int ccp_des3_encrypt(struct skcipher_request *req) { return ccp_des3_crypt(req, true); } static int ccp_des3_decrypt(struct skcipher_request *req) { return ccp_des3_crypt(req, false); } static int ccp_des3_init_tfm(struct crypto_skcipher *tfm) { struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); ctx->complete = ccp_des3_complete; ctx->u.des3.key_len = 0; crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_des3_req_ctx)); return 0; } static const struct skcipher_alg ccp_des3_defaults = { .setkey = ccp_des3_setkey, .encrypt = ccp_des3_encrypt, .decrypt = ccp_des3_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .init = ccp_des3_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING, .base.cra_priority = CCP_CRA_PRIORITY, .base.cra_module = THIS_MODULE, }; struct ccp_des3_def { enum ccp_des3_mode mode; unsigned int version; const char *name; const char *driver_name; unsigned int blocksize; unsigned int ivsize; const struct skcipher_alg *alg_defaults; }; static const struct ccp_des3_def des3_algs[] = { { .mode = CCP_DES3_MODE_ECB, .version = CCP_VERSION(5, 0), .name = "ecb(des3_ede)", .driver_name = "ecb-des3-ccp", .blocksize = DES3_EDE_BLOCK_SIZE, .ivsize = 0, .alg_defaults = &ccp_des3_defaults, }, { .mode = CCP_DES3_MODE_CBC, .version = CCP_VERSION(5, 0), .name = "cbc(des3_ede)", .driver_name = "cbc-des3-ccp", .blocksize = DES3_EDE_BLOCK_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .alg_defaults = &ccp_des3_defaults, }, }; static int ccp_register_des3_alg(struct list_head *head, const struct ccp_des3_def *def) { struct ccp_crypto_skcipher_alg *ccp_alg; struct skcipher_alg *alg; int ret; ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); if (!ccp_alg) return -ENOMEM; INIT_LIST_HEAD(&ccp_alg->entry); ccp_alg->mode = def->mode; /* Copy the defaults and override as necessary */ alg = &ccp_alg->alg; *alg = *def->alg_defaults; snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->driver_name); alg->base.cra_blocksize = def->blocksize; alg->ivsize = def->ivsize; ret = crypto_register_skcipher(alg); if (ret) { pr_err("%s skcipher algorithm registration error (%d)\n", alg->base.cra_name, ret); kfree(ccp_alg); return ret; } list_add(&ccp_alg->entry, head); return 0; } int ccp_register_des3_algs(struct list_head *head) { int i, ret; unsigned int ccpversion = ccp_version(); for (i = 0; i < ARRAY_SIZE(des3_algs); i++) { if (des3_algs[i].version > ccpversion) continue; ret = ccp_register_des3_alg(head, &des3_algs[i]); if (ret) return ret; } return 0; }
linux-master
drivers/crypto/ccp/ccp-crypto-des3.c
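To show how the algorithms registered above are reached, here is a hedged kernel-side sketch of a caller encrypting one block with cbc(des3_ede) through the generic skcipher API. The function name, the placeholder key and the buffer handling are illustrative assumptions rather than part of the driver; the crypto core picks whichever registered implementation has the highest priority, which may be cbc-des3-ccp when the CCP is present.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <crypto/skcipher.h>
#include <crypto/des.h>

/*
 * Encrypt one DES3_EDE_BLOCK_SIZE block in place. buf must be DMA-capable
 * memory (e.g. kmalloc'd), since a hardware backend will DMA-map it.
 */
static int des3_cbc_encrypt_one_block(u8 *buf)
{
	static const u8 key[DES3_EDE_KEY_SIZE] = {
		0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef,
		0xfe, 0xdc, 0xba, 0x98, 0x76, 0x54, 0x32, 0x10,
		0x0f, 0x1e, 0x2d, 0x3c, 0x4b, 0x5a, 0x69, 0x78,
	}; /* placeholder key for illustration only */
	u8 iv[DES3_EDE_BLOCK_SIZE] = {};
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(des3_ede)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out_req;

	sg_init_one(&sg, buf, DES3_EDE_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, DES3_EDE_BLOCK_SIZE, iv);

	/* The CCP backend is asynchronous; crypto_wait_req() sleeps until done */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out_req:
	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}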
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) driver * * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. * * Author: Gary R Hook <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/spinlock.h> #include <linux/mutex.h> #include <linux/ccp.h> #include "ccp-dev.h" #include "../../dma/dmaengine.h" #define CCP_DMA_WIDTH(_mask) \ ({ \ u64 mask = _mask + 1; \ (mask == 0) ? 64 : fls64(mask); \ }) /* The CCP as a DMA provider can be configured for public or private * channels. Default is specified in the vdata for the device (PCI ID). * This module parameter will override for all channels on all devices: * dma_chan_attr = 0x2 to force all channels public * = 0x1 to force all channels private * = 0x0 to defer to the vdata setting * = any other value: warning, revert to 0x0 */ static unsigned int dma_chan_attr = CCP_DMA_DFLT; module_param(dma_chan_attr, uint, 0444); MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public"); static unsigned int dmaengine = 1; module_param(dmaengine, uint, 0444); MODULE_PARM_DESC(dmaengine, "Register services with the DMA subsystem (any non-zero value, default: 1)"); static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp) { switch (dma_chan_attr) { case CCP_DMA_DFLT: return ccp->vdata->dma_chan_attr; case CCP_DMA_PRIV: return DMA_PRIVATE; case CCP_DMA_PUB: return 0; default: dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n", dma_chan_attr); return ccp->vdata->dma_chan_attr; } } static void ccp_free_cmd_resources(struct ccp_device *ccp, struct list_head *list) { struct ccp_dma_cmd *cmd, *ctmp; list_for_each_entry_safe(cmd, ctmp, list, entry) { list_del(&cmd->entry); kmem_cache_free(ccp->dma_cmd_cache, cmd); } } static void ccp_free_desc_resources(struct ccp_device *ccp, struct list_head *list) { struct ccp_dma_desc *desc, *dtmp; list_for_each_entry_safe(desc, dtmp, list, entry) { ccp_free_cmd_resources(ccp, &desc->active); ccp_free_cmd_resources(ccp, &desc->pending); list_del(&desc->entry); kmem_cache_free(ccp->dma_desc_cache, desc); } } static void ccp_free_chan_resources(struct dma_chan *dma_chan) { struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, dma_chan); unsigned long flags; dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan); spin_lock_irqsave(&chan->lock, flags); ccp_free_desc_resources(chan->ccp, &chan->complete); ccp_free_desc_resources(chan->ccp, &chan->active); ccp_free_desc_resources(chan->ccp, &chan->pending); ccp_free_desc_resources(chan->ccp, &chan->created); spin_unlock_irqrestore(&chan->lock, flags); } static void ccp_cleanup_desc_resources(struct ccp_device *ccp, struct list_head *list) { struct ccp_dma_desc *desc, *dtmp; list_for_each_entry_safe_reverse(desc, dtmp, list, entry) { if (!async_tx_test_ack(&desc->tx_desc)) continue; dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc); ccp_free_cmd_resources(ccp, &desc->active); ccp_free_cmd_resources(ccp, &desc->pending); list_del(&desc->entry); kmem_cache_free(ccp->dma_desc_cache, desc); } } static void ccp_do_cleanup(unsigned long data) { struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data; unsigned long flags; dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__, dma_chan_name(&chan->dma_chan)); spin_lock_irqsave(&chan->lock, flags); ccp_cleanup_desc_resources(chan->ccp, &chan->complete); spin_unlock_irqrestore(&chan->lock, flags); } static 
int ccp_issue_next_cmd(struct ccp_dma_desc *desc) { struct ccp_dma_cmd *cmd; int ret; cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry); list_move(&cmd->entry, &desc->active); dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__, desc->tx_desc.cookie, cmd); ret = ccp_enqueue_cmd(&cmd->ccp_cmd); if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY)) return 0; dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__, ret, desc->tx_desc.cookie, cmd); return ret; } static void ccp_free_active_cmd(struct ccp_dma_desc *desc) { struct ccp_dma_cmd *cmd; cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd, entry); if (!cmd) return; dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n", __func__, desc->tx_desc.cookie, cmd); list_del(&cmd->entry); kmem_cache_free(desc->ccp->dma_cmd_cache, cmd); } static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan, struct ccp_dma_desc *desc) { /* Move current DMA descriptor to the complete list */ if (desc) list_move(&desc->entry, &chan->complete); /* Get the next DMA descriptor on the active list */ desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc, entry); return desc; } static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan, struct ccp_dma_desc *desc) { struct dma_async_tx_descriptor *tx_desc; unsigned long flags; /* Loop over descriptors until one is found with commands */ do { if (desc) { /* Remove the DMA command from the list and free it */ ccp_free_active_cmd(desc); if (!list_empty(&desc->pending)) { /* No errors, keep going */ if (desc->status != DMA_ERROR) return desc; /* Error, free remaining commands and move on */ ccp_free_cmd_resources(desc->ccp, &desc->pending); } tx_desc = &desc->tx_desc; } else { tx_desc = NULL; } spin_lock_irqsave(&chan->lock, flags); if (desc) { if (desc->status != DMA_ERROR) desc->status = DMA_COMPLETE; dev_dbg(desc->ccp->dev, "%s - tx %d complete, status=%u\n", __func__, desc->tx_desc.cookie, desc->status); dma_cookie_complete(tx_desc); dma_descriptor_unmap(tx_desc); } desc = __ccp_next_dma_desc(chan, desc); spin_unlock_irqrestore(&chan->lock, flags); if (tx_desc) { dmaengine_desc_get_callback_invoke(tx_desc, NULL); dma_run_dependencies(tx_desc); } } while (desc); return NULL; } static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan) { struct ccp_dma_desc *desc; if (list_empty(&chan->pending)) return NULL; desc = list_empty(&chan->active) ? 
list_first_entry(&chan->pending, struct ccp_dma_desc, entry) : NULL; list_splice_tail_init(&chan->pending, &chan->active); return desc; } static void ccp_cmd_callback(void *data, int err) { struct ccp_dma_desc *desc = data; struct ccp_dma_chan *chan; int ret; if (err == -EINPROGRESS) return; chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan, dma_chan); dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n", __func__, desc->tx_desc.cookie, err); if (err) desc->status = DMA_ERROR; while (true) { /* Check for DMA descriptor completion */ desc = ccp_handle_active_desc(chan, desc); /* Don't submit cmd if no descriptor or DMA is paused */ if (!desc || (chan->status == DMA_PAUSED)) break; ret = ccp_issue_next_cmd(desc); if (!ret) break; desc->status = DMA_ERROR; } tasklet_schedule(&chan->cleanup_tasklet); } static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc) { struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc, tx_desc); struct ccp_dma_chan *chan; dma_cookie_t cookie; unsigned long flags; chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan); spin_lock_irqsave(&chan->lock, flags); cookie = dma_cookie_assign(tx_desc); list_move_tail(&desc->entry, &chan->pending); spin_unlock_irqrestore(&chan->lock, flags); dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n", __func__, cookie); return cookie; } static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan) { struct ccp_dma_cmd *cmd; cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT); if (cmd) memset(cmd, 0, sizeof(*cmd)); return cmd; } static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan, unsigned long flags) { struct ccp_dma_desc *desc; desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT); if (!desc) return NULL; dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan); desc->tx_desc.flags = flags; desc->tx_desc.tx_submit = ccp_tx_submit; desc->ccp = chan->ccp; INIT_LIST_HEAD(&desc->entry); INIT_LIST_HEAD(&desc->pending); INIT_LIST_HEAD(&desc->active); desc->status = DMA_IN_PROGRESS; return desc; } static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan, struct scatterlist *dst_sg, unsigned int dst_nents, struct scatterlist *src_sg, unsigned int src_nents, unsigned long flags) { struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, dma_chan); struct ccp_device *ccp = chan->ccp; struct ccp_dma_desc *desc; struct ccp_dma_cmd *cmd; struct ccp_cmd *ccp_cmd; struct ccp_passthru_nomap_engine *ccp_pt; unsigned int src_offset, src_len; unsigned int dst_offset, dst_len; unsigned int len; unsigned long sflags; size_t total_len; if (!dst_sg || !src_sg) return NULL; if (!dst_nents || !src_nents) return NULL; desc = ccp_alloc_dma_desc(chan, flags); if (!desc) return NULL; total_len = 0; src_len = sg_dma_len(src_sg); src_offset = 0; dst_len = sg_dma_len(dst_sg); dst_offset = 0; while (true) { if (!src_len) { src_nents--; if (!src_nents) break; src_sg = sg_next(src_sg); if (!src_sg) break; src_len = sg_dma_len(src_sg); src_offset = 0; continue; } if (!dst_len) { dst_nents--; if (!dst_nents) break; dst_sg = sg_next(dst_sg); if (!dst_sg) break; dst_len = sg_dma_len(dst_sg); dst_offset = 0; continue; } len = min(dst_len, src_len); cmd = ccp_alloc_dma_cmd(chan); if (!cmd) goto err; ccp_cmd = &cmd->ccp_cmd; ccp_cmd->ccp = chan->ccp; ccp_pt = &ccp_cmd->u.passthru_nomap; ccp_cmd->flags = CCP_CMD_MAY_BACKLOG; ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP; ccp_cmd->engine = CCP_ENGINE_PASSTHRU; 
ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP; ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP; ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset; ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset; ccp_pt->src_len = len; ccp_pt->final = 1; ccp_cmd->callback = ccp_cmd_callback; ccp_cmd->data = desc; list_add_tail(&cmd->entry, &desc->pending); dev_dbg(ccp->dev, "%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__, cmd, &ccp_pt->src_dma, &ccp_pt->dst_dma, ccp_pt->src_len); total_len += len; src_len -= len; src_offset += len; dst_len -= len; dst_offset += len; } desc->len = total_len; if (list_empty(&desc->pending)) goto err; dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc); spin_lock_irqsave(&chan->lock, sflags); list_add_tail(&desc->entry, &chan->created); spin_unlock_irqrestore(&chan->lock, sflags); return desc; err: ccp_free_cmd_resources(ccp, &desc->pending); kmem_cache_free(ccp->dma_desc_cache, desc); return NULL; } static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy( struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags) { struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, dma_chan); struct ccp_dma_desc *desc; struct scatterlist dst_sg, src_sg; dev_dbg(chan->ccp->dev, "%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n", __func__, &src, &dst, len, flags); sg_init_table(&dst_sg, 1); sg_dma_address(&dst_sg) = dst; sg_dma_len(&dst_sg) = len; sg_init_table(&src_sg, 1); sg_dma_address(&src_sg) = src; sg_dma_len(&src_sg) = len; desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags); if (!desc) return NULL; return &desc->tx_desc; } static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt( struct dma_chan *dma_chan, unsigned long flags) { struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, dma_chan); struct ccp_dma_desc *desc; desc = ccp_alloc_dma_desc(chan, flags); if (!desc) return NULL; return &desc->tx_desc; } static void ccp_issue_pending(struct dma_chan *dma_chan) { struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, dma_chan); struct ccp_dma_desc *desc; unsigned long flags; dev_dbg(chan->ccp->dev, "%s\n", __func__); spin_lock_irqsave(&chan->lock, flags); desc = __ccp_pending_to_active(chan); spin_unlock_irqrestore(&chan->lock, flags); /* If there was nothing active, start processing */ if (desc) ccp_cmd_callback(desc, 0); } static enum dma_status ccp_tx_status(struct dma_chan *dma_chan, dma_cookie_t cookie, struct dma_tx_state *state) { struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, dma_chan); struct ccp_dma_desc *desc; enum dma_status ret; unsigned long flags; if (chan->status == DMA_PAUSED) { ret = DMA_PAUSED; goto out; } ret = dma_cookie_status(dma_chan, cookie, state); if (ret == DMA_COMPLETE) { spin_lock_irqsave(&chan->lock, flags); /* Get status from complete chain, if still there */ list_for_each_entry(desc, &chan->complete, entry) { if (desc->tx_desc.cookie != cookie) continue; ret = desc->status; break; } spin_unlock_irqrestore(&chan->lock, flags); } out: dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret); return ret; } static int ccp_pause(struct dma_chan *dma_chan) { struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, dma_chan); chan->status = DMA_PAUSED; /*TODO: Wait for active DMA to complete before returning? 
*/ return 0; } static int ccp_resume(struct dma_chan *dma_chan) { struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, dma_chan); struct ccp_dma_desc *desc; unsigned long flags; spin_lock_irqsave(&chan->lock, flags); desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc, entry); spin_unlock_irqrestore(&chan->lock, flags); /* Indicate the channel is running again */ chan->status = DMA_IN_PROGRESS; /* If there was something active, re-start */ if (desc) ccp_cmd_callback(desc, 0); return 0; } static int ccp_terminate_all(struct dma_chan *dma_chan) { struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan, dma_chan); unsigned long flags; dev_dbg(chan->ccp->dev, "%s\n", __func__); /*TODO: Wait for active DMA to complete before continuing */ spin_lock_irqsave(&chan->lock, flags); /*TODO: Purge the complete list? */ ccp_free_desc_resources(chan->ccp, &chan->active); ccp_free_desc_resources(chan->ccp, &chan->pending); ccp_free_desc_resources(chan->ccp, &chan->created); spin_unlock_irqrestore(&chan->lock, flags); return 0; } static void ccp_dma_release(struct ccp_device *ccp) { struct ccp_dma_chan *chan; struct dma_chan *dma_chan; unsigned int i; for (i = 0; i < ccp->cmd_q_count; i++) { chan = ccp->ccp_dma_chan + i; dma_chan = &chan->dma_chan; tasklet_kill(&chan->cleanup_tasklet); list_del_rcu(&dma_chan->device_node); } } static void ccp_dma_release_channels(struct ccp_device *ccp) { struct ccp_dma_chan *chan; struct dma_chan *dma_chan; unsigned int i; for (i = 0; i < ccp->cmd_q_count; i++) { chan = ccp->ccp_dma_chan + i; dma_chan = &chan->dma_chan; if (dma_chan->client_count) dma_release_channel(dma_chan); } } int ccp_dmaengine_register(struct ccp_device *ccp) { struct ccp_dma_chan *chan; struct dma_device *dma_dev = &ccp->dma_dev; struct dma_chan *dma_chan; char *dma_cmd_cache_name; char *dma_desc_cache_name; unsigned int i; int ret; if (!dmaengine) return 0; ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count, sizeof(*(ccp->ccp_dma_chan)), GFP_KERNEL); if (!ccp->ccp_dma_chan) return -ENOMEM; dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL, "%s-dmaengine-cmd-cache", ccp->name); if (!dma_cmd_cache_name) return -ENOMEM; ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name, sizeof(struct ccp_dma_cmd), sizeof(void *), SLAB_HWCACHE_ALIGN, NULL); if (!ccp->dma_cmd_cache) return -ENOMEM; dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL, "%s-dmaengine-desc-cache", ccp->name); if (!dma_desc_cache_name) { ret = -ENOMEM; goto err_cache; } ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name, sizeof(struct ccp_dma_desc), sizeof(void *), SLAB_HWCACHE_ALIGN, NULL); if (!ccp->dma_desc_cache) { ret = -ENOMEM; goto err_cache; } dma_dev->dev = ccp->dev; dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev)); dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev)); dma_dev->directions = DMA_MEM_TO_MEM; dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask); /* The DMA channels for this device can be set to public or private, * and overridden by the module parameter dma_chan_attr. 
* Default: according to the value in vdata (dma_chan_attr=0) * dma_chan_attr=0x1: all channels private (override vdata) * dma_chan_attr=0x2: all channels public (override vdata) */ if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE) dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); INIT_LIST_HEAD(&dma_dev->channels); for (i = 0; i < ccp->cmd_q_count; i++) { chan = ccp->ccp_dma_chan + i; dma_chan = &chan->dma_chan; chan->ccp = ccp; spin_lock_init(&chan->lock); INIT_LIST_HEAD(&chan->created); INIT_LIST_HEAD(&chan->pending); INIT_LIST_HEAD(&chan->active); INIT_LIST_HEAD(&chan->complete); tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup, (unsigned long)chan); dma_chan->device = dma_dev; dma_cookie_init(dma_chan); list_add_tail(&dma_chan->device_node, &dma_dev->channels); } dma_dev->device_free_chan_resources = ccp_free_chan_resources; dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy; dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt; dma_dev->device_issue_pending = ccp_issue_pending; dma_dev->device_tx_status = ccp_tx_status; dma_dev->device_pause = ccp_pause; dma_dev->device_resume = ccp_resume; dma_dev->device_terminate_all = ccp_terminate_all; ret = dma_async_device_register(dma_dev); if (ret) goto err_reg; return 0; err_reg: ccp_dma_release(ccp); kmem_cache_destroy(ccp->dma_desc_cache); err_cache: kmem_cache_destroy(ccp->dma_cmd_cache); return ret; } void ccp_dmaengine_unregister(struct ccp_device *ccp) { struct dma_device *dma_dev = &ccp->dma_dev; if (!dmaengine) return; ccp_dma_release_channels(ccp); dma_async_device_unregister(dma_dev); ccp_dma_release(ccp); kmem_cache_destroy(ccp->dma_desc_cache); kmem_cache_destroy(ccp->dma_cmd_cache); }
linux-master
drivers/crypto/ccp/ccp-dmaengine.c
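The file above exposes each CCP command queue as a generic DMA_MEMCPY provider, so consumers drive it through the standard dmaengine API rather than any CCP-specific call. The sketch below is a minimal, hypothetical kernel client written under that assumption: the helper name and buffer sizes are illustrative, mapping-error checks are trimmed for brevity, and dma_request_chan_by_mask() may hand back a non-CCP channel if another memcpy-capable provider is present. An explicit request of this kind is also the only way to reach channels that the dma_chan_attr parameter forces private.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Hypothetical consumer: copy one buffer to another via a DMA_MEMCPY channel. */
static int example_memcpy_via_dmaengine(size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_addr_t src_dma, dst_dma;
	dma_cookie_t cookie;
	dma_cap_mask_t mask;
	struct device *dev;
	void *src, *dst;
	int ret = 0;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_chan_by_mask(&mask);	/* any memcpy-capable provider */
	if (IS_ERR(chan))
		return PTR_ERR(chan);
	dev = chan->device->dev;

	src = kmalloc(len, GFP_KERNEL);
	dst = kmalloc(len, GFP_KERNEL);
	if (!src || !dst) {
		ret = -ENOMEM;
		goto out;
	}
	memset(src, 0xa5, len);

	/* Map the buffers for the provider's device (error checks trimmed) */
	src_dma = dma_map_single(dev, src, len, DMA_TO_DEVICE);
	dst_dma = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);

	/* Prepare, submit and kick the transfer; on a CCP channel this lands
	 * in ccp_prep_dma_memcpy(), ccp_tx_submit() and ccp_issue_pending().
	 */
	tx = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		ret = -EIO;
		goto unmap;
	}
	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	/* Poll for completion; a real user would normally use a callback */
	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
		ret = -EIO;

unmap:
	dma_unmap_single(dev, src_dma, len, DMA_TO_DEVICE);
	dma_unmap_single(dev, dst_dma, len, DMA_FROM_DEVICE);
out:
	kfree(src);
	kfree(dst);
	dma_release_channel(chan);
	return ret;
}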
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) AES crypto API support * * Copyright (C) 2013-2019 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <[email protected]> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <crypto/algapi.h> #include <crypto/aes.h> #include <crypto/ctr.h> #include <crypto/scatterwalk.h> #include "ccp-crypto.h" static int ccp_aes_complete(struct crypto_async_request *async_req, int ret) { struct skcipher_request *req = skcipher_request_cast(async_req); struct ccp_ctx *ctx = crypto_skcipher_ctx_dma( crypto_skcipher_reqtfm(req)); struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); if (ret) return ret; if (ctx->u.aes.mode != CCP_AES_MODE_ECB) memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE); return 0; } static int ccp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) { struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm); struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); switch (key_len) { case AES_KEYSIZE_128: ctx->u.aes.type = CCP_AES_TYPE_128; break; case AES_KEYSIZE_192: ctx->u.aes.type = CCP_AES_TYPE_192; break; case AES_KEYSIZE_256: ctx->u.aes.type = CCP_AES_TYPE_256; break; default: return -EINVAL; } ctx->u.aes.mode = alg->mode; ctx->u.aes.key_len = key_len; memcpy(ctx->u.aes.key, key, key_len); sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); return 0; } static int ccp_aes_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); struct scatterlist *iv_sg = NULL; unsigned int iv_len = 0; if (!ctx->u.aes.key_len) return -EINVAL; if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) || (ctx->u.aes.mode == CCP_AES_MODE_CBC)) && (req->cryptlen & (AES_BLOCK_SIZE - 1))) return -EINVAL; if (ctx->u.aes.mode != CCP_AES_MODE_ECB) { if (!req->iv) return -EINVAL; memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE); iv_sg = &rctx->iv_sg; iv_len = AES_BLOCK_SIZE; sg_init_one(iv_sg, rctx->iv, iv_len); } memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_AES; rctx->cmd.u.aes.type = ctx->u.aes.type; rctx->cmd.u.aes.mode = ctx->u.aes.mode; rctx->cmd.u.aes.action = (encrypt) ? 
CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT; rctx->cmd.u.aes.key = &ctx->u.aes.key_sg; rctx->cmd.u.aes.key_len = ctx->u.aes.key_len; rctx->cmd.u.aes.iv = iv_sg; rctx->cmd.u.aes.iv_len = iv_len; rctx->cmd.u.aes.src = req->src; rctx->cmd.u.aes.src_len = req->cryptlen; rctx->cmd.u.aes.dst = req->dst; return ccp_crypto_enqueue_request(&req->base, &rctx->cmd); } static int ccp_aes_encrypt(struct skcipher_request *req) { return ccp_aes_crypt(req, true); } static int ccp_aes_decrypt(struct skcipher_request *req) { return ccp_aes_crypt(req, false); } static int ccp_aes_init_tfm(struct crypto_skcipher *tfm) { struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); ctx->complete = ccp_aes_complete; ctx->u.aes.key_len = 0; crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx)); return 0; } static int ccp_aes_rfc3686_complete(struct crypto_async_request *async_req, int ret) { struct skcipher_request *req = skcipher_request_cast(async_req); struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); /* Restore the original pointer */ req->iv = rctx->rfc3686_info; return ccp_aes_complete(async_req, ret); } static int ccp_aes_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) { struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); if (key_len < CTR_RFC3686_NONCE_SIZE) return -EINVAL; key_len -= CTR_RFC3686_NONCE_SIZE; memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE); return ccp_aes_setkey(tfm, key, key_len); } static int ccp_aes_rfc3686_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); u8 *iv; /* Initialize the CTR block */ iv = rctx->rfc3686_iv; memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE); iv += CTR_RFC3686_NONCE_SIZE; memcpy(iv, req->iv, CTR_RFC3686_IV_SIZE); iv += CTR_RFC3686_IV_SIZE; *(__be32 *)iv = cpu_to_be32(1); /* Point to the new IV */ rctx->rfc3686_info = req->iv; req->iv = rctx->rfc3686_iv; return ccp_aes_crypt(req, encrypt); } static int ccp_aes_rfc3686_encrypt(struct skcipher_request *req) { return ccp_aes_rfc3686_crypt(req, true); } static int ccp_aes_rfc3686_decrypt(struct skcipher_request *req) { return ccp_aes_rfc3686_crypt(req, false); } static int ccp_aes_rfc3686_init_tfm(struct crypto_skcipher *tfm) { struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); ctx->complete = ccp_aes_rfc3686_complete; ctx->u.aes.key_len = 0; crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx)); return 0; } static const struct skcipher_alg ccp_aes_defaults = { .setkey = ccp_aes_setkey, .encrypt = ccp_aes_encrypt, .decrypt = ccp_aes_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .init = ccp_aes_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING, .base.cra_priority = CCP_CRA_PRIORITY, .base.cra_module = THIS_MODULE, }; static const struct skcipher_alg ccp_aes_rfc3686_defaults = { .setkey = ccp_aes_rfc3686_setkey, .encrypt = ccp_aes_rfc3686_encrypt, .decrypt = ccp_aes_rfc3686_decrypt, .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .init = ccp_aes_rfc3686_init_tfm, .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK,
.base.cra_blocksize = CTR_RFC3686_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING, .base.cra_priority = CCP_CRA_PRIORITY, .base.cra_module = THIS_MODULE, }; struct ccp_aes_def { enum ccp_aes_mode mode; unsigned int version; const char *name; const char *driver_name; unsigned int blocksize; unsigned int ivsize; const struct skcipher_alg *alg_defaults; }; static struct ccp_aes_def aes_algs[] = { { .mode = CCP_AES_MODE_ECB, .version = CCP_VERSION(3, 0), .name = "ecb(aes)", .driver_name = "ecb-aes-ccp", .blocksize = AES_BLOCK_SIZE, .ivsize = 0, .alg_defaults = &ccp_aes_defaults, }, { .mode = CCP_AES_MODE_CBC, .version = CCP_VERSION(3, 0), .name = "cbc(aes)", .driver_name = "cbc-aes-ccp", .blocksize = AES_BLOCK_SIZE, .ivsize = AES_BLOCK_SIZE, .alg_defaults = &ccp_aes_defaults, }, { .mode = CCP_AES_MODE_CFB, .version = CCP_VERSION(3, 0), .name = "cfb(aes)", .driver_name = "cfb-aes-ccp", .blocksize = 1, .ivsize = AES_BLOCK_SIZE, .alg_defaults = &ccp_aes_defaults, }, { .mode = CCP_AES_MODE_OFB, .version = CCP_VERSION(3, 0), .name = "ofb(aes)", .driver_name = "ofb-aes-ccp", .blocksize = 1, .ivsize = AES_BLOCK_SIZE, .alg_defaults = &ccp_aes_defaults, }, { .mode = CCP_AES_MODE_CTR, .version = CCP_VERSION(3, 0), .name = "ctr(aes)", .driver_name = "ctr-aes-ccp", .blocksize = 1, .ivsize = AES_BLOCK_SIZE, .alg_defaults = &ccp_aes_defaults, }, { .mode = CCP_AES_MODE_CTR, .version = CCP_VERSION(3, 0), .name = "rfc3686(ctr(aes))", .driver_name = "rfc3686-ctr-aes-ccp", .blocksize = 1, .ivsize = CTR_RFC3686_IV_SIZE, .alg_defaults = &ccp_aes_rfc3686_defaults, }, }; static int ccp_register_aes_alg(struct list_head *head, const struct ccp_aes_def *def) { struct ccp_crypto_skcipher_alg *ccp_alg; struct skcipher_alg *alg; int ret; ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); if (!ccp_alg) return -ENOMEM; INIT_LIST_HEAD(&ccp_alg->entry); ccp_alg->mode = def->mode; /* Copy the defaults and override as necessary */ alg = &ccp_alg->alg; *alg = *def->alg_defaults; snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->driver_name); alg->base.cra_blocksize = def->blocksize; alg->ivsize = def->ivsize; ret = crypto_register_skcipher(alg); if (ret) { pr_err("%s skcipher algorithm registration error (%d)\n", alg->base.cra_name, ret); kfree(ccp_alg); return ret; } list_add(&ccp_alg->entry, head); return 0; } int ccp_register_aes_algs(struct list_head *head) { int i, ret; unsigned int ccpversion = ccp_version(); for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { if (aes_algs[i].version > ccpversion) continue; ret = ccp_register_aes_alg(head, &aes_algs[i]); if (ret) return ret; } return 0; }
linux-master
drivers/crypto/ccp/ccp-crypto-aes.c
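Nothing in the file above is called directly by users; the registered modes are reached through the generic kernel skcipher API. The following is a minimal sketch of such a caller for a CBC request whose length is a multiple of AES_BLOCK_SIZE (a constraint ccp_aes_crypt() enforces). The helper name and buffer handling are illustrative, and requesting the generic "cbc(aes)" name resolves to whichever implementation has the highest priority, not necessarily the CCP.

#include <crypto/skcipher.h>
#include <crypto/aes.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>

/* Hypothetical caller: one in-place CBC encryption through the crypto API.
 * buf must be a heap buffer (not stack) since it is scatterlist-mapped, and
 * len must be a multiple of AES_BLOCK_SIZE; iv is updated on completion.
 */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int key_len,
				   u8 iv[AES_BLOCK_SIZE])
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* "cbc-aes-ccp" could be requested instead to pin the CCP backend */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, key_len);
	if (ret)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					   CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* The driver completes asynchronously; crypto_wait_req() sleeps on it */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}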
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Secure Processor Dynamic Boost Control interface * * Copyright (C) 2023 Advanced Micro Devices, Inc. * * Author: Mario Limonciello <[email protected]> */ #include "dbc.h" struct error_map { u32 psp; int ret; }; #define DBC_ERROR_ACCESS_DENIED 0x0001 #define DBC_ERROR_EXCESS_DATA 0x0004 #define DBC_ERROR_BAD_PARAMETERS 0x0006 #define DBC_ERROR_BAD_STATE 0x0007 #define DBC_ERROR_NOT_IMPLEMENTED 0x0009 #define DBC_ERROR_BUSY 0x000D #define DBC_ERROR_MESSAGE_FAILURE 0x0307 #define DBC_ERROR_OVERFLOW 0x300F #define DBC_ERROR_SIGNATURE_INVALID 0x3072 static struct error_map error_codes[] = { {DBC_ERROR_ACCESS_DENIED, -EACCES}, {DBC_ERROR_EXCESS_DATA, -E2BIG}, {DBC_ERROR_BAD_PARAMETERS, -EINVAL}, {DBC_ERROR_BAD_STATE, -EAGAIN}, {DBC_ERROR_MESSAGE_FAILURE, -ENOENT}, {DBC_ERROR_NOT_IMPLEMENTED, -ENOENT}, {DBC_ERROR_BUSY, -EBUSY}, {DBC_ERROR_OVERFLOW, -ENFILE}, {DBC_ERROR_SIGNATURE_INVALID, -EPERM}, {0x0, 0x0}, }; static int send_dbc_cmd(struct psp_dbc_device *dbc_dev, enum psp_platform_access_msg msg) { int ret; dbc_dev->mbox->req.header.status = 0; ret = psp_send_platform_access_msg(msg, (struct psp_request *)dbc_dev->mbox); if (ret == -EIO) { int i; dev_dbg(dbc_dev->dev, "msg 0x%x failed with PSP error: 0x%x\n", msg, dbc_dev->mbox->req.header.status); for (i = 0; error_codes[i].psp; i++) { if (dbc_dev->mbox->req.header.status == error_codes[i].psp) return error_codes[i].ret; } } return ret; } static int send_dbc_nonce(struct psp_dbc_device *dbc_dev) { int ret; dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_nonce); ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE); if (ret == -EAGAIN) { dev_dbg(dbc_dev->dev, "retrying get nonce\n"); ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_NONCE); } return ret; } static int send_dbc_parameter(struct psp_dbc_device *dbc_dev) { dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_param); switch (dbc_dev->mbox->dbc_param.user.msg_index) { case PARAM_SET_FMAX_CAP: case PARAM_SET_PWR_CAP: case PARAM_SET_GFX_MODE: return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_PARAMETER); case PARAM_GET_FMAX_CAP: case PARAM_GET_PWR_CAP: case PARAM_GET_CURR_TEMP: case PARAM_GET_FMAX_MAX: case PARAM_GET_FMAX_MIN: case PARAM_GET_SOC_PWR_MAX: case PARAM_GET_SOC_PWR_MIN: case PARAM_GET_SOC_PWR_CUR: case PARAM_GET_GFX_MODE: return send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_GET_PARAMETER); } return -EINVAL; } void dbc_dev_destroy(struct psp_device *psp) { struct psp_dbc_device *dbc_dev = psp->dbc_data; if (!dbc_dev) return; misc_deregister(&dbc_dev->char_dev); mutex_destroy(&dbc_dev->ioctl_mutex); psp->dbc_data = NULL; } static long dbc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct psp_device *psp_master = psp_get_master_device(); void __user *argp = (void __user *)arg; struct psp_dbc_device *dbc_dev; int ret; if (!psp_master || !psp_master->dbc_data) return -ENODEV; dbc_dev = psp_master->dbc_data; mutex_lock(&dbc_dev->ioctl_mutex); switch (cmd) { case DBCIOCNONCE: if (copy_from_user(&dbc_dev->mbox->dbc_nonce.user, argp, sizeof(struct dbc_user_nonce))) { ret = -EFAULT; goto unlock; } ret = send_dbc_nonce(dbc_dev); if (ret) goto unlock; if (copy_to_user(argp, &dbc_dev->mbox->dbc_nonce.user, sizeof(struct dbc_user_nonce))) { ret = -EFAULT; goto unlock; } break; case DBCIOCUID: dbc_dev->mbox->req.header.payload_size = sizeof(dbc_dev->mbox->dbc_set_uid); if (copy_from_user(&dbc_dev->mbox->dbc_set_uid.user, argp, sizeof(struct dbc_user_setuid))) { ret = -EFAULT; goto unlock; 
} ret = send_dbc_cmd(dbc_dev, PSP_DYNAMIC_BOOST_SET_UID); if (ret) goto unlock; if (copy_to_user(argp, &dbc_dev->mbox->dbc_set_uid.user, sizeof(struct dbc_user_setuid))) { ret = -EFAULT; goto unlock; } break; case DBCIOCPARAM: if (copy_from_user(&dbc_dev->mbox->dbc_param.user, argp, sizeof(struct dbc_user_param))) { ret = -EFAULT; goto unlock; } ret = send_dbc_parameter(dbc_dev); if (ret) goto unlock; if (copy_to_user(argp, &dbc_dev->mbox->dbc_param.user, sizeof(struct dbc_user_param))) { ret = -EFAULT; goto unlock; } break; default: ret = -EINVAL; } unlock: mutex_unlock(&dbc_dev->ioctl_mutex); return ret; } static const struct file_operations dbc_fops = { .owner = THIS_MODULE, .unlocked_ioctl = dbc_ioctl, }; int dbc_dev_init(struct psp_device *psp) { struct device *dev = psp->dev; struct psp_dbc_device *dbc_dev; int ret; if (!PSP_FEATURE(psp, DBC)) return 0; dbc_dev = devm_kzalloc(dev, sizeof(*dbc_dev), GFP_KERNEL); if (!dbc_dev) return -ENOMEM; BUILD_BUG_ON(sizeof(union dbc_buffer) > PAGE_SIZE); dbc_dev->mbox = (void *)devm_get_free_pages(dev, GFP_KERNEL, 0); if (!dbc_dev->mbox) { ret = -ENOMEM; goto cleanup_dev; } psp->dbc_data = dbc_dev; dbc_dev->dev = dev; ret = send_dbc_nonce(dbc_dev); if (ret == -EACCES) { dev_dbg(dbc_dev->dev, "dynamic boost control was previously authenticated\n"); ret = 0; } dev_dbg(dbc_dev->dev, "dynamic boost control is %savailable\n", ret ? "un" : ""); if (ret) { ret = 0; goto cleanup_mbox; } dbc_dev->char_dev.minor = MISC_DYNAMIC_MINOR; dbc_dev->char_dev.name = "dbc"; dbc_dev->char_dev.fops = &dbc_fops; dbc_dev->char_dev.mode = 0600; ret = misc_register(&dbc_dev->char_dev); if (ret) goto cleanup_mbox; mutex_init(&dbc_dev->ioctl_mutex); return 0; cleanup_mbox: devm_free_pages(dev, (unsigned long)dbc_dev->mbox); cleanup_dev: psp->dbc_data = NULL; devm_kfree(dev, dbc_dev); return ret; }
linux-master
drivers/crypto/ccp/dbc.c
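The misc device registered above (named "dbc", normally /dev/dbc) is driven from userspace with the DBCIOC* ioctls declared in the uapi header <linux/psp-dbc.h>. The sketch below is a hypothetical user of the nonce exchange only; the exact fields of struct dbc_user_nonce, and how the returned nonce is later signed for DBCIOCUID and DBCIOCPARAM, come from that header and are deliberately not spelled out here.

/* Hypothetical userspace sketch: request a nonce from /dev/dbc. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/psp-dbc.h>

int main(void)
{
	struct dbc_user_nonce nonce;
	int fd, ret;

	fd = open("/dev/dbc", O_RDWR);
	if (fd < 0) {
		perror("open /dev/dbc");
		return 1;
	}

	memset(&nonce, 0, sizeof(nonce));
	/* dbc_ioctl() copies the structure in, runs send_dbc_nonce(),
	 * then copies the (now populated) structure back to userspace.
	 */
	ret = ioctl(fd, DBCIOCNONCE, &nonce);
	if (ret)
		perror("DBCIOCNONCE");
	else
		printf("nonce retrieved; next steps are DBCIOCUID/DBCIOCPARAM\n");

	close(fd);
	return ret ? 1 : 0;
}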
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) driver * * Copyright (C) 2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook <[email protected]> */ #include <linux/debugfs.h> #include <linux/ccp.h> #include "ccp-dev.h" /* DebugFS helpers */ #define OBUFP (obuf + oboff) #define OBUFLEN 512 #define OBUFSPC (OBUFLEN - oboff) #define OSCNPRINTF(fmt, ...) \ scnprintf(OBUFP, OBUFSPC, fmt, ## __VA_ARGS__) #define BUFLEN 63 #define RI_VERSION_NUM 0x0000003F #define RI_AES_PRESENT 0x00000040 #define RI_3DES_PRESENT 0x00000080 #define RI_SHA_PRESENT 0x00000100 #define RI_RSA_PRESENT 0x00000200 #define RI_ECC_PRESENT 0x00000400 #define RI_ZDE_PRESENT 0x00000800 #define RI_ZCE_PRESENT 0x00001000 #define RI_TRNG_PRESENT 0x00002000 #define RI_ELFC_PRESENT 0x00004000 #define RI_ELFC_SHIFT 14 #define RI_NUM_VQM 0x00078000 #define RI_NVQM_SHIFT 15 #define RI_NVQM(r) (((r) * RI_NUM_VQM) >> RI_NVQM_SHIFT) #define RI_LSB_ENTRIES 0x0FF80000 #define RI_NLSB_SHIFT 19 #define RI_NLSB(r) (((r) * RI_LSB_ENTRIES) >> RI_NLSB_SHIFT) static ssize_t ccp5_debugfs_info_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp) { struct ccp_device *ccp = filp->private_data; unsigned int oboff = 0; unsigned int regval; ssize_t ret; char *obuf; if (!ccp) return 0; obuf = kmalloc(OBUFLEN, GFP_KERNEL); if (!obuf) return -ENOMEM; oboff += OSCNPRINTF("Device name: %s\n", ccp->name); oboff += OSCNPRINTF(" RNG name: %s\n", ccp->rngname); oboff += OSCNPRINTF(" # Queues: %d\n", ccp->cmd_q_count); oboff += OSCNPRINTF(" # Cmds: %d\n", ccp->cmd_count); regval = ioread32(ccp->io_regs + CMD5_PSP_CCP_VERSION); oboff += OSCNPRINTF(" Version: %d\n", regval & RI_VERSION_NUM); oboff += OSCNPRINTF(" Engines:"); if (regval & RI_AES_PRESENT) oboff += OSCNPRINTF(" AES"); if (regval & RI_3DES_PRESENT) oboff += OSCNPRINTF(" 3DES"); if (regval & RI_SHA_PRESENT) oboff += OSCNPRINTF(" SHA"); if (regval & RI_RSA_PRESENT) oboff += OSCNPRINTF(" RSA"); if (regval & RI_ECC_PRESENT) oboff += OSCNPRINTF(" ECC"); if (regval & RI_ZDE_PRESENT) oboff += OSCNPRINTF(" ZDE"); if (regval & RI_ZCE_PRESENT) oboff += OSCNPRINTF(" ZCE"); if (regval & RI_TRNG_PRESENT) oboff += OSCNPRINTF(" TRNG"); oboff += OSCNPRINTF("\n"); oboff += OSCNPRINTF(" Queues: %d\n", (regval & RI_NUM_VQM) >> RI_NVQM_SHIFT); oboff += OSCNPRINTF("LSB Entries: %d\n", (regval & RI_LSB_ENTRIES) >> RI_NLSB_SHIFT); ret = simple_read_from_buffer(ubuf, count, offp, obuf, oboff); kfree(obuf); return ret; } /* Return a formatted buffer containing the current * statistics across all queues for a CCP. 
*/ static ssize_t ccp5_debugfs_stats_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp) { struct ccp_device *ccp = filp->private_data; unsigned long total_xts_aes_ops = 0; unsigned long total_3des_ops = 0; unsigned long total_aes_ops = 0; unsigned long total_sha_ops = 0; unsigned long total_rsa_ops = 0; unsigned long total_ecc_ops = 0; unsigned long total_pt_ops = 0; unsigned long total_ops = 0; unsigned int oboff = 0; ssize_t ret = 0; unsigned int i; char *obuf; for (i = 0; i < ccp->cmd_q_count; i++) { struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; total_ops += cmd_q->total_ops; total_aes_ops += cmd_q->total_aes_ops; total_xts_aes_ops += cmd_q->total_xts_aes_ops; total_3des_ops += cmd_q->total_3des_ops; total_sha_ops += cmd_q->total_sha_ops; total_rsa_ops += cmd_q->total_rsa_ops; total_pt_ops += cmd_q->total_pt_ops; total_ecc_ops += cmd_q->total_ecc_ops; } obuf = kmalloc(OBUFLEN, GFP_KERNEL); if (!obuf) return -ENOMEM; oboff += OSCNPRINTF("Total Interrupts Handled: %ld\n", ccp->total_interrupts); oboff += OSCNPRINTF(" Total Operations: %ld\n", total_ops); oboff += OSCNPRINTF(" AES: %ld\n", total_aes_ops); oboff += OSCNPRINTF(" XTS AES: %ld\n", total_xts_aes_ops); oboff += OSCNPRINTF(" 3DES: %ld\n", total_3des_ops); oboff += OSCNPRINTF(" SHA: %ld\n", total_sha_ops); oboff += OSCNPRINTF(" RSA: %ld\n", total_rsa_ops); oboff += OSCNPRINTF(" Pass-Thru: %ld\n", total_pt_ops); oboff += OSCNPRINTF(" ECC: %ld\n", total_ecc_ops); ret = simple_read_from_buffer(ubuf, count, offp, obuf, oboff); kfree(obuf); return ret; } /* Reset the counters in a queue */ static void ccp5_debugfs_reset_queue_stats(struct ccp_cmd_queue *cmd_q) { cmd_q->total_ops = 0L; cmd_q->total_aes_ops = 0L; cmd_q->total_xts_aes_ops = 0L; cmd_q->total_3des_ops = 0L; cmd_q->total_sha_ops = 0L; cmd_q->total_rsa_ops = 0L; cmd_q->total_pt_ops = 0L; cmd_q->total_ecc_ops = 0L; } /* A value was written to the stats variable, which * should be used to reset the queue counters across * that device.
*/ static ssize_t ccp5_debugfs_stats_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *offp) { struct ccp_device *ccp = filp->private_data; int i; for (i = 0; i < ccp->cmd_q_count; i++) ccp5_debugfs_reset_queue_stats(&ccp->cmd_q[i]); ccp->total_interrupts = 0L; return count; } /* Return a formatted buffer containing the current information * for that queue */ static ssize_t ccp5_debugfs_queue_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp) { struct ccp_cmd_queue *cmd_q = filp->private_data; unsigned int oboff = 0; unsigned int regval; ssize_t ret; char *obuf; if (!cmd_q) return 0; obuf = kmalloc(OBUFLEN, GFP_KERNEL); if (!obuf) return -ENOMEM; oboff += OSCNPRINTF(" Total Queue Operations: %ld\n", cmd_q->total_ops); oboff += OSCNPRINTF(" AES: %ld\n", cmd_q->total_aes_ops); oboff += OSCNPRINTF(" XTS AES: %ld\n", cmd_q->total_xts_aes_ops); oboff += OSCNPRINTF(" 3DES: %ld\n", cmd_q->total_3des_ops); oboff += OSCNPRINTF(" SHA: %ld\n", cmd_q->total_sha_ops); oboff += OSCNPRINTF(" RSA: %ld\n", cmd_q->total_rsa_ops); oboff += OSCNPRINTF(" Pass-Thru: %ld\n", cmd_q->total_pt_ops); oboff += OSCNPRINTF(" ECC: %ld\n", cmd_q->total_ecc_ops); regval = ioread32(cmd_q->reg_int_enable); oboff += OSCNPRINTF(" Enabled Interrupts:"); if (regval & INT_EMPTY_QUEUE) oboff += OSCNPRINTF(" EMPTY"); if (regval & INT_QUEUE_STOPPED) oboff += OSCNPRINTF(" STOPPED"); if (regval & INT_ERROR) oboff += OSCNPRINTF(" ERROR"); if (regval & INT_COMPLETION) oboff += OSCNPRINTF(" COMPLETION"); oboff += OSCNPRINTF("\n"); ret = simple_read_from_buffer(ubuf, count, offp, obuf, oboff); kfree(obuf); return ret; } /* A value was written to the stats variable for a * queue. Reset the queue counters to this value. */ static ssize_t ccp5_debugfs_queue_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *offp) { struct ccp_cmd_queue *cmd_q = filp->private_data; ccp5_debugfs_reset_queue_stats(cmd_q); return count; } static const struct file_operations ccp_debugfs_info_ops = { .owner = THIS_MODULE, .open = simple_open, .read = ccp5_debugfs_info_read, .write = NULL, }; static const struct file_operations ccp_debugfs_queue_ops = { .owner = THIS_MODULE, .open = simple_open, .read = ccp5_debugfs_queue_read, .write = ccp5_debugfs_queue_write, }; static const struct file_operations ccp_debugfs_stats_ops = { .owner = THIS_MODULE, .open = simple_open, .read = ccp5_debugfs_stats_read, .write = ccp5_debugfs_stats_write, }; static struct dentry *ccp_debugfs_dir; static DEFINE_MUTEX(ccp_debugfs_lock); #define MAX_NAME_LEN 20 void ccp5_debugfs_setup(struct ccp_device *ccp) { struct ccp_cmd_queue *cmd_q; char name[MAX_NAME_LEN + 1]; struct dentry *debugfs_q_instance; int i; if (!debugfs_initialized()) return; mutex_lock(&ccp_debugfs_lock); if (!ccp_debugfs_dir) ccp_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); mutex_unlock(&ccp_debugfs_lock); ccp->debugfs_instance = debugfs_create_dir(ccp->name, ccp_debugfs_dir); debugfs_create_file("info", 0400, ccp->debugfs_instance, ccp, &ccp_debugfs_info_ops); debugfs_create_file("stats", 0600, ccp->debugfs_instance, ccp, &ccp_debugfs_stats_ops); for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; snprintf(name, MAX_NAME_LEN - 1, "q%d", cmd_q->id); debugfs_q_instance = debugfs_create_dir(name, ccp->debugfs_instance); debugfs_create_file("stats", 0600, debugfs_q_instance, cmd_q, &ccp_debugfs_queue_ops); } return; } void ccp5_debugfs_destroy(void) { debugfs_remove_recursive(ccp_debugfs_dir); }
linux-master
drivers/crypto/ccp/ccp-debugfs.c
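For reference, the files created above live under debugfs (normally mounted at /sys/kernel/debug) in a ccp/<device-name>/ hierarchy: 'info' and 'stats' per device plus one 'stats' file per queue. The sketch below is a hypothetical userspace helper that dumps the device-wide counters and then clears them; the path is illustrative, since the per-device directory name comes from ccp->name, and writing any value simply triggers ccp5_debugfs_stats_write(), which zeroes the counters.

/* Hypothetical userspace sketch: dump and reset one device's CCP counters. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	/* Directory name under debug/ccp/ depends on the device (ccp->name) */
	const char *path = argc > 1 ? argv[1]
				    : "/sys/kernel/debug/ccp/ccp-0/stats";
	char buf[512];
	ssize_t n;
	int fd;

	fd = open(path, O_RDWR);
	if (fd < 0) {
		perror(path);
		return 1;
	}

	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);	/* text built by ccp5_debugfs_stats_read() */
	}

	/* Any write resets the counters (see ccp5_debugfs_stats_write()) */
	if (write(fd, "0", 1) < 0)
		perror("reset");

	close(fd);
	return 0;
}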
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) crypto API support * * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <[email protected]> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/ccp.h> #include <linux/scatterlist.h> #include <crypto/internal/hash.h> #include <crypto/internal/akcipher.h> #include "ccp-crypto.h" MODULE_AUTHOR("Tom Lendacky <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support"); static unsigned int aes_disable; module_param(aes_disable, uint, 0444); MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value"); static unsigned int sha_disable; module_param(sha_disable, uint, 0444); MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value"); static unsigned int des3_disable; module_param(des3_disable, uint, 0444); MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value"); static unsigned int rsa_disable; module_param(rsa_disable, uint, 0444); MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value"); /* List heads for the supported algorithms */ static LIST_HEAD(hash_algs); static LIST_HEAD(skcipher_algs); static LIST_HEAD(aead_algs); static LIST_HEAD(akcipher_algs); /* For any tfm, requests for that tfm must be returned on the order * received. With multiple queues available, the CCP can process more * than one cmd at a time. Therefore we must maintain a cmd list to insure * the proper ordering of requests on a given tfm. */ struct ccp_crypto_queue { struct list_head cmds; struct list_head *backlog; unsigned int cmd_count; }; #define CCP_CRYPTO_MAX_QLEN 100 static struct ccp_crypto_queue req_queue; static DEFINE_SPINLOCK(req_queue_lock); struct ccp_crypto_cmd { struct list_head entry; struct ccp_cmd *cmd; /* Save the crypto_tfm and crypto_async_request addresses * separately to avoid any reference to a possibly invalid * crypto_async_request structure after invoking the request * callback */ struct crypto_async_request *req; struct crypto_tfm *tfm; /* Used for held command processing to determine state */ int ret; }; static inline bool ccp_crypto_success(int err) { if (err && (err != -EINPROGRESS) && (err != -EBUSY)) return false; return true; } static struct ccp_crypto_cmd *ccp_crypto_cmd_complete( struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog) { struct ccp_crypto_cmd *held = NULL, *tmp; unsigned long flags; *backlog = NULL; spin_lock_irqsave(&req_queue_lock, flags); /* Held cmds will be after the current cmd in the queue so start * searching for a cmd with a matching tfm for submission. */ tmp = crypto_cmd; list_for_each_entry_continue(tmp, &req_queue.cmds, entry) { if (crypto_cmd->tfm != tmp->tfm) continue; held = tmp; break; } /* Process the backlog: * Because cmds can be executed from any point in the cmd list * special precautions have to be taken when handling the backlog. 
*/ if (req_queue.backlog != &req_queue.cmds) { /* Skip over this cmd if it is the next backlog cmd */ if (req_queue.backlog == &crypto_cmd->entry) req_queue.backlog = crypto_cmd->entry.next; *backlog = container_of(req_queue.backlog, struct ccp_crypto_cmd, entry); req_queue.backlog = req_queue.backlog->next; /* Skip over this cmd if it is now the next backlog cmd */ if (req_queue.backlog == &crypto_cmd->entry) req_queue.backlog = crypto_cmd->entry.next; } /* Remove the cmd entry from the list of cmds */ req_queue.cmd_count--; list_del(&crypto_cmd->entry); spin_unlock_irqrestore(&req_queue_lock, flags); return held; } static void ccp_crypto_complete(void *data, int err) { struct ccp_crypto_cmd *crypto_cmd = data; struct ccp_crypto_cmd *held, *next, *backlog; struct crypto_async_request *req = crypto_cmd->req; struct ccp_ctx *ctx = crypto_tfm_ctx_dma(req->tfm); int ret; if (err == -EINPROGRESS) { /* Only propagate the -EINPROGRESS if necessary */ if (crypto_cmd->ret == -EBUSY) { crypto_cmd->ret = -EINPROGRESS; crypto_request_complete(req, -EINPROGRESS); } return; } /* Operation has completed - update the queue before invoking * the completion callbacks and retrieve the next cmd (cmd with * a matching tfm) that can be submitted to the CCP. */ held = ccp_crypto_cmd_complete(crypto_cmd, &backlog); if (backlog) { backlog->ret = -EINPROGRESS; crypto_request_complete(backlog->req, -EINPROGRESS); } /* Transition the state from -EBUSY to -EINPROGRESS first */ if (crypto_cmd->ret == -EBUSY) crypto_request_complete(req, -EINPROGRESS); /* Completion callbacks */ ret = err; if (ctx->complete) ret = ctx->complete(req, ret); crypto_request_complete(req, ret); /* Submit the next cmd */ while (held) { /* Since we have already queued the cmd, we must indicate that * we can backlog so as not to "lose" this request. */ held->cmd->flags |= CCP_CMD_MAY_BACKLOG; ret = ccp_enqueue_cmd(held->cmd); if (ccp_crypto_success(ret)) break; /* Error occurred, report it and get the next entry */ ctx = crypto_tfm_ctx_dma(held->req->tfm); if (ctx->complete) ret = ctx->complete(held->req, ret); crypto_request_complete(held->req, ret); next = ccp_crypto_cmd_complete(held, &backlog); if (backlog) { backlog->ret = -EINPROGRESS; crypto_request_complete(backlog->req, -EINPROGRESS); } kfree(held); held = next; } kfree(crypto_cmd); } static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd) { struct ccp_crypto_cmd *active = NULL, *tmp; unsigned long flags; bool free_cmd = true; int ret; spin_lock_irqsave(&req_queue_lock, flags); /* Check if the cmd can/should be queued */ if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) { ret = -ENOSPC; goto e_lock; } } /* Look for an entry with the same tfm. If there is a cmd * with the same tfm in the list then the current cmd cannot * be submitted to the CCP yet. 
*/ list_for_each_entry(tmp, &req_queue.cmds, entry) { if (crypto_cmd->tfm != tmp->tfm) continue; active = tmp; break; } ret = -EINPROGRESS; if (!active) { ret = ccp_enqueue_cmd(crypto_cmd->cmd); if (!ccp_crypto_success(ret)) goto e_lock; /* Error, don't queue it */ } if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) { ret = -EBUSY; if (req_queue.backlog == &req_queue.cmds) req_queue.backlog = &crypto_cmd->entry; } crypto_cmd->ret = ret; req_queue.cmd_count++; list_add_tail(&crypto_cmd->entry, &req_queue.cmds); free_cmd = false; e_lock: spin_unlock_irqrestore(&req_queue_lock, flags); if (free_cmd) kfree(crypto_cmd); return ret; } /** * ccp_crypto_enqueue_request - queue an crypto async request for processing * by the CCP * * @req: crypto_async_request struct to be processed * @cmd: ccp_cmd struct to be sent to the CCP */ int ccp_crypto_enqueue_request(struct crypto_async_request *req, struct ccp_cmd *cmd) { struct ccp_crypto_cmd *crypto_cmd; gfp_t gfp; gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp); if (!crypto_cmd) return -ENOMEM; /* The tfm pointer must be saved and not referenced from the * crypto_async_request (req) pointer because it is used after * completion callback for the request and the req pointer * might not be valid anymore. */ crypto_cmd->cmd = cmd; crypto_cmd->req = req; crypto_cmd->tfm = req->tfm; cmd->callback = ccp_crypto_complete; cmd->data = crypto_cmd; if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) cmd->flags |= CCP_CMD_MAY_BACKLOG; else cmd->flags &= ~CCP_CMD_MAY_BACKLOG; return ccp_crypto_enqueue_cmd(crypto_cmd); } struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table, struct scatterlist *sg_add) { struct scatterlist *sg, *sg_last = NULL; for (sg = table->sgl; sg; sg = sg_next(sg)) if (!sg_page(sg)) break; if (WARN_ON(!sg)) return NULL; for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) { sg_set_page(sg, sg_page(sg_add), sg_add->length, sg_add->offset); sg_last = sg; } if (WARN_ON(sg_add)) return NULL; return sg_last; } static int ccp_register_algs(void) { int ret; if (!aes_disable) { ret = ccp_register_aes_algs(&skcipher_algs); if (ret) return ret; ret = ccp_register_aes_cmac_algs(&hash_algs); if (ret) return ret; ret = ccp_register_aes_xts_algs(&skcipher_algs); if (ret) return ret; ret = ccp_register_aes_aeads(&aead_algs); if (ret) return ret; } if (!des3_disable) { ret = ccp_register_des3_algs(&skcipher_algs); if (ret) return ret; } if (!sha_disable) { ret = ccp_register_sha_algs(&hash_algs); if (ret) return ret; } if (!rsa_disable) { ret = ccp_register_rsa_algs(&akcipher_algs); if (ret) return ret; } return 0; } static void ccp_unregister_algs(void) { struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp; struct ccp_crypto_skcipher_alg *ablk_alg, *ablk_tmp; struct ccp_crypto_aead *aead_alg, *aead_tmp; struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp; list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) { crypto_unregister_ahash(&ahash_alg->alg); list_del(&ahash_alg->entry); kfree(ahash_alg); } list_for_each_entry_safe(ablk_alg, ablk_tmp, &skcipher_algs, entry) { crypto_unregister_skcipher(&ablk_alg->alg); list_del(&ablk_alg->entry); kfree(ablk_alg); } list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) { crypto_unregister_aead(&aead_alg->alg); list_del(&aead_alg->entry); kfree(aead_alg); } list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) { crypto_unregister_akcipher(&akc_alg->alg); list_del(&akc_alg->entry); kfree(akc_alg); } } 
static int __init ccp_crypto_init(void) { int ret; ret = ccp_present(); if (ret) { pr_err("Cannot load: there are no available CCPs\n"); return ret; } INIT_LIST_HEAD(&req_queue.cmds); req_queue.backlog = &req_queue.cmds; req_queue.cmd_count = 0; ret = ccp_register_algs(); if (ret) ccp_unregister_algs(); return ret; } static void __exit ccp_crypto_exit(void) { ccp_unregister_algs(); } module_init(ccp_crypto_init); module_exit(ccp_crypto_exit);
linux-master
drivers/crypto/ccp/ccp-crypto-main.c
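The -EBUSY/-EINPROGRESS transitions implemented by ccp_crypto_enqueue_cmd() and ccp_crypto_complete() above follow the standard crypto API backlog contract, which callers normally never open-code because crypto_wait_req() and crypto_req_done() hide it. The sketch below is a rough, hand-rolled equivalent of those helpers, shown only to illustrate what this file's completion path has to provide when the caller sets CRYPTO_TFM_REQ_MAY_BACKLOG; real code should use the stock helpers.

#include <linux/completion.h>
#include <linux/errno.h>

/* Minimal open-coded equivalent of crypto_wait_req()/crypto_req_done().
 * The struct must be set up with init_completion(&wait->done) before the
 * request is submitted.
 */
struct example_wait {
	struct completion done;
	int err;
};

/* Completion callback passed via *_request_set_callback() */
static void example_req_done(void *data, int err)
{
	struct example_wait *wait = data;

	/* A backlogged request signals -EINPROGRESS once, when it moves from
	 * the backlog to the hardware (see ccp_crypto_complete()); only the
	 * following invocation carries the final status.
	 */
	if (err == -EINPROGRESS)
		return;

	wait->err = err;
	complete(&wait->done);
}

static int example_wait_req(int submit_ret, struct example_wait *wait)
{
	switch (submit_ret) {
	case -EINPROGRESS:	/* queued behind other work for this tfm */
	case -EBUSY:		/* accepted onto the backlog */
		wait_for_completion(&wait->done);
		return wait->err;
	default:		/* immediate success or hard failure */
		return submit_ret;
	}
}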
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) driver * * Copyright (C) 2013-2019 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <[email protected]> * Author: Gary R Hook <[email protected]> */ #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <crypto/scatterwalk.h> #include <crypto/des.h> #include <linux/ccp.h> #include "ccp-dev.h" /* SHA initial context values */ static const __be32 ccp_sha1_init[SHA1_DIGEST_SIZE / sizeof(__be32)] = { cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), cpu_to_be32(SHA1_H4), }; static const __be32 ccp_sha224_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), }; static const __be32 ccp_sha256_init[SHA256_DIGEST_SIZE / sizeof(__be32)] = { cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), }; static const __be64 ccp_sha384_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = { cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1), cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3), cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5), cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7), }; static const __be64 ccp_sha512_init[SHA512_DIGEST_SIZE / sizeof(__be64)] = { cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1), cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3), cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5), cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7), }; #define CCP_NEW_JOBID(ccp) ((ccp->vdata->version == CCP_VERSION(3, 0)) ? \ ccp_gen_jobid(ccp) : 0) static u32 ccp_gen_jobid(struct ccp_device *ccp) { return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK; } static void ccp_sg_free(struct ccp_sg_workarea *wa) { if (wa->dma_count) dma_unmap_sg(wa->dma_dev, wa->dma_sg_head, wa->nents, wa->dma_dir); wa->dma_count = 0; } static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev, struct scatterlist *sg, u64 len, enum dma_data_direction dma_dir) { memset(wa, 0, sizeof(*wa)); wa->sg = sg; if (!sg) return 0; wa->nents = sg_nents_for_len(sg, len); if (wa->nents < 0) return wa->nents; wa->bytes_left = len; wa->sg_used = 0; if (len == 0) return 0; if (dma_dir == DMA_NONE) return 0; wa->dma_sg = sg; wa->dma_sg_head = sg; wa->dma_dev = dev; wa->dma_dir = dma_dir; wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir); if (!wa->dma_count) return -ENOMEM; return 0; } static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len) { unsigned int nbytes = min_t(u64, len, wa->bytes_left); unsigned int sg_combined_len = 0; if (!wa->sg) return; wa->sg_used += nbytes; wa->bytes_left -= nbytes; if (wa->sg_used == sg_dma_len(wa->dma_sg)) { /* Advance to the next DMA scatterlist entry */ wa->dma_sg = sg_next(wa->dma_sg); /* In the case that the DMA mapped scatterlist has entries * that have been merged, the non-DMA mapped scatterlist * must be advanced multiple times for each merged entry. * This ensures that the current non-DMA mapped entry * corresponds to the current DMA mapped entry. 
*/ do { sg_combined_len += wa->sg->length; wa->sg = sg_next(wa->sg); } while (wa->sg_used > sg_combined_len); wa->sg_used = 0; } } static void ccp_dm_free(struct ccp_dm_workarea *wa) { if (wa->length <= CCP_DMAPOOL_MAX_SIZE) { if (wa->address) dma_pool_free(wa->dma_pool, wa->address, wa->dma.address); } else { if (wa->dma.address) dma_unmap_single(wa->dev, wa->dma.address, wa->length, wa->dma.dir); kfree(wa->address); } wa->address = NULL; wa->dma.address = 0; } static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa, struct ccp_cmd_queue *cmd_q, unsigned int len, enum dma_data_direction dir) { memset(wa, 0, sizeof(*wa)); if (!len) return 0; wa->dev = cmd_q->ccp->dev; wa->length = len; if (len <= CCP_DMAPOOL_MAX_SIZE) { wa->dma_pool = cmd_q->dma_pool; wa->address = dma_pool_zalloc(wa->dma_pool, GFP_KERNEL, &wa->dma.address); if (!wa->address) return -ENOMEM; wa->dma.length = CCP_DMAPOOL_MAX_SIZE; } else { wa->address = kzalloc(len, GFP_KERNEL); if (!wa->address) return -ENOMEM; wa->dma.address = dma_map_single(wa->dev, wa->address, len, dir); if (dma_mapping_error(wa->dev, wa->dma.address)) return -ENOMEM; wa->dma.length = len; } wa->dma.dir = dir; return 0; } static int ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) { WARN_ON(!wa->address); if (len > (wa->length - wa_offset)) return -EINVAL; scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 0); return 0; } static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) { WARN_ON(!wa->address); scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len, 1); } static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) { u8 *p, *q; int rc; rc = ccp_set_dm_area(wa, wa_offset, sg, sg_offset, len); if (rc) return rc; p = wa->address + wa_offset; q = p + len - 1; while (p < q) { *p = *p ^ *q; *q = *p ^ *q; *p = *p ^ *q; p++; q--; } return 0; } static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset, struct scatterlist *sg, unsigned int sg_offset, unsigned int len) { u8 *p, *q; p = wa->address + wa_offset; q = p + len - 1; while (p < q) { *p = *p ^ *q; *q = *p ^ *q; *p = *p ^ *q; p++; q--; } ccp_get_dm_area(wa, wa_offset, sg, sg_offset, len); } static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q) { ccp_dm_free(&data->dm_wa); ccp_sg_free(&data->sg_wa); } static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q, struct scatterlist *sg, u64 sg_len, unsigned int dm_len, enum dma_data_direction dir) { int ret; memset(data, 0, sizeof(*data)); ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len, dir); if (ret) goto e_err; ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir); if (ret) goto e_err; return 0; e_err: ccp_free_data(data, cmd_q); return ret; } static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from) { struct ccp_sg_workarea *sg_wa = &data->sg_wa; struct ccp_dm_workarea *dm_wa = &data->dm_wa; unsigned int buf_count, nbytes; /* Clear the buffer if setting it */ if (!from) memset(dm_wa->address, 0, dm_wa->length); if (!sg_wa->sg) return 0; /* Perform the copy operation * nbytes will always be <= UINT_MAX because dm_wa->length is * an unsigned int */ nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length); 
scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used, nbytes, from); /* Update the structures and generate the count */ buf_count = 0; while (sg_wa->bytes_left && (buf_count < dm_wa->length)) { nbytes = min(sg_dma_len(sg_wa->dma_sg) - sg_wa->sg_used, dm_wa->length - buf_count); nbytes = min_t(u64, sg_wa->bytes_left, nbytes); buf_count += nbytes; ccp_update_sg_workarea(sg_wa, nbytes); } return buf_count; } static unsigned int ccp_fill_queue_buf(struct ccp_data *data) { return ccp_queue_buf(data, 0); } static unsigned int ccp_empty_queue_buf(struct ccp_data *data) { return ccp_queue_buf(data, 1); } static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst, struct ccp_op *op, unsigned int block_size, bool blocksize_op) { unsigned int sg_src_len, sg_dst_len, op_len; /* The CCP can only DMA from/to one address each per operation. This * requires that we find the smallest DMA area between the source * and destination. The resulting len values will always be <= UINT_MAX * because the dma length is an unsigned int. */ sg_src_len = sg_dma_len(src->sg_wa.dma_sg) - src->sg_wa.sg_used; sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len); if (dst) { sg_dst_len = sg_dma_len(dst->sg_wa.dma_sg) - dst->sg_wa.sg_used; sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len); op_len = min(sg_src_len, sg_dst_len); } else { op_len = sg_src_len; } /* The data operation length will be at least block_size in length * or the smaller of available sg room remaining for the source or * the destination */ op_len = max(op_len, block_size); /* Unless we have to buffer data, there's no reason to wait */ op->soc = 0; if (sg_src_len < block_size) { /* Not enough data in the sg element, so it * needs to be buffered into a blocksize chunk */ int cp_len = ccp_fill_queue_buf(src); op->soc = 1; op->src.u.dma.address = src->dm_wa.dma.address; op->src.u.dma.offset = 0; op->src.u.dma.length = (blocksize_op) ? 
block_size : cp_len; } else { /* Enough data in the sg element, but we need to * adjust for any previously copied data */ op->src.u.dma.address = sg_dma_address(src->sg_wa.dma_sg); op->src.u.dma.offset = src->sg_wa.sg_used; op->src.u.dma.length = op_len & ~(block_size - 1); ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length); } if (dst) { if (sg_dst_len < block_size) { /* Not enough room in the sg element or we're on the * last piece of data (when using padding), so the * output needs to be buffered into a blocksize chunk */ op->soc = 1; op->dst.u.dma.address = dst->dm_wa.dma.address; op->dst.u.dma.offset = 0; op->dst.u.dma.length = op->src.u.dma.length; } else { /* Enough room in the sg element, but we need to * adjust for any previously used area */ op->dst.u.dma.address = sg_dma_address(dst->sg_wa.dma_sg); op->dst.u.dma.offset = dst->sg_wa.sg_used; op->dst.u.dma.length = op->src.u.dma.length; } } } static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst, struct ccp_op *op) { op->init = 0; if (dst) { if (op->dst.u.dma.address == dst->dm_wa.dma.address) ccp_empty_queue_buf(dst); else ccp_update_sg_workarea(&dst->sg_wa, op->dst.u.dma.length); } } static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q, struct ccp_dm_workarea *wa, u32 jobid, u32 sb, u32 byte_swap, bool from) { struct ccp_op op; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = jobid; op.eom = 1; if (from) { op.soc = 1; op.src.type = CCP_MEMTYPE_SB; op.src.u.sb = sb; op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = wa->dma.address; op.dst.u.dma.length = wa->length; } else { op.src.type = CCP_MEMTYPE_SYSTEM; op.src.u.dma.address = wa->dma.address; op.src.u.dma.length = wa->length; op.dst.type = CCP_MEMTYPE_SB; op.dst.u.sb = sb; } op.u.passthru.byte_swap = byte_swap; return cmd_q->ccp->vdata->perform->passthru(&op); } static int ccp_copy_to_sb(struct ccp_cmd_queue *cmd_q, struct ccp_dm_workarea *wa, u32 jobid, u32 sb, u32 byte_swap) { return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, false); } static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q, struct ccp_dm_workarea *wa, u32 jobid, u32 sb, u32 byte_swap) { return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true); } static noinline_for_stack int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_aes_engine *aes = &cmd->u.aes; struct ccp_dm_workarea key, ctx; struct ccp_data src; struct ccp_op op; unsigned int dm_offset; int ret; if (!((aes->key_len == AES_KEYSIZE_128) || (aes->key_len == AES_KEYSIZE_192) || (aes->key_len == AES_KEYSIZE_256))) return -EINVAL; if (aes->src_len & (AES_BLOCK_SIZE - 1)) return -EINVAL; if (aes->iv_len != AES_BLOCK_SIZE) return -EINVAL; if (!aes->key || !aes->iv || !aes->src) return -EINVAL; if (aes->cmac_final) { if (aes->cmac_key_len != AES_BLOCK_SIZE) return -EINVAL; if (!aes->cmac_key) return -EINVAL; } BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_key = cmd_q->sb_key; op.sb_ctx = cmd_q->sb_ctx; op.init = 1; op.u.aes.type = aes->type; op.u.aes.mode = aes->mode; op.u.aes.action = aes->action; /* All supported key sizes fit in a single (32-byte) SB entry * and must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little * endian. 
*/ ret = ccp_init_dm_workarea(&key, cmd_q, CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; dm_offset = CCP_SB_BYTES - aes->key_len; ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); if (ret) goto e_key; ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* The AES context fits in a single (32-byte) SB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); if (ret) goto e_ctx; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } /* Send data to the CCP AES engine */ ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (ret) goto e_ctx; while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true); if (aes->cmac_final && !src.sg_wa.bytes_left) { op.eom = 1; /* Push the K1/K2 key to the CCP now */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } ret = ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0, aes->cmac_key_len); if (ret) goto e_src; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } } ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } ccp_process_data(&src, NULL, &op); } /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_src; } /* ...but we only need AES_BLOCK_SIZE bytes */ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); e_src: ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static noinline_for_stack int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_aes_engine *aes = &cmd->u.aes; struct ccp_dm_workarea key, ctx, final_wa, tag; struct ccp_data src, dst; struct ccp_data aad; struct ccp_op op; unsigned int dm_offset; unsigned int authsize; unsigned int jobid; unsigned int ilen; bool in_place = true; /* Default value */ __be64 *final; int ret; struct scatterlist *p_inp, sg_inp[2]; struct scatterlist *p_tag, sg_tag[2]; struct scatterlist *p_outp, sg_outp[2]; struct scatterlist *p_aad; if (!aes->iv) return -EINVAL; if (!((aes->key_len == AES_KEYSIZE_128) || (aes->key_len == AES_KEYSIZE_192) || (aes->key_len == AES_KEYSIZE_256))) return -EINVAL; if (!aes->key) /* Gotta have a key SGL */ return -EINVAL; /* Zero defaults to 16 bytes, the maximum size */ authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE; switch (authsize) { case 16: case 15: case 14: case 13: case 12: case 8: case 4: break; default: return -EINVAL; } /* First, decompose the source buffer into AAD & PT, * and the destination buffer into AAD, CT & tag, or * the input into CT & tag. 
* It is expected that the input and output SGs will * be valid, even if the AAD and input lengths are 0. */ p_aad = aes->src; p_inp = scatterwalk_ffwd(sg_inp, aes->src, aes->aad_len); p_outp = scatterwalk_ffwd(sg_outp, aes->dst, aes->aad_len); if (aes->action == CCP_AES_ACTION_ENCRYPT) { ilen = aes->src_len; p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen); } else { /* Input length for decryption includes tag */ ilen = aes->src_len - authsize; p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen); } jobid = CCP_NEW_JOBID(cmd_q->ccp); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = jobid; op.sb_key = cmd_q->sb_key; /* Pre-allocated */ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ op.init = 1; op.u.aes.type = aes->type; /* Copy the key to the LSB */ ret = ccp_init_dm_workarea(&key, cmd_q, CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; dm_offset = CCP_SB_BYTES - aes->key_len; ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); if (ret) goto e_key; ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* Copy the context (IV) to the LSB. * There is an assumption here that the IV is 96 bits in length, plus * a nonce of 32 bits. If no IV is present, use a zeroed buffer. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len; ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); if (ret) goto e_ctx; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } op.init = 1; if (aes->aad_len > 0) { /* Step 1: Run a GHASH over the Additional Authenticated Data */ ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (ret) goto e_ctx; op.u.aes.mode = CCP_AES_MODE_GHASH; op.u.aes.action = CCP_AES_GHASHAAD; while (aad.sg_wa.bytes_left) { ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true); ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_aad; } ccp_process_data(&aad, NULL, &op); op.init = 0; } } op.u.aes.mode = CCP_AES_MODE_GCTR; op.u.aes.action = aes->action; if (ilen > 0) { /* Step 2: Run a GCTR over the plaintext */ in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false; ret = ccp_init_data(&src, cmd_q, p_inp, ilen, AES_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_aad; if (in_place) { dst = src; } else { ret = ccp_init_data(&dst, cmd_q, p_outp, ilen, AES_BLOCK_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; } op.soc = 0; op.eom = 0; op.init = 1; while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); if (!src.sg_wa.bytes_left) { unsigned int nbytes = ilen % AES_BLOCK_SIZE; if (nbytes) { op.eom = 1; op.u.aes.size = (nbytes * 8) - 1; } } ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_process_data(&src, &dst, &op); op.init = 0; } } /* Step 3: Update the IV portion of the context with the original IV */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); if (ret) goto e_dst; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* Step 4: Concatenate the lengths of the AAD and source, and * hash that 16 byte buffer. */ ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL); if (ret) goto e_dst; final = (__be64 *)final_wa.address; final[0] = cpu_to_be64(aes->aad_len * 8); final[1] = cpu_to_be64(ilen * 8); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = jobid; op.sb_key = cmd_q->sb_key; /* Pre-allocated */ op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ op.init = 1; op.u.aes.type = aes->type; op.u.aes.mode = CCP_AES_MODE_GHASH; op.u.aes.action = CCP_AES_GHASHFINAL; op.src.type = CCP_MEMTYPE_SYSTEM; op.src.u.dma.address = final_wa.dma.address; op.src.u.dma.length = AES_BLOCK_SIZE; op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = final_wa.dma.address; op.dst.u.dma.length = AES_BLOCK_SIZE; op.eom = 1; op.u.aes.size = 0; ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) goto e_final_wa; if (aes->action == CCP_AES_ACTION_ENCRYPT) { /* Put the ciphered tag after the ciphertext. */ ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize); } else { /* Does this ciphered tag match the input? */ ret = ccp_init_dm_workarea(&tag, cmd_q, authsize, DMA_BIDIRECTIONAL); if (ret) goto e_final_wa; ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize); if (ret) { ccp_dm_free(&tag); goto e_final_wa; } ret = crypto_memneq(tag.address, final_wa.address, authsize) ? 
-EBADMSG : 0; ccp_dm_free(&tag); } e_final_wa: ccp_dm_free(&final_wa); e_dst: if (ilen > 0 && !in_place) ccp_free_data(&dst, cmd_q); e_src: if (ilen > 0) ccp_free_data(&src, cmd_q); e_aad: if (aes->aad_len) ccp_free_data(&aad, cmd_q); e_ctx: ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static noinline_for_stack int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_aes_engine *aes = &cmd->u.aes; struct ccp_dm_workarea key, ctx; struct ccp_data src, dst; struct ccp_op op; unsigned int dm_offset; bool in_place = false; int ret; if (!((aes->key_len == AES_KEYSIZE_128) || (aes->key_len == AES_KEYSIZE_192) || (aes->key_len == AES_KEYSIZE_256))) return -EINVAL; if (((aes->mode == CCP_AES_MODE_ECB) || (aes->mode == CCP_AES_MODE_CBC)) && (aes->src_len & (AES_BLOCK_SIZE - 1))) return -EINVAL; if (!aes->key || !aes->src || !aes->dst) return -EINVAL; if (aes->mode != CCP_AES_MODE_ECB) { if (aes->iv_len != AES_BLOCK_SIZE) return -EINVAL; if (!aes->iv) return -EINVAL; } BUILD_BUG_ON(CCP_AES_KEY_SB_COUNT != 1); BUILD_BUG_ON(CCP_AES_CTX_SB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_key = cmd_q->sb_key; op.sb_ctx = cmd_q->sb_ctx; op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1; op.u.aes.type = aes->type; op.u.aes.mode = aes->mode; op.u.aes.action = aes->action; /* All supported key sizes fit in a single (32-byte) SB entry * and must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little * endian. */ ret = ccp_init_dm_workarea(&key, cmd_q, CCP_AES_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; dm_offset = CCP_SB_BYTES - aes->key_len; ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len); if (ret) goto e_key; ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* The AES context fits in a single (32-byte) SB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; if (aes->mode != CCP_AES_MODE_ECB) { /* Load the AES context - convert to LE */ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); if (ret) goto e_ctx; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } } switch (aes->mode) { case CCP_AES_MODE_CFB: /* CFB128 only */ case CCP_AES_MODE_CTR: op.u.aes.size = AES_BLOCK_SIZE * BITS_PER_BYTE - 1; break; default: op.u.aes.size = 0; } /* Prepare the input and output data workareas. For in-place * operations we need to set the dma direction to BIDIRECTIONAL * and copy the src workarea to the dst workarea. */ if (sg_virt(aes->src) == sg_virt(aes->dst)) in_place = true; ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len, AES_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_ctx; if (in_place) { dst = src; } else { ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len, AES_BLOCK_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; } /* Send data to the CCP AES engine */ while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true); if (!src.sg_wa.bytes_left) { op.eom = 1; /* Since we don't retrieve the AES context in ECB * mode we have to wait for the operation to complete * on the last piece of data */ if (aes->mode == CCP_AES_MODE_ECB) op.soc = 1; } ret = cmd_q->ccp->vdata->perform->aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_process_data(&src, &dst, &op); } if (aes->mode != CCP_AES_MODE_ECB) { /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* ...but we only need AES_BLOCK_SIZE bytes */ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len); } e_dst: if (!in_place) ccp_free_data(&dst, cmd_q); e_src: ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static noinline_for_stack int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_xts_aes_engine *xts = &cmd->u.xts; struct ccp_dm_workarea key, ctx; struct ccp_data src, dst; struct ccp_op op; unsigned int unit_size, dm_offset; bool in_place = false; unsigned int sb_count; enum ccp_aes_type aestype; int ret; switch (xts->unit_size) { case CCP_XTS_AES_UNIT_SIZE_16: unit_size = 16; break; case CCP_XTS_AES_UNIT_SIZE_512: unit_size = 512; break; case CCP_XTS_AES_UNIT_SIZE_1024: unit_size = 1024; break; case CCP_XTS_AES_UNIT_SIZE_2048: unit_size = 2048; break; case CCP_XTS_AES_UNIT_SIZE_4096: unit_size = 4096; break; default: return -EINVAL; } if (xts->key_len == AES_KEYSIZE_128) aestype = CCP_AES_TYPE_128; else if (xts->key_len == AES_KEYSIZE_256) aestype = CCP_AES_TYPE_256; else return -EINVAL; if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1))) return -EINVAL; if (xts->iv_len != AES_BLOCK_SIZE) return -EINVAL; if (!xts->key || !xts->iv || !xts->src || !xts->dst) return -EINVAL; BUILD_BUG_ON(CCP_XTS_AES_KEY_SB_COUNT != 1); BUILD_BUG_ON(CCP_XTS_AES_CTX_SB_COUNT != 1); ret = -EIO; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_key = cmd_q->sb_key; op.sb_ctx = cmd_q->sb_ctx; op.init = 1; op.u.xts.type = aestype; op.u.xts.action = xts->action; op.u.xts.unit_size = xts->unit_size; /* A version 3 device only supports 128-bit keys, which fits into a * single SB entry. A version 5 device uses a 512-bit vector, so two * SB entries. */ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) sb_count = CCP_XTS_AES_KEY_SB_COUNT; else sb_count = CCP5_XTS_AES_KEY_SB_COUNT; ret = ccp_init_dm_workarea(&key, cmd_q, sb_count * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { /* All supported key sizes must be in little endian format. * Use the 256-bit byte swap passthru option to convert from * big endian to little endian. 
*/ dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128; ret = ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len); if (ret) goto e_key; ret = ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len); if (ret) goto e_key; } else { /* Version 5 CCPs use a 512-bit space for the key: each portion * occupies 256 bits, or one entire slot, and is zero-padded. */ unsigned int pad; dm_offset = CCP_SB_BYTES; pad = dm_offset - xts->key_len; ret = ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len); if (ret) goto e_key; ret = ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len, xts->key_len); if (ret) goto e_key; } ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* The AES context fits in a single (32-byte) SB entry and * for XTS is already in little endian format so no byte swapping * is needed. */ ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_XTS_AES_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; ret = ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len); if (ret) goto e_ctx; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } /* Prepare the input and output data workareas. For in-place * operations we need to set the dma direction to BIDIRECTIONAL * and copy the src workarea to the dst workarea. */ if (sg_virt(xts->src) == sg_virt(xts->dst)) in_place = true; ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len, unit_size, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_ctx; if (in_place) { dst = src; } else { ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len, unit_size, DMA_FROM_DEVICE); if (ret) goto e_src; } /* Send data to the CCP AES engine */ while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, unit_size, true); if (!src.sg_wa.bytes_left) op.eom = 1; ret = cmd_q->ccp->vdata->perform->xts_aes(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_process_data(&src, &dst, &op); } /* Retrieve the AES context - convert from LE to BE using * 32-byte (256-bit) byteswapping */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* ...but we only need AES_BLOCK_SIZE bytes */ dm_offset = CCP_SB_BYTES - AES_BLOCK_SIZE; ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len); e_dst: if (!in_place) ccp_free_data(&dst, cmd_q); e_src: ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static noinline_for_stack int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_des3_engine *des3 = &cmd->u.des3; struct ccp_dm_workarea key, ctx; struct ccp_data src, dst; struct ccp_op op; unsigned int dm_offset; unsigned int len_singlekey; bool in_place = false; int ret; /* Error checks */ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) return -EINVAL; if (!cmd_q->ccp->vdata->perform->des3) return -EINVAL; if (des3->key_len != DES3_EDE_KEY_SIZE) return -EINVAL; if (((des3->mode == CCP_DES3_MODE_ECB) || (des3->mode == CCP_DES3_MODE_CBC)) && (des3->src_len & (DES3_EDE_BLOCK_SIZE - 1))) return -EINVAL; if (!des3->key || !des3->src || !des3->dst) return -EINVAL; if (des3->mode != CCP_DES3_MODE_ECB) { if (des3->iv_len != DES3_EDE_BLOCK_SIZE) return -EINVAL; if (!des3->iv) return -EINVAL; } /* Zero out all the fields of the command desc */ memset(&op, 0, sizeof(op)); /* Set 
up the Function field */ op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_key = cmd_q->sb_key; op.init = (des3->mode == CCP_DES3_MODE_ECB) ? 0 : 1; op.u.des3.type = des3->type; op.u.des3.mode = des3->mode; op.u.des3.action = des3->action; /* * All supported key sizes fit in a single (32-byte) KSB entry and * (like AES) must be in little endian format. Use the 256-bit byte * swap passthru option to convert from big endian to little endian. */ ret = ccp_init_dm_workarea(&key, cmd_q, CCP_DES3_KEY_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; /* * The contents of the key triplet are in the reverse order of what * is required by the engine. Copy the 3 pieces individually to put * them where they belong. */ dm_offset = CCP_SB_BYTES - des3->key_len; /* Basic offset */ len_singlekey = des3->key_len / 3; ret = ccp_set_dm_area(&key, dm_offset + 2 * len_singlekey, des3->key, 0, len_singlekey); if (ret) goto e_key; ret = ccp_set_dm_area(&key, dm_offset + len_singlekey, des3->key, len_singlekey, len_singlekey); if (ret) goto e_key; ret = ccp_set_dm_area(&key, dm_offset, des3->key, 2 * len_singlekey, len_singlekey); if (ret) goto e_key; /* Copy the key to the SB */ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_key; } /* * The DES3 context fits in a single (32-byte) KSB entry and * must be in little endian format. Use the 256-bit byte swap * passthru option to convert from big endian to little endian. */ if (des3->mode != CCP_DES3_MODE_ECB) { op.sb_ctx = cmd_q->sb_ctx; ret = ccp_init_dm_workarea(&ctx, cmd_q, CCP_DES3_CTX_SB_COUNT * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) goto e_key; /* Load the context into the LSB */ dm_offset = CCP_SB_BYTES - des3->iv_len; ret = ccp_set_dm_area(&ctx, dm_offset, des3->iv, 0, des3->iv_len); if (ret) goto e_ctx; ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } } /* * Prepare the input and output data workareas. For in-place * operations we need to set the dma direction to BIDIRECTIONAL * and copy the src workarea to the dst workarea. */ if (sg_virt(des3->src) == sg_virt(des3->dst)) in_place = true; ret = ccp_init_data(&src, cmd_q, des3->src, des3->src_len, DES3_EDE_BLOCK_SIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_ctx; if (in_place) dst = src; else { ret = ccp_init_data(&dst, cmd_q, des3->dst, des3->src_len, DES3_EDE_BLOCK_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; } /* Send data to the CCP DES3 engine */ while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, &dst, &op, DES3_EDE_BLOCK_SIZE, true); if (!src.sg_wa.bytes_left) { op.eom = 1; /* Since we don't retrieve the context in ECB mode * we have to wait for the operation to complete * on the last piece of data */ op.soc = 0; } ret = cmd_q->ccp->vdata->perform->des3(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_process_data(&src, &dst, &op); } if (des3->mode != CCP_DES3_MODE_ECB) { /* Retrieve the context and make BE */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */ ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0, DES3_EDE_BLOCK_SIZE); } e_dst: if (!in_place) ccp_free_data(&dst, cmd_q); e_src: ccp_free_data(&src, cmd_q); e_ctx: if (des3->mode != CCP_DES3_MODE_ECB) ccp_dm_free(&ctx); e_key: ccp_dm_free(&key); return ret; } static noinline_for_stack int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_sha_engine *sha = &cmd->u.sha; struct ccp_dm_workarea ctx; struct ccp_data src; struct ccp_op op; unsigned int ioffset, ooffset; unsigned int digest_size; int sb_count; const void *init; u64 block_size; int ctx_size; int ret; switch (sha->type) { case CCP_SHA_TYPE_1: if (sha->ctx_len < SHA1_DIGEST_SIZE) return -EINVAL; block_size = SHA1_BLOCK_SIZE; break; case CCP_SHA_TYPE_224: if (sha->ctx_len < SHA224_DIGEST_SIZE) return -EINVAL; block_size = SHA224_BLOCK_SIZE; break; case CCP_SHA_TYPE_256: if (sha->ctx_len < SHA256_DIGEST_SIZE) return -EINVAL; block_size = SHA256_BLOCK_SIZE; break; case CCP_SHA_TYPE_384: if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) || sha->ctx_len < SHA384_DIGEST_SIZE) return -EINVAL; block_size = SHA384_BLOCK_SIZE; break; case CCP_SHA_TYPE_512: if (cmd_q->ccp->vdata->version < CCP_VERSION(4, 0) || sha->ctx_len < SHA512_DIGEST_SIZE) return -EINVAL; block_size = SHA512_BLOCK_SIZE; break; default: return -EINVAL; } if (!sha->ctx) return -EINVAL; if (!sha->final && (sha->src_len & (block_size - 1))) return -EINVAL; /* The version 3 device can't handle zero-length input */ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) { if (!sha->src_len) { unsigned int digest_len; const u8 *sha_zero; /* Not final, just return */ if (!sha->final) return 0; /* CCP can't do a zero length sha operation so the * caller must buffer the data. */ if (sha->msg_bits) return -EINVAL; /* The CCP cannot perform zero-length sha operations * so the caller is required to buffer data for the * final operation. However, a sha operation for a * message with a total length of zero is valid so * known values are required to supply the result. 
*/ switch (sha->type) { case CCP_SHA_TYPE_1: sha_zero = sha1_zero_message_hash; digest_len = SHA1_DIGEST_SIZE; break; case CCP_SHA_TYPE_224: sha_zero = sha224_zero_message_hash; digest_len = SHA224_DIGEST_SIZE; break; case CCP_SHA_TYPE_256: sha_zero = sha256_zero_message_hash; digest_len = SHA256_DIGEST_SIZE; break; default: return -EINVAL; } scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0, digest_len, 1); return 0; } } /* Set variables used throughout */ switch (sha->type) { case CCP_SHA_TYPE_1: digest_size = SHA1_DIGEST_SIZE; init = (void *) ccp_sha1_init; ctx_size = SHA1_DIGEST_SIZE; sb_count = 1; if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) ooffset = ioffset = CCP_SB_BYTES - SHA1_DIGEST_SIZE; else ooffset = ioffset = 0; break; case CCP_SHA_TYPE_224: digest_size = SHA224_DIGEST_SIZE; init = (void *) ccp_sha224_init; ctx_size = SHA256_DIGEST_SIZE; sb_count = 1; ioffset = 0; if (cmd_q->ccp->vdata->version != CCP_VERSION(3, 0)) ooffset = CCP_SB_BYTES - SHA224_DIGEST_SIZE; else ooffset = 0; break; case CCP_SHA_TYPE_256: digest_size = SHA256_DIGEST_SIZE; init = (void *) ccp_sha256_init; ctx_size = SHA256_DIGEST_SIZE; sb_count = 1; ooffset = ioffset = 0; break; case CCP_SHA_TYPE_384: digest_size = SHA384_DIGEST_SIZE; init = (void *) ccp_sha384_init; ctx_size = SHA512_DIGEST_SIZE; sb_count = 2; ioffset = 0; ooffset = 2 * CCP_SB_BYTES - SHA384_DIGEST_SIZE; break; case CCP_SHA_TYPE_512: digest_size = SHA512_DIGEST_SIZE; init = (void *) ccp_sha512_init; ctx_size = SHA512_DIGEST_SIZE; sb_count = 2; ooffset = ioffset = 0; break; default: ret = -EINVAL; goto e_data; } /* For zero-length plaintext the src pointer is ignored; * otherwise both parts must be valid */ if (sha->src_len && !sha->src) return -EINVAL; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */ op.u.sha.type = sha->type; op.u.sha.msg_bits = sha->msg_bits; /* For SHA1/224/256 the context fits in a single (32-byte) SB entry; * SHA384/512 require 2 adjacent SB slots, with the right half in the * first slot, and the left half in the second. Each portion must then * be in little endian format: use the 256-bit byte swap option. 
*/ ret = ccp_init_dm_workarea(&ctx, cmd_q, sb_count * CCP_SB_BYTES, DMA_BIDIRECTIONAL); if (ret) return ret; if (sha->first) { switch (sha->type) { case CCP_SHA_TYPE_1: case CCP_SHA_TYPE_224: case CCP_SHA_TYPE_256: memcpy(ctx.address + ioffset, init, ctx_size); break; case CCP_SHA_TYPE_384: case CCP_SHA_TYPE_512: memcpy(ctx.address + ctx_size / 2, init, ctx_size / 2); memcpy(ctx.address, init + ctx_size / 2, ctx_size / 2); break; default: ret = -EINVAL; goto e_ctx; } } else { /* Restore the context */ ret = ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sb_count * CCP_SB_BYTES); if (ret) goto e_ctx; } ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_ctx; } if (sha->src) { /* Send data to the CCP SHA engine; block_size is set above */ ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len, block_size, DMA_TO_DEVICE); if (ret) goto e_ctx; while (src.sg_wa.bytes_left) { ccp_prepare_data(&src, NULL, &op, block_size, false); if (sha->final && !src.sg_wa.bytes_left) op.eom = 1; ret = cmd_q->ccp->vdata->perform->sha(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_data; } ccp_process_data(&src, NULL, &op); } } else { op.eom = 1; ret = cmd_q->ccp->vdata->perform->sha(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_data; } } /* Retrieve the SHA context - convert from LE to BE using * 32-byte (256-bit) byteswapping to BE */ ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx, CCP_PASSTHRU_BYTESWAP_256BIT); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_data; } if (sha->final) { /* Finishing up, so get the digest */ switch (sha->type) { case CCP_SHA_TYPE_1: case CCP_SHA_TYPE_224: case CCP_SHA_TYPE_256: ccp_get_dm_area(&ctx, ooffset, sha->ctx, 0, digest_size); break; case CCP_SHA_TYPE_384: case CCP_SHA_TYPE_512: ccp_get_dm_area(&ctx, 0, sha->ctx, LSB_ITEM_SIZE - ooffset, LSB_ITEM_SIZE); ccp_get_dm_area(&ctx, LSB_ITEM_SIZE + ooffset, sha->ctx, 0, LSB_ITEM_SIZE - ooffset); break; default: ret = -EINVAL; goto e_data; } } else { /* Stash the context */ ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sb_count * CCP_SB_BYTES); } if (sha->final && sha->opad) { /* HMAC operation, recursively perform final SHA */ struct ccp_cmd hmac_cmd; struct scatterlist sg; u8 *hmac_buf; if (sha->opad_len != block_size) { ret = -EINVAL; goto e_data; } hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL); if (!hmac_buf) { ret = -ENOMEM; goto e_data; } sg_init_one(&sg, hmac_buf, block_size + digest_size); scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0); switch (sha->type) { case CCP_SHA_TYPE_1: case CCP_SHA_TYPE_224: case CCP_SHA_TYPE_256: memcpy(hmac_buf + block_size, ctx.address + ooffset, digest_size); break; case CCP_SHA_TYPE_384: case CCP_SHA_TYPE_512: memcpy(hmac_buf + block_size, ctx.address + LSB_ITEM_SIZE + ooffset, LSB_ITEM_SIZE); memcpy(hmac_buf + block_size + (LSB_ITEM_SIZE - ooffset), ctx.address, LSB_ITEM_SIZE); break; default: kfree(hmac_buf); ret = -EINVAL; goto e_data; } memset(&hmac_cmd, 0, sizeof(hmac_cmd)); hmac_cmd.engine = CCP_ENGINE_SHA; hmac_cmd.u.sha.type = sha->type; hmac_cmd.u.sha.ctx = sha->ctx; hmac_cmd.u.sha.ctx_len = sha->ctx_len; hmac_cmd.u.sha.src = &sg; hmac_cmd.u.sha.src_len = block_size + digest_size; hmac_cmd.u.sha.opad = NULL; hmac_cmd.u.sha.opad_len = 0; hmac_cmd.u.sha.first = 1; hmac_cmd.u.sha.final = 1; hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3; ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd); if (ret) cmd->engine_error = hmac_cmd.engine_error; 
kfree(hmac_buf); } e_data: if (sha->src) ccp_free_data(&src, cmd_q); e_ctx: ccp_dm_free(&ctx); return ret; } static noinline_for_stack int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_rsa_engine *rsa = &cmd->u.rsa; struct ccp_dm_workarea exp, src, dst; struct ccp_op op; unsigned int sb_count, i_len, o_len; int ret; /* Check against the maximum allowable size, in bits */ if (rsa->key_size > cmd_q->ccp->vdata->rsamax) return -EINVAL; if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst) return -EINVAL; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); /* The RSA modulus must precede the message being acted upon, so * it must be copied to a DMA area where the message and the * modulus can be concatenated. Therefore the input buffer * length required is twice the output buffer length (which * must be a multiple of 256-bits). Compute o_len, i_len in bytes. * Buffer sizes must be a multiple of 32 bytes; rounding up may be * required. */ o_len = 32 * ((rsa->key_size + 255) / 256); i_len = o_len * 2; sb_count = 0; if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { /* sb_count is the number of storage block slots required * for the modulus. */ sb_count = o_len / CCP_SB_BYTES; op.sb_key = cmd_q->ccp->vdata->perform->sballoc(cmd_q, sb_count); if (!op.sb_key) return -EIO; } else { /* A version 5 device allows a modulus size that will not fit * in the LSB, so the command will transfer it from memory. * Set the sb key to the default, even though it's not used. */ op.sb_key = cmd_q->sb_key; } /* The RSA exponent must be in little endian format. Reverse its * byte order. */ ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE); if (ret) goto e_sb; ret = ccp_reverse_set_dm_area(&exp, 0, rsa->exp, 0, rsa->exp_len); if (ret) goto e_exp; if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0)) { /* Copy the exponent to the local storage block, using * as many 32-byte blocks as were allocated above. It's * already little endian, so no further change is required. */ ret = ccp_copy_to_sb(cmd_q, &exp, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_exp; } } else { /* The exponent can be retrieved from memory via DMA. */ op.exp.u.dma.address = exp.dma.address; op.exp.u.dma.offset = 0; } /* Concatenate the modulus and the message. Both the modulus and * the operands must be in little endian format. Since the input * is in big endian format it must be converted. 
*/ ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE); if (ret) goto e_exp; ret = ccp_reverse_set_dm_area(&src, 0, rsa->mod, 0, rsa->mod_len); if (ret) goto e_src; ret = ccp_reverse_set_dm_area(&src, o_len, rsa->src, 0, rsa->src_len); if (ret) goto e_src; /* Prepare the output area for the operation */ ret = ccp_init_dm_workarea(&dst, cmd_q, o_len, DMA_FROM_DEVICE); if (ret) goto e_src; op.soc = 1; op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = i_len; op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = o_len; op.u.rsa.mod_size = rsa->key_size; op.u.rsa.input_len = i_len; ret = cmd_q->ccp->vdata->perform->rsa(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ccp_reverse_get_dm_area(&dst, 0, rsa->dst, 0, rsa->mod_len); e_dst: ccp_dm_free(&dst); e_src: ccp_dm_free(&src); e_exp: ccp_dm_free(&exp); e_sb: if (sb_count) cmd_q->ccp->vdata->perform->sbfree(cmd_q, op.sb_key, sb_count); return ret; } static noinline_for_stack int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_passthru_engine *pt = &cmd->u.passthru; struct ccp_dm_workarea mask; struct ccp_data src, dst; struct ccp_op op; bool in_place = false; unsigned int i; int ret = 0; if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) return -EINVAL; if (!pt->src || !pt->dst) return -EINVAL; if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) return -EINVAL; if (!pt->mask) return -EINVAL; } BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { /* Load the mask */ op.sb_key = cmd_q->sb_key; ret = ccp_init_dm_workarea(&mask, cmd_q, CCP_PASSTHRU_SB_COUNT * CCP_SB_BYTES, DMA_TO_DEVICE); if (ret) return ret; ret = ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len); if (ret) goto e_mask; ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_mask; } } /* Prepare the input and output data workareas. For in-place * operations we need to set the dma direction to BIDIRECTIONAL * and copy the src workarea to the dst workarea. */ if (sg_virt(pt->src) == sg_virt(pt->dst)) in_place = true; ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len, CCP_PASSTHRU_MASKSIZE, in_place ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE); if (ret) goto e_mask; if (in_place) { dst = src; } else { ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len, CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE); if (ret) goto e_src; } /* Send data to the CCP Passthru engine * Because the CCP engine works on a single source and destination * dma address at a time, each entry in the source scatterlist * (after the dma_map_sg call) must be less than or equal to the * (remaining) length in the destination scatterlist entry and the * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE */ dst.sg_wa.sg_used = 0; for (i = 1; i <= src.sg_wa.dma_count; i++) { if (!dst.sg_wa.sg || (sg_dma_len(dst.sg_wa.sg) < sg_dma_len(src.sg_wa.sg))) { ret = -EINVAL; goto e_dst; } if (i == src.sg_wa.dma_count) { op.eom = 1; op.soc = 1; } op.src.type = CCP_MEMTYPE_SYSTEM; op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); op.src.u.dma.offset = 0; op.src.u.dma.length = sg_dma_len(src.sg_wa.sg); op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); op.dst.u.dma.offset = dst.sg_wa.sg_used; op.dst.u.dma.length = op.src.u.dma.length; ret = cmd_q->ccp->vdata->perform->passthru(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } dst.sg_wa.sg_used += sg_dma_len(src.sg_wa.sg); if (dst.sg_wa.sg_used == sg_dma_len(dst.sg_wa.sg)) { dst.sg_wa.sg = sg_next(dst.sg_wa.sg); dst.sg_wa.sg_used = 0; } src.sg_wa.sg = sg_next(src.sg_wa.sg); } e_dst: if (!in_place) ccp_free_data(&dst, cmd_q); e_src: ccp_free_data(&src, cmd_q); e_mask: if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) ccp_dm_free(&mask); return ret; } static noinline_for_stack int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap; struct ccp_dm_workarea mask; struct ccp_op op; int ret; if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1))) return -EINVAL; if (!pt->src_dma || !pt->dst_dma) return -EINVAL; if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { if (pt->mask_len != CCP_PASSTHRU_MASKSIZE) return -EINVAL; if (!pt->mask) return -EINVAL; } BUILD_BUG_ON(CCP_PASSTHRU_SB_COUNT != 1); memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) { /* Load the mask */ op.sb_key = cmd_q->sb_key; mask.length = pt->mask_len; mask.dma.address = pt->mask; mask.dma.length = pt->mask_len; ret = ccp_copy_to_sb(cmd_q, &mask, op.jobid, op.sb_key, CCP_PASSTHRU_BYTESWAP_NOOP); if (ret) { cmd->engine_error = cmd_q->cmd_error; return ret; } } /* Send data to the CCP Passthru engine */ op.eom = 1; op.soc = 1; op.src.type = CCP_MEMTYPE_SYSTEM; op.src.u.dma.address = pt->src_dma; op.src.u.dma.offset = 0; op.src.u.dma.length = pt->src_len; op.dst.type = CCP_MEMTYPE_SYSTEM; op.dst.u.dma.address = pt->dst_dma; op.dst.u.dma.offset = 0; op.dst.u.dma.length = pt->src_len; ret = cmd_q->ccp->vdata->perform->passthru(&op); if (ret) cmd->engine_error = cmd_q->cmd_error; return ret; } static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_ecc_engine *ecc = &cmd->u.ecc; struct ccp_dm_workarea src, dst; struct ccp_op op; int ret; u8 *save; if (!ecc->u.mm.operand_1 || (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) if (!ecc->u.mm.operand_2 || (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (!ecc->u.mm.result || (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES)) return -EINVAL; memset(&op, 0, 
sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); /* Concatenate the modulus and the operands. Both the modulus and * the operands must be in little endian format. Since the input * is in big endian format it must be converted and placed in a * fixed length buffer. */ ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, DMA_TO_DEVICE); if (ret) return ret; /* Save the workarea address since it is updated in order to perform * the concatenation */ save = src.address; /* Copy the ECC modulus */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; /* Copy the first operand */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_1, 0, ecc->u.mm.operand_1_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) { /* Copy the second operand */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.mm.operand_2, 0, ecc->u.mm.operand_2_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; } /* Restore the workarea address */ src.address = save; /* Prepare the output area for the operation */ ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; op.soc = 1; op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = src.length; op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = dst.length; op.u.ecc.function = cmd->u.ecc.function; ret = cmd_q->ccp->vdata->perform->ecc(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ecc->ecc_result = le16_to_cpup( (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET)); if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { ret = -EIO; goto e_dst; } /* Save the ECC result */ ccp_reverse_get_dm_area(&dst, 0, ecc->u.mm.result, 0, CCP_ECC_MODULUS_BYTES); e_dst: ccp_dm_free(&dst); e_src: ccp_dm_free(&src); return ret; } static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_ecc_engine *ecc = &cmd->u.ecc; struct ccp_dm_workarea src, dst; struct ccp_op op; int ret; u8 *save; if (!ecc->u.pm.point_1.x || (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.point_1.y || (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { if (!ecc->u.pm.point_2.x || (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.point_2.y || (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; } else { if (!ecc->u.pm.domain_a || (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) if (!ecc->u.pm.scalar || (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; } if (!ecc->u.pm.result.x || (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) || !ecc->u.pm.result.y || (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES)) return -EINVAL; memset(&op, 0, sizeof(op)); op.cmd_q = cmd_q; op.jobid = CCP_NEW_JOBID(cmd_q->ccp); /* Concatenate the modulus and the operands. Both the modulus and * the operands must be in little endian format. Since the input * is in big endian format it must be converted and placed in a * fixed length buffer. 
*/ ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE, DMA_TO_DEVICE); if (ret) return ret; /* Save the workarea address since it is updated in order to perform * the concatenation */ save = src.address; /* Copy the ECC modulus */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->mod, 0, ecc->mod_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; /* Copy the first point X and Y coordinate */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.x, 0, ecc->u.pm.point_1.x_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_1.y, 0, ecc->u.pm.point_1.y_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; /* Set the first point Z coordinate to 1 */ *src.address = 0x01; src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) { /* Copy the second point X and Y coordinate */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.x, 0, ecc->u.pm.point_2.x_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.point_2.y, 0, ecc->u.pm.point_2.y_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; /* Set the second point Z coordinate to 1 */ *src.address = 0x01; src.address += CCP_ECC_OPERAND_SIZE; } else { /* Copy the Domain "a" parameter */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.domain_a, 0, ecc->u.pm.domain_a_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) { /* Copy the scalar value */ ret = ccp_reverse_set_dm_area(&src, 0, ecc->u.pm.scalar, 0, ecc->u.pm.scalar_len); if (ret) goto e_src; src.address += CCP_ECC_OPERAND_SIZE; } } /* Restore the workarea address */ src.address = save; /* Prepare the output area for the operation */ ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE, DMA_FROM_DEVICE); if (ret) goto e_src; op.soc = 1; op.src.u.dma.address = src.dma.address; op.src.u.dma.offset = 0; op.src.u.dma.length = src.length; op.dst.u.dma.address = dst.dma.address; op.dst.u.dma.offset = 0; op.dst.u.dma.length = dst.length; op.u.ecc.function = cmd->u.ecc.function; ret = cmd_q->ccp->vdata->perform->ecc(&op); if (ret) { cmd->engine_error = cmd_q->cmd_error; goto e_dst; } ecc->ecc_result = le16_to_cpup( (const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET)); if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) { ret = -EIO; goto e_dst; } /* Save the workarea address since it is updated as we walk through * to copy the point math result */ save = dst.address; /* Save the ECC result X and Y coordinates */ ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.x, 0, CCP_ECC_MODULUS_BYTES); dst.address += CCP_ECC_OUTPUT_SIZE; ccp_reverse_get_dm_area(&dst, 0, ecc->u.pm.result.y, 0, CCP_ECC_MODULUS_BYTES); /* Restore the workarea address */ dst.address = save; e_dst: ccp_dm_free(&dst); e_src: ccp_dm_free(&src); return ret; } static noinline_for_stack int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { struct ccp_ecc_engine *ecc = &cmd->u.ecc; ecc->ecc_result = 0; if (!ecc->mod || (ecc->mod_len > CCP_ECC_MODULUS_BYTES)) return -EINVAL; switch (ecc->function) { case CCP_ECC_FUNCTION_MMUL_384BIT: case CCP_ECC_FUNCTION_MADD_384BIT: case CCP_ECC_FUNCTION_MINV_384BIT: return ccp_run_ecc_mm_cmd(cmd_q, cmd); case CCP_ECC_FUNCTION_PADD_384BIT: case CCP_ECC_FUNCTION_PMUL_384BIT: case CCP_ECC_FUNCTION_PDBL_384BIT: return ccp_run_ecc_pm_cmd(cmd_q, cmd); default: return -EINVAL; } } int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, 
struct ccp_cmd *cmd) { int ret; cmd->engine_error = 0; cmd_q->cmd_error = 0; cmd_q->int_rcvd = 0; cmd_q->free_slots = cmd_q->ccp->vdata->perform->get_free_slots(cmd_q); switch (cmd->engine) { case CCP_ENGINE_AES: switch (cmd->u.aes.mode) { case CCP_AES_MODE_CMAC: ret = ccp_run_aes_cmac_cmd(cmd_q, cmd); break; case CCP_AES_MODE_GCM: ret = ccp_run_aes_gcm_cmd(cmd_q, cmd); break; default: ret = ccp_run_aes_cmd(cmd_q, cmd); break; } break; case CCP_ENGINE_XTS_AES_128: ret = ccp_run_xts_aes_cmd(cmd_q, cmd); break; case CCP_ENGINE_DES3: ret = ccp_run_des3_cmd(cmd_q, cmd); break; case CCP_ENGINE_SHA: ret = ccp_run_sha_cmd(cmd_q, cmd); break; case CCP_ENGINE_RSA: ret = ccp_run_rsa_cmd(cmd_q, cmd); break; case CCP_ENGINE_PASSTHRU: if (cmd->flags & CCP_CMD_PASSTHRU_NO_DMA_MAP) ret = ccp_run_passthru_nomap_cmd(cmd_q, cmd); else ret = ccp_run_passthru_cmd(cmd_q, cmd); break; case CCP_ENGINE_ECC: ret = ccp_run_ecc_cmd(cmd_q, cmd); break; default: ret = -EINVAL; } return ret; }
linux-master
drivers/crypto/ccp/ccp-ops.c
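A minimal caller sketch for the ccp_run_cmd() dispatcher above, assuming the same headers ccp-ops.c already pulls in: it fills a struct ccp_cmd for an in-place, single-block AES-128-CBC encryption using only fields that ccp_run_aes_cmd() validates, then submits it. The helper name example_aes_cbc_encrypt and the flat key/iv/buf buffers are illustrative assumptions, not part of the driver.

/* Illustrative only: not driver code; the helper name and buffers are hypothetical. */
static int example_aes_cbc_encrypt(struct ccp_cmd_queue *cmd_q,
				   u8 *key, u8 *iv, u8 *buf)
{
	struct scatterlist sg_key, sg_iv, sg_data;
	struct ccp_cmd cmd;

	/* Wrap the flat buffers in single-entry scatterlists */
	sg_init_one(&sg_key, key, AES_KEYSIZE_128);
	sg_init_one(&sg_iv, iv, AES_BLOCK_SIZE);
	sg_init_one(&sg_data, buf, AES_BLOCK_SIZE);

	memset(&cmd, 0, sizeof(cmd));
	cmd.engine = CCP_ENGINE_AES;
	cmd.u.aes.type = CCP_AES_TYPE_128;
	cmd.u.aes.mode = CCP_AES_MODE_CBC;
	cmd.u.aes.action = CCP_AES_ACTION_ENCRYPT;
	cmd.u.aes.key = &sg_key;
	cmd.u.aes.key_len = AES_KEYSIZE_128;
	cmd.u.aes.iv = &sg_iv;
	cmd.u.aes.iv_len = AES_BLOCK_SIZE;	/* required for non-ECB modes */
	cmd.u.aes.src = &sg_data;
	cmd.u.aes.src_len = AES_BLOCK_SIZE;	/* CBC needs block-aligned lengths */
	cmd.u.aes.dst = &sg_data;		/* same sg as src, so in-place */

	return ccp_run_cmd(cmd_q, &cmd);
}

ccp_run_cmd() routes such a command to ccp_run_aes_cmd(), which copies the key and IV into local storage blocks, streams the data through the AES engine block by block, and writes the updated IV back into cmd.u.aes.iv.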
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Secure Processor device driver * * Copyright (C) 2013,2019 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <[email protected]> * Author: Gary R Hook <[email protected]> */ #include <linux/bitfield.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/dma-mapping.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/ccp.h> #include "ccp-dev.h" #include "psp-dev.h" /* used for version string AA.BB.CC.DD */ #define AA GENMASK(31, 24) #define BB GENMASK(23, 16) #define CC GENMASK(15, 8) #define DD GENMASK(7, 0) #define MSIX_VECTORS 2 struct sp_pci { int msix_count; struct msix_entry msix_entry[MSIX_VECTORS]; }; static struct sp_device *sp_dev_master; #define security_attribute_show(name, def) \ static ssize_t name##_show(struct device *d, struct device_attribute *attr, \ char *buf) \ { \ struct sp_device *sp = dev_get_drvdata(d); \ struct psp_device *psp = sp->psp_data; \ int bit = PSP_SECURITY_##def << PSP_CAPABILITY_PSP_SECURITY_OFFSET; \ return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0); \ } security_attribute_show(fused_part, FUSED_PART) static DEVICE_ATTR_RO(fused_part); security_attribute_show(debug_lock_on, DEBUG_LOCK_ON) static DEVICE_ATTR_RO(debug_lock_on); security_attribute_show(tsme_status, TSME_STATUS) static DEVICE_ATTR_RO(tsme_status); security_attribute_show(anti_rollback_status, ANTI_ROLLBACK_STATUS) static DEVICE_ATTR_RO(anti_rollback_status); security_attribute_show(rpmc_production_enabled, RPMC_PRODUCTION_ENABLED) static DEVICE_ATTR_RO(rpmc_production_enabled); security_attribute_show(rpmc_spirom_available, RPMC_SPIROM_AVAILABLE) static DEVICE_ATTR_RO(rpmc_spirom_available); security_attribute_show(hsp_tpm_available, HSP_TPM_AVAILABLE) static DEVICE_ATTR_RO(hsp_tpm_available); security_attribute_show(rom_armor_enforced, ROM_ARMOR_ENFORCED) static DEVICE_ATTR_RO(rom_armor_enforced); static struct attribute *psp_security_attrs[] = { &dev_attr_fused_part.attr, &dev_attr_debug_lock_on.attr, &dev_attr_tsme_status.attr, &dev_attr_anti_rollback_status.attr, &dev_attr_rpmc_production_enabled.attr, &dev_attr_rpmc_spirom_available.attr, &dev_attr_hsp_tpm_available.attr, &dev_attr_rom_armor_enforced.attr, NULL }; static umode_t psp_security_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct device *dev = kobj_to_dev(kobj); struct sp_device *sp = dev_get_drvdata(dev); struct psp_device *psp = sp->psp_data; if (psp && (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING)) return 0444; return 0; } static struct attribute_group psp_security_attr_group = { .attrs = psp_security_attrs, .is_visible = psp_security_is_visible, }; #define version_attribute_show(name, _offset) \ static ssize_t name##_show(struct device *d, struct device_attribute *attr, \ char *buf) \ { \ struct sp_device *sp = dev_get_drvdata(d); \ struct psp_device *psp = sp->psp_data; \ unsigned int val = ioread32(psp->io_regs + _offset); \ return sysfs_emit(buf, "%02lx.%02lx.%02lx.%02lx\n", \ FIELD_GET(AA, val), \ FIELD_GET(BB, val), \ FIELD_GET(CC, val), \ FIELD_GET(DD, val)); \ } version_attribute_show(bootloader_version, psp->vdata->bootloader_info_reg) static DEVICE_ATTR_RO(bootloader_version); version_attribute_show(tee_version, psp->vdata->tee->info_reg) static DEVICE_ATTR_RO(tee_version); static struct attribute *psp_firmware_attrs[] = { 
&dev_attr_bootloader_version.attr, &dev_attr_tee_version.attr, NULL, }; static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { struct device *dev = kobj_to_dev(kobj); struct sp_device *sp = dev_get_drvdata(dev); struct psp_device *psp = sp->psp_data; unsigned int val = 0xffffffff; if (!psp) return 0; if (attr == &dev_attr_bootloader_version.attr && psp->vdata->bootloader_info_reg) val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg); if (attr == &dev_attr_tee_version.attr && psp->capability & PSP_CAPABILITY_TEE && psp->vdata->tee->info_reg) val = ioread32(psp->io_regs + psp->vdata->tee->info_reg); /* If platform disallows accessing this register it will be all f's */ if (val != 0xffffffff) return 0444; return 0; } static struct attribute_group psp_firmware_attr_group = { .attrs = psp_firmware_attrs, .is_visible = psp_firmware_is_visible, }; static const struct attribute_group *psp_groups[] = { &psp_security_attr_group, &psp_firmware_attr_group, NULL, }; static int sp_get_msix_irqs(struct sp_device *sp) { struct sp_pci *sp_pci = sp->dev_specific; struct device *dev = sp->dev; struct pci_dev *pdev = to_pci_dev(dev); int v, ret; for (v = 0; v < ARRAY_SIZE(sp_pci->msix_entry); v++) sp_pci->msix_entry[v].entry = v; ret = pci_enable_msix_range(pdev, sp_pci->msix_entry, 1, v); if (ret < 0) return ret; sp_pci->msix_count = ret; sp->use_tasklet = true; sp->psp_irq = sp_pci->msix_entry[0].vector; sp->ccp_irq = (sp_pci->msix_count > 1) ? sp_pci->msix_entry[1].vector : sp_pci->msix_entry[0].vector; return 0; } static int sp_get_msi_irq(struct sp_device *sp) { struct device *dev = sp->dev; struct pci_dev *pdev = to_pci_dev(dev); int ret; ret = pci_enable_msi(pdev); if (ret) return ret; sp->ccp_irq = pdev->irq; sp->psp_irq = pdev->irq; return 0; } static int sp_get_irqs(struct sp_device *sp) { struct device *dev = sp->dev; int ret; ret = sp_get_msix_irqs(sp); if (!ret) return 0; /* Couldn't get MSI-X vectors, try MSI */ dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret); ret = sp_get_msi_irq(sp); if (!ret) return 0; /* Couldn't get MSI interrupt */ dev_notice(dev, "could not enable MSI (%d)\n", ret); return ret; } static void sp_free_irqs(struct sp_device *sp) { struct sp_pci *sp_pci = sp->dev_specific; struct device *dev = sp->dev; struct pci_dev *pdev = to_pci_dev(dev); if (sp_pci->msix_count) pci_disable_msix(pdev); else if (sp->psp_irq) pci_disable_msi(pdev); sp->ccp_irq = 0; sp->psp_irq = 0; } static bool sp_pci_is_master(struct sp_device *sp) { struct device *dev_cur, *dev_new; struct pci_dev *pdev_cur, *pdev_new; dev_new = sp->dev; dev_cur = sp_dev_master->dev; pdev_new = to_pci_dev(dev_new); pdev_cur = to_pci_dev(dev_cur); if (pdev_new->bus->number < pdev_cur->bus->number) return true; if (PCI_SLOT(pdev_new->devfn) < PCI_SLOT(pdev_cur->devfn)) return true; if (PCI_FUNC(pdev_new->devfn) < PCI_FUNC(pdev_cur->devfn)) return true; return false; } static void psp_set_master(struct sp_device *sp) { if (!sp_dev_master) { sp_dev_master = sp; return; } if (sp_pci_is_master(sp)) sp_dev_master = sp; } static struct sp_device *psp_get_master(void) { return sp_dev_master; } static void psp_clear_master(struct sp_device *sp) { if (sp == sp_dev_master) { sp_dev_master = NULL; dev_dbg(sp->dev, "Cleared sp_dev_master\n"); } } static int sp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct sp_device *sp; struct sp_pci *sp_pci; struct device *dev = &pdev->dev; void __iomem * const *iomap_table; int bar_mask; int ret; ret = 
-ENOMEM; sp = sp_alloc_struct(dev); if (!sp) goto e_err; sp_pci = devm_kzalloc(dev, sizeof(*sp_pci), GFP_KERNEL); if (!sp_pci) goto e_err; sp->dev_specific = sp_pci; sp->dev_vdata = (struct sp_dev_vdata *)id->driver_data; if (!sp->dev_vdata) { ret = -ENODEV; dev_err(dev, "missing driver data\n"); goto e_err; } ret = pcim_enable_device(pdev); if (ret) { dev_err(dev, "pcim_enable_device failed (%d)\n", ret); goto e_err; } bar_mask = pci_select_bars(pdev, IORESOURCE_MEM); ret = pcim_iomap_regions(pdev, bar_mask, "ccp"); if (ret) { dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret); goto e_err; } iomap_table = pcim_iomap_table(pdev); if (!iomap_table) { dev_err(dev, "pcim_iomap_table failed\n"); ret = -ENOMEM; goto e_err; } sp->io_map = iomap_table[sp->dev_vdata->bar]; if (!sp->io_map) { dev_err(dev, "ioremap failed\n"); ret = -ENOMEM; goto e_err; } ret = sp_get_irqs(sp); if (ret) goto e_err; pci_set_master(pdev); sp->set_psp_master_device = psp_set_master; sp->get_psp_master_device = psp_get_master; sp->clear_psp_master_device = psp_clear_master; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (ret) { ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) { dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); goto free_irqs; } } dev_set_drvdata(dev, sp); ret = sp_init(sp); if (ret) goto free_irqs; return 0; free_irqs: sp_free_irqs(sp); e_err: dev_notice(dev, "initialization failed\n"); return ret; } static void sp_pci_shutdown(struct pci_dev *pdev) { struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); if (!sp) return; sp_destroy(sp); } static void sp_pci_remove(struct pci_dev *pdev) { struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); if (!sp) return; sp_destroy(sp); sp_free_irqs(sp); } static int __maybe_unused sp_pci_suspend(struct device *dev) { struct sp_device *sp = dev_get_drvdata(dev); return sp_suspend(sp); } static int __maybe_unused sp_pci_resume(struct device *dev) { struct sp_device *sp = dev_get_drvdata(dev); return sp_resume(sp); } #ifdef CONFIG_CRYPTO_DEV_SP_PSP static const struct sev_vdata sevv1 = { .cmdresp_reg = 0x10580, /* C2PMSG_32 */ .cmdbuff_addr_lo_reg = 0x105e0, /* C2PMSG_56 */ .cmdbuff_addr_hi_reg = 0x105e4, /* C2PMSG_57 */ }; static const struct sev_vdata sevv2 = { .cmdresp_reg = 0x10980, /* C2PMSG_32 */ .cmdbuff_addr_lo_reg = 0x109e0, /* C2PMSG_56 */ .cmdbuff_addr_hi_reg = 0x109e4, /* C2PMSG_57 */ }; static const struct tee_vdata teev1 = { .cmdresp_reg = 0x10544, /* C2PMSG_17 */ .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */ .cmdbuff_addr_hi_reg = 0x1054c, /* C2PMSG_19 */ .ring_wptr_reg = 0x10550, /* C2PMSG_20 */ .ring_rptr_reg = 0x10554, /* C2PMSG_21 */ .info_reg = 0x109e8, /* C2PMSG_58 */ }; static const struct tee_vdata teev2 = { .cmdresp_reg = 0x10944, /* C2PMSG_17 */ .cmdbuff_addr_lo_reg = 0x10948, /* C2PMSG_18 */ .cmdbuff_addr_hi_reg = 0x1094c, /* C2PMSG_19 */ .ring_wptr_reg = 0x10950, /* C2PMSG_20 */ .ring_rptr_reg = 0x10954, /* C2PMSG_21 */ }; static const struct platform_access_vdata pa_v1 = { .cmdresp_reg = 0x10570, /* C2PMSG_28 */ .cmdbuff_addr_lo_reg = 0x10574, /* C2PMSG_29 */ .cmdbuff_addr_hi_reg = 0x10578, /* C2PMSG_30 */ .doorbell_button_reg = 0x10a24, /* C2PMSG_73 */ .doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */ }; static const struct platform_access_vdata pa_v2 = { .doorbell_button_reg = 0x10a24, /* C2PMSG_73 */ .doorbell_cmd_reg = 0x10a40, /* C2PMSG_80 */ }; static const struct psp_vdata pspv1 = { .sev = &sevv1, .bootloader_info_reg = 0x105ec, /* C2PMSG_59 */ 
.feature_reg = 0x105fc, /* C2PMSG_63 */ .inten_reg = 0x10610, /* P2CMSG_INTEN */ .intsts_reg = 0x10614, /* P2CMSG_INTSTS */ }; static const struct psp_vdata pspv2 = { .sev = &sevv2, .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */ .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10690, /* P2CMSG_INTEN */ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ }; static const struct psp_vdata pspv3 = { .tee = &teev1, .platform_access = &pa_v1, .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */ .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10690, /* P2CMSG_INTEN */ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ .platform_features = PLATFORM_FEATURE_DBC, }; static const struct psp_vdata pspv4 = { .sev = &sevv2, .tee = &teev1, .bootloader_info_reg = 0x109ec, /* C2PMSG_59 */ .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10690, /* P2CMSG_INTEN */ .intsts_reg = 0x10694, /* P2CMSG_INTSTS */ }; static const struct psp_vdata pspv5 = { .tee = &teev2, .platform_access = &pa_v2, .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10510, /* P2CMSG_INTEN */ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ }; static const struct psp_vdata pspv6 = { .sev = &sevv2, .tee = &teev2, .feature_reg = 0x109fc, /* C2PMSG_63 */ .inten_reg = 0x10510, /* P2CMSG_INTEN */ .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ }; #endif static const struct sp_dev_vdata dev_vdata[] = { { /* 0 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_CCP .ccp_vdata = &ccpv3, #endif }, { /* 1 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_CCP .ccp_vdata = &ccpv5a, #endif #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv1, #endif }, { /* 2 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_CCP .ccp_vdata = &ccpv5b, #endif }, { /* 3 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_CCP .ccp_vdata = &ccpv5a, #endif #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv2, #endif }, { /* 4 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_CCP .ccp_vdata = &ccpv5a, #endif #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv3, #endif }, { /* 5 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv4, #endif }, { /* 6 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv3, #endif }, { /* 7 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv5, #endif }, { /* 8 */ .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv6, #endif }, }; static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1537), (kernel_ulong_t)&dev_vdata[0] }, { PCI_VDEVICE(AMD, 0x1456), (kernel_ulong_t)&dev_vdata[1] }, { PCI_VDEVICE(AMD, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, { PCI_VDEVICE(AMD, 0x1486), (kernel_ulong_t)&dev_vdata[3] }, { PCI_VDEVICE(AMD, 0x15DF), (kernel_ulong_t)&dev_vdata[4] }, { PCI_VDEVICE(AMD, 0x14CA), (kernel_ulong_t)&dev_vdata[5] }, { PCI_VDEVICE(AMD, 0x15C7), (kernel_ulong_t)&dev_vdata[6] }, { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] }, { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, /* Last entry must be zero */ { 0, } }; MODULE_DEVICE_TABLE(pci, sp_pci_table); static SIMPLE_DEV_PM_OPS(sp_pci_pm_ops, sp_pci_suspend, sp_pci_resume); static struct pci_driver sp_pci_driver = { .name = "ccp", .id_table = sp_pci_table, .probe = sp_pci_probe, .remove = sp_pci_remove, .shutdown = sp_pci_shutdown, .driver.pm = &sp_pci_pm_ops, .dev_groups = psp_groups, }; int sp_pci_init(void) { return pci_register_driver(&sp_pci_driver); } void sp_pci_exit(void) { pci_unregister_driver(&sp_pci_driver); }
linux-master
drivers/crypto/ccp/sp-pci.c
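For reference, this is roughly what the security_attribute_show() macro above expands to for the fused_part/FUSED_PART attribute; it is shown only to make the generated sysfs show routine explicit and is not additional driver code.

/* Illustrative macro expansion only; the driver generates this via the macro. */
static ssize_t fused_part_show(struct device *d, struct device_attribute *attr,
			       char *buf)
{
	struct sp_device *sp = dev_get_drvdata(d);
	struct psp_device *psp = sp->psp_data;
	/* Select the FUSED_PART bit within the PSP capability word */
	int bit = PSP_SECURITY_FUSED_PART << PSP_CAPABILITY_PSP_SECURITY_OFFSET;

	return sysfs_emit(buf, "%d\n", (psp->capability & bit) > 0);
}

Visibility of the whole group is still gated by psp_security_is_visible(), which only exposes these attributes when PSP_CAPABILITY_PSP_SECURITY_REPORTING is set in psp->capability.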
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Secure Processor device driver * * Copyright (C) 2014,2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/ioport.h> #include <linux/dma-mapping.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/ccp.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/acpi.h> #include "ccp-dev.h" struct sp_platform { int coherent; unsigned int irq_count; }; static const struct sp_dev_vdata dev_vdata[] = { { .bar = 0, #ifdef CONFIG_CRYPTO_DEV_SP_CCP .ccp_vdata = &ccpv3_platform, #endif }, }; #ifdef CONFIG_ACPI static const struct acpi_device_id sp_acpi_match[] = { { "AMDI0C00", (kernel_ulong_t)&dev_vdata[0] }, { }, }; MODULE_DEVICE_TABLE(acpi, sp_acpi_match); #endif #ifdef CONFIG_OF static const struct of_device_id sp_of_match[] = { { .compatible = "amd,ccp-seattle-v1a", .data = (const void *)&dev_vdata[0] }, { }, }; MODULE_DEVICE_TABLE(of, sp_of_match); #endif static struct sp_dev_vdata *sp_get_of_version(struct platform_device *pdev) { #ifdef CONFIG_OF const struct of_device_id *match; match = of_match_node(sp_of_match, pdev->dev.of_node); if (match && match->data) return (struct sp_dev_vdata *)match->data; #endif return NULL; } static struct sp_dev_vdata *sp_get_acpi_version(struct platform_device *pdev) { #ifdef CONFIG_ACPI const struct acpi_device_id *match; match = acpi_match_device(sp_acpi_match, &pdev->dev); if (match && match->driver_data) return (struct sp_dev_vdata *)match->driver_data; #endif return NULL; } static int sp_get_irqs(struct sp_device *sp) { struct sp_platform *sp_platform = sp->dev_specific; struct device *dev = sp->dev; struct platform_device *pdev = to_platform_device(dev); int ret; sp_platform->irq_count = platform_irq_count(pdev); ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_notice(dev, "unable to get IRQ (%d)\n", ret); return ret; } sp->psp_irq = ret; if (sp_platform->irq_count == 1) { sp->ccp_irq = ret; } else { ret = platform_get_irq(pdev, 1); if (ret < 0) { dev_notice(dev, "unable to get IRQ (%d)\n", ret); return ret; } sp->ccp_irq = ret; } return 0; } static int sp_platform_probe(struct platform_device *pdev) { struct sp_device *sp; struct sp_platform *sp_platform; struct device *dev = &pdev->dev; enum dev_dma_attr attr; int ret; ret = -ENOMEM; sp = sp_alloc_struct(dev); if (!sp) goto e_err; sp_platform = devm_kzalloc(dev, sizeof(*sp_platform), GFP_KERNEL); if (!sp_platform) goto e_err; sp->dev_specific = sp_platform; sp->dev_vdata = pdev->dev.of_node ? 
sp_get_of_version(pdev) : sp_get_acpi_version(pdev); if (!sp->dev_vdata) { ret = -ENODEV; dev_err(dev, "missing driver data\n"); goto e_err; } sp->io_map = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(sp->io_map)) { ret = PTR_ERR(sp->io_map); goto e_err; } attr = device_get_dma_attr(dev); if (attr == DEV_DMA_NOT_SUPPORTED) { dev_err(dev, "DMA is not supported"); goto e_err; } sp_platform->coherent = (attr == DEV_DMA_COHERENT); if (sp_platform->coherent) sp->axcache = CACHE_WB_NO_ALLOC; else sp->axcache = CACHE_NONE; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48)); if (ret) { dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); goto e_err; } ret = sp_get_irqs(sp); if (ret) goto e_err; dev_set_drvdata(dev, sp); ret = sp_init(sp); if (ret) goto e_err; dev_notice(dev, "enabled\n"); return 0; e_err: dev_notice(dev, "initialization failed\n"); return ret; } static int sp_platform_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); sp_destroy(sp); dev_notice(dev, "disabled\n"); return 0; } #ifdef CONFIG_PM static int sp_platform_suspend(struct platform_device *pdev, pm_message_t state) { struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); return sp_suspend(sp); } static int sp_platform_resume(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sp_device *sp = dev_get_drvdata(dev); return sp_resume(sp); } #endif static struct platform_driver sp_platform_driver = { .driver = { .name = "ccp", #ifdef CONFIG_ACPI .acpi_match_table = sp_acpi_match, #endif #ifdef CONFIG_OF .of_match_table = sp_of_match, #endif }, .probe = sp_platform_probe, .remove = sp_platform_remove, #ifdef CONFIG_PM .suspend = sp_platform_suspend, .resume = sp_platform_resume, #endif }; int sp_platform_init(void) { return platform_driver_register(&sp_platform_driver); } void sp_platform_exit(void) { platform_driver_unregister(&sp_platform_driver); }
linux-master
drivers/crypto/ccp/sp-platform.c
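sp_platform_probe() above selects the vdata by hand, calling sp_get_of_version() or sp_get_acpi_version() depending on whether an of_node is present. A hedged sketch of the same lookup via the generic device_get_match_data() helper follows; it is shown only to illustrate what the two helpers compute, not as the file's code.

#include <linux/platform_device.h>
#include <linux/property.h>

/* Sketch only: device_get_match_data() walks whichever match table
 * (OF or ACPI) applies to the device and returns its data pointer,
 * i.e. the same sp_dev_vdata the two helpers above return. The const
 * cast mirrors what sp_get_of_version() already does. */
static struct sp_dev_vdata *example_get_vdata(struct platform_device *pdev)
{
	return (struct sp_dev_vdata *)device_get_match_data(&pdev->dev);
}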
// SPDX-License-Identifier: GPL-2.0 /* * AMD Platform Security Processor (PSP) Platform Access interface * * Copyright (C) 2023 Advanced Micro Devices, Inc. * * Author: Mario Limonciello <[email protected]> * * Some of this code is adapted from drivers/i2c/busses/i2c-designware-amdpsp.c * developed by Jan Dabros <[email protected]> and Copyright (C) 2022 Google Inc. * */ #include <linux/bitfield.h> #include <linux/errno.h> #include <linux/iopoll.h> #include <linux/mutex.h> #include "platform-access.h" #define PSP_CMD_TIMEOUT_US (500 * USEC_PER_MSEC) #define DOORBELL_CMDRESP_STS GENMASK(7, 0) /* Recovery field should be equal 0 to start sending commands */ static int check_recovery(u32 __iomem *cmd) { return FIELD_GET(PSP_CMDRESP_RECOVERY, ioread32(cmd)); } static int wait_cmd(u32 __iomem *cmd) { u32 tmp, expected; /* Expect mbox_cmd to be cleared and ready bit to be set by PSP */ expected = FIELD_PREP(PSP_CMDRESP_RESP, 1); /* * Check for readiness of PSP mailbox in a tight loop in order to * process further as soon as command was consumed. */ return readl_poll_timeout(cmd, tmp, (tmp & expected), 0, PSP_CMD_TIMEOUT_US); } int psp_check_platform_access_status(void) { struct psp_device *psp = psp_get_master_device(); if (!psp || !psp->platform_access_data) return -ENODEV; return 0; } EXPORT_SYMBOL(psp_check_platform_access_status); int psp_send_platform_access_msg(enum psp_platform_access_msg msg, struct psp_request *req) { struct psp_device *psp = psp_get_master_device(); u32 __iomem *cmd, *lo, *hi; struct psp_platform_access_device *pa_dev; phys_addr_t req_addr; u32 cmd_reg; int ret; if (!psp || !psp->platform_access_data) return -ENODEV; pa_dev = psp->platform_access_data; if (!pa_dev->vdata->cmdresp_reg || !pa_dev->vdata->cmdbuff_addr_lo_reg || !pa_dev->vdata->cmdbuff_addr_hi_reg) return -ENODEV; cmd = psp->io_regs + pa_dev->vdata->cmdresp_reg; lo = psp->io_regs + pa_dev->vdata->cmdbuff_addr_lo_reg; hi = psp->io_regs + pa_dev->vdata->cmdbuff_addr_hi_reg; mutex_lock(&pa_dev->mailbox_mutex); if (check_recovery(cmd)) { dev_dbg(psp->dev, "platform mailbox is in recovery\n"); ret = -EBUSY; goto unlock; } if (wait_cmd(cmd)) { dev_dbg(psp->dev, "platform mailbox is not done processing command\n"); ret = -EBUSY; goto unlock; } /* * Fill mailbox with address of command-response buffer, which will be * used for sending i2c requests as well as reading status returned by * PSP. Use physical address of buffer, since PSP will map this region. 
*/ req_addr = __psp_pa(req); iowrite32(lower_32_bits(req_addr), lo); iowrite32(upper_32_bits(req_addr), hi); print_hex_dump_debug("->psp ", DUMP_PREFIX_OFFSET, 16, 2, req, req->header.payload_size, false); /* Write command register to trigger processing */ cmd_reg = FIELD_PREP(PSP_CMDRESP_CMD, msg); iowrite32(cmd_reg, cmd); if (wait_cmd(cmd)) { ret = -ETIMEDOUT; goto unlock; } /* Ensure it was triggered by this driver */ if (ioread32(lo) != lower_32_bits(req_addr) || ioread32(hi) != upper_32_bits(req_addr)) { ret = -EBUSY; goto unlock; } /* Store the status in request header for caller to investigate */ cmd_reg = ioread32(cmd); req->header.status = FIELD_GET(PSP_CMDRESP_STS, cmd_reg); if (req->header.status) { ret = -EIO; goto unlock; } print_hex_dump_debug("<-psp ", DUMP_PREFIX_OFFSET, 16, 2, req, req->header.payload_size, false); ret = 0; unlock: mutex_unlock(&pa_dev->mailbox_mutex); return ret; } EXPORT_SYMBOL_GPL(psp_send_platform_access_msg); int psp_ring_platform_doorbell(int msg, u32 *result) { struct psp_device *psp = psp_get_master_device(); struct psp_platform_access_device *pa_dev; u32 __iomem *button, *cmd; int ret, val; if (!psp || !psp->platform_access_data) return -ENODEV; pa_dev = psp->platform_access_data; button = psp->io_regs + pa_dev->vdata->doorbell_button_reg; cmd = psp->io_regs + pa_dev->vdata->doorbell_cmd_reg; mutex_lock(&pa_dev->doorbell_mutex); if (wait_cmd(cmd)) { dev_err(psp->dev, "doorbell command not done processing\n"); ret = -EBUSY; goto unlock; } iowrite32(FIELD_PREP(DOORBELL_CMDRESP_STS, msg), cmd); iowrite32(PSP_DRBL_RING, button); if (wait_cmd(cmd)) { ret = -ETIMEDOUT; goto unlock; } val = FIELD_GET(DOORBELL_CMDRESP_STS, ioread32(cmd)); if (val) { if (result) *result = val; ret = -EIO; goto unlock; } ret = 0; unlock: mutex_unlock(&pa_dev->doorbell_mutex); return ret; } EXPORT_SYMBOL_GPL(psp_ring_platform_doorbell); void platform_access_dev_destroy(struct psp_device *psp) { struct psp_platform_access_device *pa_dev = psp->platform_access_data; if (!pa_dev) return; mutex_destroy(&pa_dev->mailbox_mutex); mutex_destroy(&pa_dev->doorbell_mutex); psp->platform_access_data = NULL; } int platform_access_dev_init(struct psp_device *psp) { struct device *dev = psp->dev; struct psp_platform_access_device *pa_dev; pa_dev = devm_kzalloc(dev, sizeof(*pa_dev), GFP_KERNEL); if (!pa_dev) return -ENOMEM; psp->platform_access_data = pa_dev; pa_dev->psp = psp; pa_dev->dev = dev; pa_dev->vdata = (struct platform_access_vdata *)psp->vdata->platform_access; mutex_init(&pa_dev->mailbox_mutex); mutex_init(&pa_dev->doorbell_mutex); dev_dbg(dev, "platform access enabled\n"); return 0; }
linux-master
drivers/crypto/ccp/platform-access.c
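psp_send_platform_access_msg() above takes a psp_request whose header.payload_size covers the buffer handed to the PSP and reports the mailbox status back through header.status. A hedged caller sketch follows; the request is assumed to have been sized and filled by the caller, and nothing here names a real platform command.

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/psp-platform-access.h>

/* Sketch only: submit a prepared request and surface the PSP's own
 * status code when the mailbox reports an error (-EIO). The caller is
 * assumed to have filled header.payload_size and the command payload. */
static int example_platform_access_send(enum psp_platform_access_msg msg,
					struct psp_request *req)
{
	int ret;

	ret = psp_check_platform_access_status();
	if (ret)
		return ret;	/* no PSP with platform access present */

	ret = psp_send_platform_access_msg(msg, req);
	if (ret == -EIO)
		pr_debug("PSP rejected command, status 0x%x\n",
			 req->header.status);
	return ret;
}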
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) driver * * Copyright (C) 2013,2019 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <[email protected]> * Author: Gary R Hook <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/spinlock_types.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/cpu.h> #include <linux/atomic.h> #ifdef CONFIG_X86 #include <asm/cpu_device_id.h> #endif #include <linux/ccp.h> #include "ccp-dev.h" #define MAX_CCPS 32 /* Limit CCP use to a specifed number of queues per device */ static unsigned int nqueues; module_param(nqueues, uint, 0444); MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)"); /* Limit the maximum number of configured CCPs */ static atomic_t dev_count = ATOMIC_INIT(0); static unsigned int max_devs = MAX_CCPS; module_param(max_devs, uint, 0444); MODULE_PARM_DESC(max_devs, "Maximum number of CCPs to enable (default: all; 0 disables all CCPs)"); struct ccp_tasklet_data { struct completion completion; struct ccp_cmd *cmd; }; /* Human-readable error strings */ #define CCP_MAX_ERROR_CODE 64 static char *ccp_error_codes[] = { "", "ILLEGAL_ENGINE", "ILLEGAL_KEY_ID", "ILLEGAL_FUNCTION_TYPE", "ILLEGAL_FUNCTION_MODE", "ILLEGAL_FUNCTION_ENCRYPT", "ILLEGAL_FUNCTION_SIZE", "Zlib_MISSING_INIT_EOM", "ILLEGAL_FUNCTION_RSVD", "ILLEGAL_BUFFER_LENGTH", "VLSB_FAULT", "ILLEGAL_MEM_ADDR", "ILLEGAL_MEM_SEL", "ILLEGAL_CONTEXT_ID", "ILLEGAL_KEY_ADDR", "0xF Reserved", "Zlib_ILLEGAL_MULTI_QUEUE", "Zlib_ILLEGAL_JOBID_CHANGE", "CMD_TIMEOUT", "IDMA0_AXI_SLVERR", "IDMA0_AXI_DECERR", "0x15 Reserved", "IDMA1_AXI_SLAVE_FAULT", "IDMA1_AIXI_DECERR", "0x18 Reserved", "ZLIBVHB_AXI_SLVERR", "ZLIBVHB_AXI_DECERR", "0x1B Reserved", "ZLIB_UNEXPECTED_EOM", "ZLIB_EXTRA_DATA", "ZLIB_BTYPE", "ZLIB_UNDEFINED_SYMBOL", "ZLIB_UNDEFINED_DISTANCE_S", "ZLIB_CODE_LENGTH_SYMBOL", "ZLIB _VHB_ILLEGAL_FETCH", "ZLIB_UNCOMPRESSED_LEN", "ZLIB_LIMIT_REACHED", "ZLIB_CHECKSUM_MISMATCH0", "ODMA0_AXI_SLVERR", "ODMA0_AXI_DECERR", "0x28 Reserved", "ODMA1_AXI_SLVERR", "ODMA1_AXI_DECERR", }; void ccp_log_error(struct ccp_device *d, unsigned int e) { if (WARN_ON(e >= CCP_MAX_ERROR_CODE)) return; if (e < ARRAY_SIZE(ccp_error_codes)) dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]); else dev_err(d->dev, "CCP error %d: Unknown Error\n", e); } /* List of CCPs, CCP count, read-write access lock, and access functions * * Lock structure: get ccp_unit_lock for reading whenever we need to * examine the CCP list. While holding it for reading we can acquire * the RR lock to update the round-robin next-CCP pointer. The unit lock * must be acquired before the RR lock. * * If the unit-lock is acquired for writing, we have total control over * the list, so there's no value in getting the RR lock. */ static DEFINE_RWLOCK(ccp_unit_lock); static LIST_HEAD(ccp_units); /* Round-robin counter */ static DEFINE_SPINLOCK(ccp_rr_lock); static struct ccp_device *ccp_rr; /** * ccp_add_device - add a CCP device to the list * * @ccp: ccp_device struct pointer * * Put this CCP on the unit list, which makes it available * for use. * * Returns zero if a CCP device is present, -ENODEV otherwise. 
*/ void ccp_add_device(struct ccp_device *ccp) { unsigned long flags; write_lock_irqsave(&ccp_unit_lock, flags); list_add_tail(&ccp->entry, &ccp_units); if (!ccp_rr) /* We already have the list lock (we're first) so this * pointer can't change on us. Set its initial value. */ ccp_rr = ccp; write_unlock_irqrestore(&ccp_unit_lock, flags); } /** * ccp_del_device - remove a CCP device from the list * * @ccp: ccp_device struct pointer * * Remove this unit from the list of devices. If the next device * up for use is this one, adjust the pointer. If this is the last * device, NULL the pointer. */ void ccp_del_device(struct ccp_device *ccp) { unsigned long flags; write_lock_irqsave(&ccp_unit_lock, flags); if (ccp_rr == ccp) { /* ccp_unit_lock is read/write; any read access * will be suspended while we make changes to the * list and RR pointer. */ if (list_is_last(&ccp_rr->entry, &ccp_units)) ccp_rr = list_first_entry(&ccp_units, struct ccp_device, entry); else ccp_rr = list_next_entry(ccp_rr, entry); } list_del(&ccp->entry); if (list_empty(&ccp_units)) ccp_rr = NULL; write_unlock_irqrestore(&ccp_unit_lock, flags); } int ccp_register_rng(struct ccp_device *ccp) { int ret = 0; dev_dbg(ccp->dev, "Registering RNG...\n"); /* Register an RNG */ ccp->hwrng.name = ccp->rngname; ccp->hwrng.read = ccp_trng_read; ret = hwrng_register(&ccp->hwrng); if (ret) dev_err(ccp->dev, "error registering hwrng (%d)\n", ret); return ret; } void ccp_unregister_rng(struct ccp_device *ccp) { if (ccp->hwrng.name) hwrng_unregister(&ccp->hwrng); } static struct ccp_device *ccp_get_device(void) { unsigned long flags; struct ccp_device *dp = NULL; /* We round-robin through the unit list. * The (ccp_rr) pointer refers to the next unit to use. */ read_lock_irqsave(&ccp_unit_lock, flags); if (!list_empty(&ccp_units)) { spin_lock(&ccp_rr_lock); dp = ccp_rr; if (list_is_last(&ccp_rr->entry, &ccp_units)) ccp_rr = list_first_entry(&ccp_units, struct ccp_device, entry); else ccp_rr = list_next_entry(ccp_rr, entry); spin_unlock(&ccp_rr_lock); } read_unlock_irqrestore(&ccp_unit_lock, flags); return dp; } /** * ccp_present - check if a CCP device is present * * Returns zero if a CCP device is present, -ENODEV otherwise. */ int ccp_present(void) { unsigned long flags; int ret; read_lock_irqsave(&ccp_unit_lock, flags); ret = list_empty(&ccp_units); read_unlock_irqrestore(&ccp_unit_lock, flags); return ret ? -ENODEV : 0; } EXPORT_SYMBOL_GPL(ccp_present); /** * ccp_version - get the version of the CCP device * * Returns the version from the first unit on the list; * otherwise a zero if no CCP device is present */ unsigned int ccp_version(void) { struct ccp_device *dp; unsigned long flags; int ret = 0; read_lock_irqsave(&ccp_unit_lock, flags); if (!list_empty(&ccp_units)) { dp = list_first_entry(&ccp_units, struct ccp_device, entry); ret = dp->vdata->version; } read_unlock_irqrestore(&ccp_unit_lock, flags); return ret; } EXPORT_SYMBOL_GPL(ccp_version); /** * ccp_enqueue_cmd - queue an operation for processing by the CCP * * @cmd: ccp_cmd struct to be processed * * Queue a cmd to be processed by the CCP. If queueing the cmd * would exceed the defined length of the cmd queue the cmd will * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will * result in a return code of -EBUSY. * * The callback routine specified in the ccp_cmd struct will be * called to notify the caller of completion (if the cmd was not * backlogged) or advancement out of the backlog. 
If the cmd has * advanced out of the backlog the "err" value of the callback * will be -EINPROGRESS. Any other "err" value during callback is * the result of the operation. * * The cmd has been successfully queued if: * the return code is -EINPROGRESS or * the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set */ int ccp_enqueue_cmd(struct ccp_cmd *cmd) { struct ccp_device *ccp; unsigned long flags; unsigned int i; int ret; /* Some commands might need to be sent to a specific device */ ccp = cmd->ccp ? cmd->ccp : ccp_get_device(); if (!ccp) return -ENODEV; /* Caller must supply a callback routine */ if (!cmd->callback) return -EINVAL; cmd->ccp = ccp; spin_lock_irqsave(&ccp->cmd_lock, flags); i = ccp->cmd_q_count; if (ccp->cmd_count >= MAX_CMD_QLEN) { if (cmd->flags & CCP_CMD_MAY_BACKLOG) { ret = -EBUSY; list_add_tail(&cmd->entry, &ccp->backlog); } else { ret = -ENOSPC; } } else { ret = -EINPROGRESS; ccp->cmd_count++; list_add_tail(&cmd->entry, &ccp->cmd); /* Find an idle queue */ if (!ccp->suspending) { for (i = 0; i < ccp->cmd_q_count; i++) { if (ccp->cmd_q[i].active) continue; break; } } } spin_unlock_irqrestore(&ccp->cmd_lock, flags); /* If we found an idle queue, wake it up */ if (i < ccp->cmd_q_count) wake_up_process(ccp->cmd_q[i].kthread); return ret; } EXPORT_SYMBOL_GPL(ccp_enqueue_cmd); static void ccp_do_cmd_backlog(struct work_struct *work) { struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work); struct ccp_device *ccp = cmd->ccp; unsigned long flags; unsigned int i; cmd->callback(cmd->data, -EINPROGRESS); spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->cmd_count++; list_add_tail(&cmd->entry, &ccp->cmd); /* Find an idle queue */ for (i = 0; i < ccp->cmd_q_count; i++) { if (ccp->cmd_q[i].active) continue; break; } spin_unlock_irqrestore(&ccp->cmd_lock, flags); /* If we found an idle queue, wake it up */ if (i < ccp->cmd_q_count) wake_up_process(ccp->cmd_q[i].kthread); } static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q) { struct ccp_device *ccp = cmd_q->ccp; struct ccp_cmd *cmd = NULL; struct ccp_cmd *backlog = NULL; unsigned long flags; spin_lock_irqsave(&ccp->cmd_lock, flags); cmd_q->active = 0; if (ccp->suspending) { cmd_q->suspended = 1; spin_unlock_irqrestore(&ccp->cmd_lock, flags); wake_up_interruptible(&ccp->suspend_queue); return NULL; } if (ccp->cmd_count) { cmd_q->active = 1; cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); list_del(&cmd->entry); ccp->cmd_count--; } if (!list_empty(&ccp->backlog)) { backlog = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); list_del(&backlog->entry); } spin_unlock_irqrestore(&ccp->cmd_lock, flags); if (backlog) { INIT_WORK(&backlog->work, ccp_do_cmd_backlog); schedule_work(&backlog->work); } return cmd; } static void ccp_do_cmd_complete(unsigned long data) { struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data; struct ccp_cmd *cmd = tdata->cmd; cmd->callback(cmd->data, cmd->ret); complete(&tdata->completion); } /** * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue * * @data: thread-specific data */ int ccp_cmd_queue_thread(void *data) { struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data; struct ccp_cmd *cmd; struct ccp_tasklet_data tdata; struct tasklet_struct tasklet; tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); set_current_state(TASK_INTERRUPTIBLE); cmd = ccp_dequeue_cmd(cmd_q); if (!cmd) continue; __set_current_state(TASK_RUNNING); /* Execute 
the command */ cmd->ret = ccp_run_cmd(cmd_q, cmd); /* Schedule the completion callback */ tdata.cmd = cmd; init_completion(&tdata.completion); tasklet_schedule(&tasklet); wait_for_completion(&tdata.completion); } __set_current_state(TASK_RUNNING); return 0; } /** * ccp_alloc_struct - allocate and initialize the ccp_device struct * * @sp: sp_device struct of the CCP */ struct ccp_device *ccp_alloc_struct(struct sp_device *sp) { struct device *dev = sp->dev; struct ccp_device *ccp; ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL); if (!ccp) return NULL; ccp->dev = dev; ccp->sp = sp; ccp->axcache = sp->axcache; INIT_LIST_HEAD(&ccp->cmd); INIT_LIST_HEAD(&ccp->backlog); spin_lock_init(&ccp->cmd_lock); mutex_init(&ccp->req_mutex); mutex_init(&ccp->sb_mutex); ccp->sb_count = KSB_COUNT; ccp->sb_start = 0; /* Initialize the wait queues */ init_waitqueue_head(&ccp->sb_queue); init_waitqueue_head(&ccp->suspend_queue); snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord); snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord); return ccp; } int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); u32 trng_value; int len = min_t(int, sizeof(trng_value), max); /* Locking is provided by the caller so we can update device * hwrng-related fields safely */ trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG); if (!trng_value) { /* Zero is returned if not data is available or if a * bad-entropy error is present. Assume an error if * we exceed TRNG_RETRIES reads of zero. */ if (ccp->hwrng_retries++ > TRNG_RETRIES) return -EIO; return 0; } /* Reset the counter and save the rng value */ ccp->hwrng_retries = 0; memcpy(data, &trng_value, len); return len; } bool ccp_queues_suspended(struct ccp_device *ccp) { unsigned int suspended = 0; unsigned long flags; unsigned int i; spin_lock_irqsave(&ccp->cmd_lock, flags); for (i = 0; i < ccp->cmd_q_count; i++) if (ccp->cmd_q[i].suspended) suspended++; spin_unlock_irqrestore(&ccp->cmd_lock, flags); return ccp->cmd_q_count == suspended; } void ccp_dev_suspend(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; unsigned long flags; unsigned int i; /* If there's no device there's nothing to do */ if (!ccp) return; spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->suspending = 1; /* Wake all the queue kthreads to prepare for suspend */ for (i = 0; i < ccp->cmd_q_count; i++) wake_up_process(ccp->cmd_q[i].kthread); spin_unlock_irqrestore(&ccp->cmd_lock, flags); /* Wait for all queue kthreads to say they're done */ while (!ccp_queues_suspended(ccp)) wait_event_interruptible(ccp->suspend_queue, ccp_queues_suspended(ccp)); } void ccp_dev_resume(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; unsigned long flags; unsigned int i; /* If there's no device there's nothing to do */ if (!ccp) return; spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->suspending = 0; /* Wake up all the kthreads */ for (i = 0; i < ccp->cmd_q_count; i++) { ccp->cmd_q[i].suspended = 0; wake_up_process(ccp->cmd_q[i].kthread); } spin_unlock_irqrestore(&ccp->cmd_lock, flags); } int ccp_dev_init(struct sp_device *sp) { struct device *dev = sp->dev; struct ccp_device *ccp; int ret; /* * Check how many we have so far, and stop after reaching * that number */ if (atomic_inc_return(&dev_count) > max_devs) return 0; /* don't fail the load */ ret = -ENOMEM; ccp = ccp_alloc_struct(sp); if (!ccp) goto e_err; sp->ccp_data = ccp; if (!nqueues || (nqueues > MAX_HW_QUEUES)) ccp->max_q_count = MAX_HW_QUEUES; 
else ccp->max_q_count = nqueues; ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata; if (!ccp->vdata || !ccp->vdata->version) { ret = -ENODEV; dev_err(dev, "missing driver data\n"); goto e_err; } ccp->use_tasklet = sp->use_tasklet; ccp->io_regs = sp->io_map + ccp->vdata->offset; if (ccp->vdata->setup) ccp->vdata->setup(ccp); ret = ccp->vdata->perform->init(ccp); if (ret) { /* A positive number means that the device cannot be initialized, * but no additional message is required. */ if (ret > 0) goto e_quiet; /* An unexpected problem occurred, and should be reported in the log */ goto e_err; } dev_notice(dev, "ccp enabled\n"); return 0; e_err: dev_notice(dev, "ccp initialization failed\n"); e_quiet: sp->ccp_data = NULL; return ret; } void ccp_dev_destroy(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; if (!ccp) return; ccp->vdata->perform->destroy(ccp); }
linux-master
drivers/crypto/ccp/ccp-dev.c
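The kernel-doc for ccp_enqueue_cmd() above spells out the queueing contract: -EINPROGRESS means the command was queued, -EBUSY means it was backlogged (accepted only when CCP_CMD_MAY_BACKLOG is set), and the callback later reports -EINPROGRESS when a backlogged command advances and the real result on completion. A minimal, hypothetical submitter following that contract is sketched below; the completion-based wait is illustrative and not part of the driver.

#include <linux/ccp.h>
#include <linux/completion.h>
#include <linux/errno.h>

struct example_ccp_ctx {
	struct completion done;
	int err;
};

/* Sketch only: per the contract above, ignore the -EINPROGRESS
 * "advanced out of the backlog" notification and record the final
 * result when it arrives. */
static void example_ccp_callback(void *data, int err)
{
	struct example_ccp_ctx *ctx = data;

	if (err == -EINPROGRESS)
		return;
	ctx->err = err;
	complete(&ctx->done);
}

static int example_ccp_submit(struct ccp_cmd *cmd)
{
	struct example_ccp_ctx ctx;
	int ret;

	init_completion(&ctx.done);
	cmd->callback = example_ccp_callback;
	cmd->data = &ctx;
	cmd->flags |= CCP_CMD_MAY_BACKLOG;

	ret = ccp_enqueue_cmd(cmd);
	if (ret != -EINPROGRESS && ret != -EBUSY)
		return ret;	/* not accepted for processing */

	wait_for_completion(&ctx.done);
	return ctx.err;
}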
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) driver * * Copyright (C) 2016,2019 Advanced Micro Devices, Inc. * * Author: Gary R Hook <[email protected]> */ #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/compiler.h> #include <linux/ccp.h> #include "ccp-dev.h" /* Allocate the requested number of contiguous LSB slots * from the LSB bitmap. Look in the private range for this * queue first; failing that, check the public area. * If no space is available, wait around. * Return: first slot number */ static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count) { struct ccp_device *ccp; int start; /* First look at the map for the queue */ if (cmd_q->lsb >= 0) { start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap, LSB_SIZE, 0, count, 0); if (start < LSB_SIZE) { bitmap_set(cmd_q->lsbmap, start, count); return start + cmd_q->lsb * LSB_SIZE; } } /* No joy; try to get an entry from the shared blocks */ ccp = cmd_q->ccp; for (;;) { mutex_lock(&ccp->sb_mutex); start = (u32)bitmap_find_next_zero_area(ccp->lsbmap, MAX_LSB_CNT * LSB_SIZE, 0, count, 0); if (start <= MAX_LSB_CNT * LSB_SIZE) { bitmap_set(ccp->lsbmap, start, count); mutex_unlock(&ccp->sb_mutex); return start; } ccp->sb_avail = 0; mutex_unlock(&ccp->sb_mutex); /* Wait for KSB entries to become available */ if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail)) return 0; } } /* Free a number of LSB slots from the bitmap, starting at * the indicated starting slot number. */ static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start, unsigned int count) { if (!start) return; if (cmd_q->lsb == start) { /* An entry from the private LSB */ bitmap_clear(cmd_q->lsbmap, start, count); } else { /* From the shared LSBs */ struct ccp_device *ccp = cmd_q->ccp; mutex_lock(&ccp->sb_mutex); bitmap_clear(ccp->lsbmap, start, count); ccp->sb_avail = 1; mutex_unlock(&ccp->sb_mutex); wake_up_interruptible_all(&ccp->sb_queue); } } /* CCP version 5: Union to define the function field (cmd_reg1/dword0) */ union ccp_function { struct { u16 size:7; u16 encrypt:1; u16 mode:5; u16 type:2; } aes; struct { u16 size:7; u16 encrypt:1; u16 rsvd:5; u16 type:2; } aes_xts; struct { u16 size:7; u16 encrypt:1; u16 mode:5; u16 type:2; } des3; struct { u16 rsvd1:10; u16 type:4; u16 rsvd2:1; } sha; struct { u16 mode:3; u16 size:12; } rsa; struct { u16 byteswap:2; u16 bitwise:3; u16 reflect:2; u16 rsvd:8; } pt; struct { u16 rsvd:13; } zlib; struct { u16 size:10; u16 type:2; u16 mode:3; } ecc; u16 raw; }; #define CCP_AES_SIZE(p) ((p)->aes.size) #define CCP_AES_ENCRYPT(p) ((p)->aes.encrypt) #define CCP_AES_MODE(p) ((p)->aes.mode) #define CCP_AES_TYPE(p) ((p)->aes.type) #define CCP_XTS_SIZE(p) ((p)->aes_xts.size) #define CCP_XTS_TYPE(p) ((p)->aes_xts.type) #define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt) #define CCP_DES3_SIZE(p) ((p)->des3.size) #define CCP_DES3_ENCRYPT(p) ((p)->des3.encrypt) #define CCP_DES3_MODE(p) ((p)->des3.mode) #define CCP_DES3_TYPE(p) ((p)->des3.type) #define CCP_SHA_TYPE(p) ((p)->sha.type) #define CCP_RSA_SIZE(p) ((p)->rsa.size) #define CCP_PT_BYTESWAP(p) ((p)->pt.byteswap) #define CCP_PT_BITWISE(p) ((p)->pt.bitwise) #define CCP_ECC_MODE(p) ((p)->ecc.mode) #define CCP_ECC_AFFINE(p) ((p)->ecc.one) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) #define CCP5_CMD_SOC(p) (CCP5_CMD_DW0(p).soc) #define CCP5_CMD_IOC(p) (CCP5_CMD_DW0(p).ioc) #define CCP5_CMD_INIT(p) (CCP5_CMD_DW0(p).init) #define CCP5_CMD_EOM(p) 
(CCP5_CMD_DW0(p).eom) #define CCP5_CMD_FUNCTION(p) (CCP5_CMD_DW0(p).function) #define CCP5_CMD_ENGINE(p) (CCP5_CMD_DW0(p).engine) #define CCP5_CMD_PROT(p) (CCP5_CMD_DW0(p).prot) /* Word 1 */ #define CCP5_CMD_DW1(p) ((p)->length) #define CCP5_CMD_LEN(p) (CCP5_CMD_DW1(p)) /* Word 2 */ #define CCP5_CMD_DW2(p) ((p)->src_lo) #define CCP5_CMD_SRC_LO(p) (CCP5_CMD_DW2(p)) /* Word 3 */ #define CCP5_CMD_DW3(p) ((p)->dw3) #define CCP5_CMD_SRC_MEM(p) ((p)->dw3.src_mem) #define CCP5_CMD_SRC_HI(p) ((p)->dw3.src_hi) #define CCP5_CMD_LSB_ID(p) ((p)->dw3.lsb_cxt_id) #define CCP5_CMD_FIX_SRC(p) ((p)->dw3.fixed) /* Words 4/5 */ #define CCP5_CMD_DW4(p) ((p)->dw4) #define CCP5_CMD_DST_LO(p) (CCP5_CMD_DW4(p).dst_lo) #define CCP5_CMD_DW5(p) ((p)->dw5.fields.dst_hi) #define CCP5_CMD_DST_HI(p) (CCP5_CMD_DW5(p)) #define CCP5_CMD_DST_MEM(p) ((p)->dw5.fields.dst_mem) #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) /* Word 6/7 */ #define CCP5_CMD_DW6(p) ((p)->key_lo) #define CCP5_CMD_KEY_LO(p) (CCP5_CMD_DW6(p)) #define CCP5_CMD_DW7(p) ((p)->dw7) #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) static inline u32 low_address(unsigned long addr) { return (u64)addr & 0x0ffffffff; } static inline u32 high_address(unsigned long addr) { return ((u64)addr >> 32) & 0x00000ffff; } static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) { unsigned int head_idx, n; u32 head_lo, queue_start; queue_start = low_address(cmd_q->qdma_tail); head_lo = ioread32(cmd_q->reg_head_lo); head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ } static int ccp5_do_cmd(struct ccp5_desc *desc, struct ccp_cmd_queue *cmd_q) { __le32 *mP; u32 *dP; u32 tail; int i; int ret = 0; cmd_q->total_ops++; if (CCP5_CMD_SOC(desc)) { CCP5_CMD_IOC(desc) = 1; CCP5_CMD_SOC(desc) = 0; } mutex_lock(&cmd_q->q_mutex); mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx]; dP = (u32 *)desc; for (i = 0; i < 8; i++) mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; /* The data used by this command must be flushed to memory */ wmb(); /* Write the new tail address back to the queue register */ tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); iowrite32(tail, cmd_q->reg_tail_lo); /* Turn the queue back on using our cached control register */ iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); mutex_unlock(&cmd_q->q_mutex); if (CCP5_CMD_IOC(desc)) { /* Wait for the job to complete */ ret = wait_event_interruptible(cmd_q->int_queue, cmd_q->int_rcvd); if (ret || cmd_q->cmd_error) { /* Log the error and flush the queue by * moving the head pointer */ if (cmd_q->cmd_error) ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); iowrite32(tail, cmd_q->reg_head_lo); if (!ret) ret = -EIO; } cmd_q->int_rcvd = 0; } return ret; } static int ccp5_perform_aes(struct ccp_op *op) { struct ccp5_desc desc; union ccp_function function; u32 key_addr = op->sb_key * LSB_ITEM_SIZE; op->cmd_q->total_aes_ops++; /* Zero out all the fields of the command desc */ memset(&desc, 0, Q_DESC_SIZE); CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES; CCP5_CMD_SOC(&desc) = op->soc; CCP5_CMD_IOC(&desc) = 1; CCP5_CMD_INIT(&desc) = op->init; CCP5_CMD_EOM(&desc) = op->eom; CCP5_CMD_PROT(&desc) = 0; function.raw = 0; CCP_AES_ENCRYPT(&function) = op->u.aes.action; CCP_AES_MODE(&function) = 
op->u.aes.mode; CCP_AES_TYPE(&function) = op->u.aes.type; CCP_AES_SIZE(&function) = op->u.aes.size; CCP5_CMD_FUNCTION(&desc) = function.raw; CCP5_CMD_LEN(&desc) = op->src.u.dma.length; CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); CCP5_CMD_KEY_HI(&desc) = 0; CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; return ccp5_do_cmd(&desc, op->cmd_q); } static int ccp5_perform_xts_aes(struct ccp_op *op) { struct ccp5_desc desc; union ccp_function function; u32 key_addr = op->sb_key * LSB_ITEM_SIZE; op->cmd_q->total_xts_aes_ops++; /* Zero out all the fields of the command desc */ memset(&desc, 0, Q_DESC_SIZE); CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128; CCP5_CMD_SOC(&desc) = op->soc; CCP5_CMD_IOC(&desc) = 1; CCP5_CMD_INIT(&desc) = op->init; CCP5_CMD_EOM(&desc) = op->eom; CCP5_CMD_PROT(&desc) = 0; function.raw = 0; CCP_XTS_TYPE(&function) = op->u.xts.type; CCP_XTS_ENCRYPT(&function) = op->u.xts.action; CCP_XTS_SIZE(&function) = op->u.xts.unit_size; CCP5_CMD_FUNCTION(&desc) = function.raw; CCP5_CMD_LEN(&desc) = op->src.u.dma.length; CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); CCP5_CMD_KEY_HI(&desc) = 0; CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; return ccp5_do_cmd(&desc, op->cmd_q); } static int ccp5_perform_sha(struct ccp_op *op) { struct ccp5_desc desc; union ccp_function function; op->cmd_q->total_sha_ops++; /* Zero out all the fields of the command desc */ memset(&desc, 0, Q_DESC_SIZE); CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA; CCP5_CMD_SOC(&desc) = op->soc; CCP5_CMD_IOC(&desc) = 1; CCP5_CMD_INIT(&desc) = 1; CCP5_CMD_EOM(&desc) = op->eom; CCP5_CMD_PROT(&desc) = 0; function.raw = 0; CCP_SHA_TYPE(&function) = op->u.sha.type; CCP5_CMD_FUNCTION(&desc) = function.raw; CCP5_CMD_LEN(&desc) = op->src.u.dma.length; CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; if (op->eom) { CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits); CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits); } else { CCP5_CMD_SHA_LO(&desc) = 0; CCP5_CMD_SHA_HI(&desc) = 0; } return ccp5_do_cmd(&desc, op->cmd_q); } static int ccp5_perform_des3(struct ccp_op *op) { struct ccp5_desc desc; union ccp_function function; u32 key_addr = op->sb_key * LSB_ITEM_SIZE; op->cmd_q->total_3des_ops++; /* Zero out all the fields of the command desc */ memset(&desc, 0, sizeof(struct ccp5_desc)); CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3; CCP5_CMD_SOC(&desc) = op->soc; CCP5_CMD_IOC(&desc) = 1; CCP5_CMD_INIT(&desc) = op->init; CCP5_CMD_EOM(&desc) = op->eom; CCP5_CMD_PROT(&desc) = 0; function.raw = 0; CCP_DES3_ENCRYPT(&function) = op->u.des3.action; CCP_DES3_MODE(&function) = op->u.des3.mode; CCP_DES3_TYPE(&function) = op->u.des3.type; CCP5_CMD_FUNCTION(&desc) = function.raw; CCP5_CMD_LEN(&desc) = op->src.u.dma.length; 
CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); CCP5_CMD_KEY_HI(&desc) = 0; CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; return ccp5_do_cmd(&desc, op->cmd_q); } static int ccp5_perform_rsa(struct ccp_op *op) { struct ccp5_desc desc; union ccp_function function; op->cmd_q->total_rsa_ops++; /* Zero out all the fields of the command desc */ memset(&desc, 0, Q_DESC_SIZE); CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA; CCP5_CMD_SOC(&desc) = op->soc; CCP5_CMD_IOC(&desc) = 1; CCP5_CMD_INIT(&desc) = 0; CCP5_CMD_EOM(&desc) = 1; CCP5_CMD_PROT(&desc) = 0; function.raw = 0; CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3; CCP5_CMD_FUNCTION(&desc) = function.raw; CCP5_CMD_LEN(&desc) = op->u.rsa.input_len; /* Source is from external memory */ CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; /* Destination is in external memory */ CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; /* Key (Exponent) is in external memory */ CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma); CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma); CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM; return ccp5_do_cmd(&desc, op->cmd_q); } static int ccp5_perform_passthru(struct ccp_op *op) { struct ccp5_desc desc; union ccp_function function; struct ccp_dma_info *saddr = &op->src.u.dma; struct ccp_dma_info *daddr = &op->dst.u.dma; op->cmd_q->total_pt_ops++; memset(&desc, 0, Q_DESC_SIZE); CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU; CCP5_CMD_SOC(&desc) = 0; CCP5_CMD_IOC(&desc) = 1; CCP5_CMD_INIT(&desc) = 0; CCP5_CMD_EOM(&desc) = op->eom; CCP5_CMD_PROT(&desc) = 0; function.raw = 0; CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap; CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod; CCP5_CMD_FUNCTION(&desc) = function.raw; /* Length of source data is always 256 bytes */ if (op->src.type == CCP_MEMTYPE_SYSTEM) CCP5_CMD_LEN(&desc) = saddr->length; else CCP5_CMD_LEN(&desc) = daddr->length; if (op->src.type == CCP_MEMTYPE_SYSTEM) { CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP) CCP5_CMD_LSB_ID(&desc) = op->sb_key; } else { u32 key_addr = op->src.u.sb * CCP_SB_BYTES; CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr); CCP5_CMD_SRC_HI(&desc) = 0; CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB; } if (op->dst.type == CCP_MEMTYPE_SYSTEM) { CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; } else { u32 key_addr = op->dst.u.sb * CCP_SB_BYTES; CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr); CCP5_CMD_DST_HI(&desc) = 0; CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB; } return ccp5_do_cmd(&desc, op->cmd_q); } static int ccp5_perform_ecc(struct ccp_op *op) { struct ccp5_desc desc; union ccp_function function; op->cmd_q->total_ecc_ops++; /* Zero out all the fields of the command desc */ memset(&desc, 0, Q_DESC_SIZE); CCP5_CMD_ENGINE(&desc) = 
CCP_ENGINE_ECC; CCP5_CMD_SOC(&desc) = 0; CCP5_CMD_IOC(&desc) = 1; CCP5_CMD_INIT(&desc) = 0; CCP5_CMD_EOM(&desc) = 1; CCP5_CMD_PROT(&desc) = 0; function.raw = 0; function.ecc.mode = op->u.ecc.function; CCP5_CMD_FUNCTION(&desc) = function.raw; CCP5_CMD_LEN(&desc) = op->src.u.dma.length; CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; return ccp5_do_cmd(&desc, op->cmd_q); } static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; int queues = 0; int j; /* Build a bit mask to know which LSBs this queue has access to. * Don't bother with segment 0 as it has special privileges. */ for (j = 1; j < MAX_LSB_CNT; j++) { if (status & q_mask) bitmap_set(cmd_q->lsbmask, j, 1); status >>= LSB_REGION_WIDTH; } queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n", cmd_q->id, queues); return queues ? 0 : -EINVAL; } static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp, int lsb_cnt, int n_lsbs, unsigned long *lsb_pub) { DECLARE_BITMAP(qlsb, MAX_LSB_CNT); int bitno; int qlsb_wgt; int i; /* For each queue: * If the count of potential LSBs available to a queue matches the * ordinal given to us in lsb_cnt: * Copy the mask of possible LSBs for this queue into "qlsb"; * For each bit in qlsb, see if the corresponding bit in the * aggregation mask is set; if so, we have a match. * If we have a match, clear the bit in the aggregation to * mark it as no longer available. * If there is no match, clear the bit in qlsb and keep looking. */ for (i = 0; i < ccp->cmd_q_count; i++) { struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT); if (qlsb_wgt == lsb_cnt) { bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT); bitno = find_first_bit(qlsb, MAX_LSB_CNT); while (bitno < MAX_LSB_CNT) { if (test_bit(bitno, lsb_pub)) { /* We found an available LSB * that this queue can access */ cmd_q->lsb = bitno; bitmap_clear(lsb_pub, bitno, 1); dev_dbg(ccp->dev, "Queue %d gets LSB %d\n", i, bitno); break; } bitmap_clear(qlsb, bitno, 1); bitno = find_first_bit(qlsb, MAX_LSB_CNT); } if (bitno >= MAX_LSB_CNT) return -EINVAL; n_lsbs--; } } return n_lsbs; } /* For each queue, from the most- to least-constrained: * find an LSB that can be assigned to the queue. If there are N queues that * can only use M LSBs, where N > M, fail; otherwise, every queue will get a * dedicated LSB. Remaining LSB regions become a shared resource. * If we have fewer LSBs than queues, all LSB regions become shared resources. */ static int ccp_assign_lsbs(struct ccp_device *ccp) { DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT); DECLARE_BITMAP(qlsb, MAX_LSB_CNT); int n_lsbs = 0; int bitno; int i, lsb_cnt; int rc = 0; bitmap_zero(lsb_pub, MAX_LSB_CNT); /* Create an aggregate bitmap to get a total count of available LSBs */ for (i = 0; i < ccp->cmd_q_count; i++) bitmap_or(lsb_pub, lsb_pub, ccp->cmd_q[i].lsbmask, MAX_LSB_CNT); n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT); if (n_lsbs >= ccp->cmd_q_count) { /* We have enough LSBS to give every queue a private LSB. * Brute force search to start with the queues that are more * constrained in LSB choice. When an LSB is privately * assigned, it is removed from the public mask. 
* This is an ugly N squared algorithm with some optimization. */ for (lsb_cnt = 1; n_lsbs && (lsb_cnt <= MAX_LSB_CNT); lsb_cnt++) { rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs, lsb_pub); if (rc < 0) return -EINVAL; n_lsbs = rc; } } rc = 0; /* What's left of the LSBs, according to the public mask, now become * shared. Any zero bits in the lsb_pub mask represent an LSB region * that can't be used as a shared resource, so mark the LSB slots for * them as "in use". */ bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT); bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); while (bitno < MAX_LSB_CNT) { bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE); bitmap_set(qlsb, bitno, 1); bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT); } return rc; } static void ccp5_disable_queue_interrupts(struct ccp_device *ccp) { unsigned int i; for (i = 0; i < ccp->cmd_q_count; i++) iowrite32(0x0, ccp->cmd_q[i].reg_int_enable); } static void ccp5_enable_queue_interrupts(struct ccp_device *ccp) { unsigned int i; for (i = 0; i < ccp->cmd_q_count; i++) iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable); } static void ccp5_irq_bh(unsigned long data) { struct ccp_device *ccp = (struct ccp_device *)data; u32 status; unsigned int i; for (i = 0; i < ccp->cmd_q_count; i++) { struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i]; status = ioread32(cmd_q->reg_interrupt_status); if (status) { cmd_q->int_status = status; cmd_q->q_status = ioread32(cmd_q->reg_status); cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); /* On error, only save the first error value */ if ((status & INT_ERROR) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); cmd_q->int_rcvd = 1; /* Acknowledge the interrupt and wake the kthread */ iowrite32(status, cmd_q->reg_interrupt_status); wake_up_interruptible(&cmd_q->int_queue); } } ccp5_enable_queue_interrupts(ccp); } static irqreturn_t ccp5_irq_handler(int irq, void *data) { struct ccp_device *ccp = (struct ccp_device *)data; ccp5_disable_queue_interrupts(ccp); ccp->total_interrupts++; if (ccp->use_tasklet) tasklet_schedule(&ccp->irq_tasklet); else ccp5_irq_bh((unsigned long)ccp); return IRQ_HANDLED; } static int ccp5_init(struct ccp_device *ccp) { struct device *dev = ccp->dev; struct ccp_cmd_queue *cmd_q; struct dma_pool *dma_pool; char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; unsigned int qmr, i; u64 status; u32 status_lo, status_hi; int ret; /* Find available queues */ qmr = ioread32(ccp->io_regs + Q_MASK_REG); /* * Check for a access to the registers. If this read returns * 0xffffffff, it's likely that the system is running a broken * BIOS which disallows access to the device. Stop here and fail * the initialization (but not the load, as the PSP could get * properly initialized). 
*/ if (qmr == 0xffffffff) { dev_notice(dev, "ccp: unable to access the device: you might be running a broken BIOS.\n"); return 1; } for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { if (!(qmr & (1 << i))) continue; /* Allocate a dma pool for this queue */ snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d", ccp->name, i); dma_pool = dma_pool_create(dma_pool_name, dev, CCP_DMAPOOL_MAX_SIZE, CCP_DMAPOOL_ALIGN, 0); if (!dma_pool) { dev_err(dev, "unable to allocate dma pool\n"); ret = -ENOMEM; goto e_pool; } cmd_q = &ccp->cmd_q[ccp->cmd_q_count]; ccp->cmd_q_count++; cmd_q->ccp = ccp; cmd_q->id = i; cmd_q->dma_pool = dma_pool; mutex_init(&cmd_q->q_mutex); /* Page alignment satisfies our needs for N <= 128 */ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma, GFP_KERNEL); if (!cmd_q->qbase) { dev_err(dev, "unable to allocate command queue\n"); ret = -ENOMEM; goto e_pool; } cmd_q->qidx = 0; /* Preset some register values and masks that are queue * number dependent */ cmd_q->reg_control = ccp->io_regs + CMD5_Q_STATUS_INCR * (i + 1); cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE; cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE; cmd_q->reg_int_enable = cmd_q->reg_control + CMD5_Q_INT_ENABLE_BASE; cmd_q->reg_interrupt_status = cmd_q->reg_control + CMD5_Q_INTERRUPT_STATUS_BASE; cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE; cmd_q->reg_int_status = cmd_q->reg_control + CMD5_Q_INT_STATUS_BASE; cmd_q->reg_dma_status = cmd_q->reg_control + CMD5_Q_DMA_STATUS_BASE; cmd_q->reg_dma_read_status = cmd_q->reg_control + CMD5_Q_DMA_READ_STATUS_BASE; cmd_q->reg_dma_write_status = cmd_q->reg_control + CMD5_Q_DMA_WRITE_STATUS_BASE; init_waitqueue_head(&cmd_q->int_queue); dev_dbg(dev, "queue #%u available\n", i); } if (ccp->cmd_q_count == 0) { dev_notice(dev, "no command queues available\n"); ret = 1; goto e_pool; } /* Turn off the queues and disable interrupts until ready */ ccp5_disable_queue_interrupts(ccp); for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; cmd_q->qcontrol = 0; /* Start with nothing */ iowrite32(cmd_q->qcontrol, cmd_q->reg_control); ioread32(cmd_q->reg_int_status); ioread32(cmd_q->reg_status); /* Clear the interrupt status */ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); } dev_dbg(dev, "Requesting an IRQ...\n"); /* Request an irq */ ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp); if (ret) { dev_err(dev, "unable to allocate an IRQ\n"); goto e_pool; } /* Initialize the ISR tasklet */ if (ccp->use_tasklet) tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh, (unsigned long)ccp); dev_dbg(dev, "Loading LSB map...\n"); /* Copy the private LSB mask to the public registers */ status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET); iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET); status = ((u64)status_hi<<30) | (u64)status_lo; dev_dbg(dev, "Configuring virtual queues...\n"); /* Configure size of each virtual queue accessible to host */ for (i = 0; i < ccp->cmd_q_count; i++) { u32 dma_addr_lo; u32 dma_addr_hi; cmd_q = &ccp->cmd_q[i]; cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT; cmd_q->qdma_tail = cmd_q->qbase_dma; dma_addr_lo = low_address(cmd_q->qdma_tail); iowrite32((u32)dma_addr_lo, 
cmd_q->reg_tail_lo); iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo); dma_addr_hi = high_address(cmd_q->qdma_tail); cmd_q->qcontrol |= (dma_addr_hi << 16); iowrite32(cmd_q->qcontrol, cmd_q->reg_control); /* Find the LSB regions accessible to the queue */ ccp_find_lsb_regions(cmd_q, status); cmd_q->lsb = -1; /* Unassigned value */ } dev_dbg(dev, "Assigning LSBs...\n"); ret = ccp_assign_lsbs(ccp); if (ret) { dev_err(dev, "Unable to assign LSBs (%d)\n", ret); goto e_irq; } /* Optimization: pre-allocate LSB slots for each queue */ for (i = 0; i < ccp->cmd_q_count; i++) { ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2); ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2); } dev_dbg(dev, "Starting threads...\n"); /* Create a kthread for each queue */ for (i = 0; i < ccp->cmd_q_count; i++) { struct task_struct *kthread; cmd_q = &ccp->cmd_q[i]; kthread = kthread_run(ccp_cmd_queue_thread, cmd_q, "%s-q%u", ccp->name, cmd_q->id); if (IS_ERR(kthread)) { dev_err(dev, "error creating queue thread (%ld)\n", PTR_ERR(kthread)); ret = PTR_ERR(kthread); goto e_kthread; } cmd_q->kthread = kthread; } dev_dbg(dev, "Enabling interrupts...\n"); ccp5_enable_queue_interrupts(ccp); dev_dbg(dev, "Registering device...\n"); /* Put this on the unit list to make it available */ ccp_add_device(ccp); ret = ccp_register_rng(ccp); if (ret) goto e_kthread; /* Register the DMA engine support */ ret = ccp_dmaengine_register(ccp); if (ret) goto e_hwrng; #ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS /* Set up debugfs entries */ ccp5_debugfs_setup(ccp); #endif return 0; e_hwrng: ccp_unregister_rng(ccp); e_kthread: for (i = 0; i < ccp->cmd_q_count; i++) if (ccp->cmd_q[i].kthread) kthread_stop(ccp->cmd_q[i].kthread); e_irq: sp_free_ccp_irq(ccp->sp, ccp); e_pool: for (i = 0; i < ccp->cmd_q_count; i++) dma_pool_destroy(ccp->cmd_q[i].dma_pool); return ret; } static void ccp5_destroy(struct ccp_device *ccp) { struct ccp_cmd_queue *cmd_q; struct ccp_cmd *cmd; unsigned int i; /* Unregister the DMA engine */ ccp_dmaengine_unregister(ccp); /* Unregister the RNG */ ccp_unregister_rng(ccp); /* Remove this device from the list of available units first */ ccp_del_device(ccp); #ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS /* We're in the process of tearing down the entire driver; * when all the devices are gone clean up debugfs */ if (ccp_present()) ccp5_debugfs_destroy(); #endif /* Disable and clear interrupts */ ccp5_disable_queue_interrupts(ccp); for (i = 0; i < ccp->cmd_q_count; i++) { cmd_q = &ccp->cmd_q[i]; /* Turn off the run bit */ iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control); /* Clear the interrupt status */ iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); ioread32(cmd_q->reg_int_status); ioread32(cmd_q->reg_status); } /* Stop the queue kthreads */ for (i = 0; i < ccp->cmd_q_count; i++) if (ccp->cmd_q[i].kthread) kthread_stop(ccp->cmd_q[i].kthread); sp_free_ccp_irq(ccp->sp, ccp); /* Flush the cmd and backlog queue */ while (!list_empty(&ccp->cmd)) { /* Invoke the callback directly with an error code */ cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); list_del(&cmd->entry); cmd->callback(cmd->data, -ENODEV); } while (!list_empty(&ccp->backlog)) { /* Invoke the callback directly with an error code */ cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); list_del(&cmd->entry); cmd->callback(cmd->data, -ENODEV); } } static void ccp5_config(struct ccp_device *ccp) { /* Public side */ iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET); } static void ccp5other_config(struct ccp_device *ccp) { int 
i; u32 rnd; /* We own all of the queues on the NTB CCP */ iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); for (i = 0; i < 12; i++) { rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); } iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET); iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET); iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET); iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET); iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET); iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET); ccp5_config(ccp); } /* Version 5 adds some function, but is essentially the same as v5 */ static const struct ccp_actions ccp5_actions = { .aes = ccp5_perform_aes, .xts_aes = ccp5_perform_xts_aes, .sha = ccp5_perform_sha, .des3 = ccp5_perform_des3, .rsa = ccp5_perform_rsa, .passthru = ccp5_perform_passthru, .ecc = ccp5_perform_ecc, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, .destroy = ccp5_destroy, .get_free_slots = ccp5_get_free_slots, }; const struct ccp_vdata ccpv5a = { .version = CCP_VERSION(5, 0), .setup = ccp5_config, .perform = &ccp5_actions, .offset = 0x0, .rsamax = CCP5_RSA_MAX_WIDTH, }; const struct ccp_vdata ccpv5b = { .version = CCP_VERSION(5, 0), .dma_chan_attr = DMA_PRIVATE, .setup = ccp5other_config, .perform = &ccp5_actions, .offset = 0x0, .rsamax = CCP5_RSA_MAX_WIDTH, };
linux-master
drivers/crypto/ccp/ccp-dev-v5.c
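ccp5_get_free_slots() above derives the head index from the queue's MMIO head pointer and computes the free descriptor count modulo COMMANDS_PER_QUEUE, always leaving one slot unused so a full ring and an empty ring stay distinguishable. The arithmetic is restated below as a standalone sketch with hypothetical parameter names.

/* Sketch only: ring occupancy math used by ccp5_get_free_slots().
 * head_idx is where the engine will read next; tail_idx (cmd_q->qidx
 * in the driver) is where software will write next. */
static unsigned int example_free_slots(unsigned int head_idx,
				       unsigned int tail_idx,
				       unsigned int ring_size)
{
	/* One slot is sacrificed so head == tail always means "empty". */
	return (head_idx + ring_size - tail_idx - 1) % ring_size;
}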
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) SHA crypto API support * * Copyright (C) 2013,2018 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <[email protected]> * Author: Gary R Hook <[email protected]> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <crypto/algapi.h> #include <crypto/hash.h> #include <crypto/hmac.h> #include <crypto/internal/hash.h> #include <crypto/sha1.h> #include <crypto/sha2.h> #include <crypto/scatterwalk.h> #include <linux/string.h> #include "ccp-crypto.h" static int ccp_sha_complete(struct crypto_async_request *async_req, int ret) { struct ahash_request *req = ahash_request_cast(async_req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req); unsigned int digest_size = crypto_ahash_digestsize(tfm); if (ret) goto e_free; if (rctx->hash_rem) { /* Save remaining data to buffer */ unsigned int offset = rctx->nbytes - rctx->hash_rem; scatterwalk_map_and_copy(rctx->buf, rctx->src, offset, rctx->hash_rem, 0); rctx->buf_count = rctx->hash_rem; } else { rctx->buf_count = 0; } /* Update result area if supplied */ if (req->result && rctx->final) memcpy(req->result, rctx->ctx, digest_size); e_free: sg_free_table(&rctx->data_sg); return ret; } static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, unsigned int final) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req); struct scatterlist *sg; unsigned int block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); unsigned int sg_count; gfp_t gfp; u64 len; int ret; len = (u64)rctx->buf_count + (u64)nbytes; if (!final && (len <= block_size)) { scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src, 0, nbytes, 0); rctx->buf_count += nbytes; return 0; } rctx->src = req->src; rctx->nbytes = nbytes; rctx->final = final; rctx->hash_rem = final ? 0 : len & (block_size - 1); rctx->hash_cnt = len - rctx->hash_rem; if (!final && !rctx->hash_rem) { /* CCP can't do zero length final, so keep some data around */ rctx->hash_cnt -= block_size; rctx->hash_rem = block_size; } /* Initialize the context scatterlist */ sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx)); sg = NULL; if (rctx->buf_count && nbytes) { /* Build the data scatterlist table - allocate enough entries * for both data pieces (buffer and input data) */ gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; sg_count = sg_nents(req->src) + 1; ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp); if (ret) return ret; sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); sg = ccp_crypto_sg_table_add(&rctx->data_sg, &rctx->buf_sg); if (!sg) { ret = -EINVAL; goto e_free; } sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src); if (!sg) { ret = -EINVAL; goto e_free; } sg_mark_end(sg); sg = rctx->data_sg.sgl; } else if (rctx->buf_count) { sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); sg = &rctx->buf_sg; } else if (nbytes) { sg = req->src; } rctx->msg_bits += (rctx->hash_cnt << 3); /* Total in bits */ memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_SHA; rctx->cmd.u.sha.type = rctx->type; rctx->cmd.u.sha.ctx = &rctx->ctx_sg; switch (rctx->type) { case CCP_SHA_TYPE_1: rctx->cmd.u.sha.ctx_len = SHA1_DIGEST_SIZE; break; case CCP_SHA_TYPE_224: rctx->cmd.u.sha.ctx_len = SHA224_DIGEST_SIZE; break; case CCP_SHA_TYPE_256: rctx->cmd.u.sha.ctx_len = SHA256_DIGEST_SIZE; break; case CCP_SHA_TYPE_384: rctx->cmd.u.sha.ctx_len = SHA384_DIGEST_SIZE; break; case CCP_SHA_TYPE_512: rctx->cmd.u.sha.ctx_len = SHA512_DIGEST_SIZE; break; default: /* Should never get here */ break; } rctx->cmd.u.sha.src = sg; rctx->cmd.u.sha.src_len = rctx->hash_cnt; rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? &ctx->u.sha.opad_sg : NULL; rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ? ctx->u.sha.opad_count : 0; rctx->cmd.u.sha.first = rctx->first; rctx->cmd.u.sha.final = rctx->final; rctx->cmd.u.sha.msg_bits = rctx->msg_bits; rctx->first = 0; ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); return ret; e_free: sg_free_table(&rctx->data_sg); return ret; } static int ccp_sha_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req); struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(crypto_ahash_tfm(tfm)); unsigned int block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); memset(rctx, 0, sizeof(*rctx)); rctx->type = alg->type; rctx->first = 1; if (ctx->u.sha.key_len) { /* Buffer the HMAC key for first update */ memcpy(rctx->buf, ctx->u.sha.ipad, block_size); rctx->buf_count = block_size; } return 0; } static int ccp_sha_update(struct ahash_request *req) { return ccp_do_sha_update(req, req->nbytes, 0); } static int ccp_sha_final(struct ahash_request *req) { return ccp_do_sha_update(req, 0, 1); } static int ccp_sha_finup(struct ahash_request *req) { return ccp_do_sha_update(req, req->nbytes, 1); } static int ccp_sha_digest(struct ahash_request *req) { int ret; ret = ccp_sha_init(req); if (ret) return ret; return ccp_sha_finup(req); } static int ccp_sha_export(struct ahash_request *req, void *out) { struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req); struct ccp_sha_exp_ctx state; /* Don't let anything leak to 'out' */ memset(&state, 0, sizeof(state)); state.type = rctx->type; state.msg_bits = rctx->msg_bits; state.first = rctx->first; memcpy(state.ctx, rctx->ctx, sizeof(state.ctx)); state.buf_count = rctx->buf_count; memcpy(state.buf, rctx->buf, sizeof(state.buf)); /* 'out' may not be aligned so memcpy from local variable */ memcpy(out, &state, sizeof(state)); return 0; } static int ccp_sha_import(struct ahash_request *req, const void *in) { struct ccp_sha_req_ctx *rctx = ahash_request_ctx_dma(req); struct ccp_sha_exp_ctx state; /* 'in' may not be aligned so memcpy to local variable */ 
memcpy(&state, in, sizeof(state)); memset(rctx, 0, sizeof(*rctx)); rctx->type = state.type; rctx->msg_bits = state.msg_bits; rctx->first = state.first; memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx)); rctx->buf_count = state.buf_count; memcpy(rctx->buf, state.buf, sizeof(rctx->buf)); return 0; } static int ccp_sha_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int key_len) { struct ccp_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct crypto_shash *shash = ctx->u.sha.hmac_tfm; unsigned int block_size = crypto_shash_blocksize(shash); unsigned int digest_size = crypto_shash_digestsize(shash); int i, ret; /* Set to zero until complete */ ctx->u.sha.key_len = 0; /* Clear key area to provide zero padding for keys smaller * than the block size */ memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key)); if (key_len > block_size) { /* Must hash the input key */ ret = crypto_shash_tfm_digest(shash, key, key_len, ctx->u.sha.key); if (ret) return -EINVAL; key_len = digest_size; } else { memcpy(ctx->u.sha.key, key, key_len); } for (i = 0; i < block_size; i++) { ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ HMAC_IPAD_VALUE; ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ HMAC_OPAD_VALUE; } sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size); ctx->u.sha.opad_count = block_size; ctx->u.sha.key_len = key_len; return 0; } static int ccp_sha_cra_init(struct crypto_tfm *tfm) { struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); struct ccp_ctx *ctx = crypto_ahash_ctx_dma(ahash); ctx->complete = ccp_sha_complete; ctx->u.sha.key_len = 0; crypto_ahash_set_reqsize_dma(ahash, sizeof(struct ccp_sha_req_ctx)); return 0; } static void ccp_sha_cra_exit(struct crypto_tfm *tfm) { } static int ccp_hmac_sha_cra_init(struct crypto_tfm *tfm) { struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm); struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); struct crypto_shash *hmac_tfm; hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); if (IS_ERR(hmac_tfm)) { pr_warn("could not load driver %s need for HMAC support\n", alg->child_alg); return PTR_ERR(hmac_tfm); } ctx->u.sha.hmac_tfm = hmac_tfm; return ccp_sha_cra_init(tfm); } static void ccp_hmac_sha_cra_exit(struct crypto_tfm *tfm) { struct ccp_ctx *ctx = crypto_tfm_ctx_dma(tfm); if (ctx->u.sha.hmac_tfm) crypto_free_shash(ctx->u.sha.hmac_tfm); ccp_sha_cra_exit(tfm); } struct ccp_sha_def { unsigned int version; const char *name; const char *drv_name; enum ccp_sha_type type; u32 digest_size; u32 block_size; }; static struct ccp_sha_def sha_algs[] = { { .version = CCP_VERSION(3, 0), .name = "sha1", .drv_name = "sha1-ccp", .type = CCP_SHA_TYPE_1, .digest_size = SHA1_DIGEST_SIZE, .block_size = SHA1_BLOCK_SIZE, }, { .version = CCP_VERSION(3, 0), .name = "sha224", .drv_name = "sha224-ccp", .type = CCP_SHA_TYPE_224, .digest_size = SHA224_DIGEST_SIZE, .block_size = SHA224_BLOCK_SIZE, }, { .version = CCP_VERSION(3, 0), .name = "sha256", .drv_name = "sha256-ccp", .type = CCP_SHA_TYPE_256, .digest_size = SHA256_DIGEST_SIZE, .block_size = SHA256_BLOCK_SIZE, }, { .version = CCP_VERSION(5, 0), .name = "sha384", .drv_name = "sha384-ccp", .type = CCP_SHA_TYPE_384, .digest_size = SHA384_DIGEST_SIZE, .block_size = SHA384_BLOCK_SIZE, }, { .version = CCP_VERSION(5, 0), .name = "sha512", .drv_name = "sha512-ccp", .type = CCP_SHA_TYPE_512, .digest_size = SHA512_DIGEST_SIZE, .block_size = SHA512_BLOCK_SIZE, }, }; static int ccp_register_hmac_alg(struct list_head *head, const struct ccp_sha_def *def, const struct ccp_crypto_ahash_alg *base_alg) { struct ccp_crypto_ahash_alg *ccp_alg; struct ahash_alg 
*alg; struct hash_alg_common *halg; struct crypto_alg *base; int ret; ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); if (!ccp_alg) return -ENOMEM; /* Copy the base algorithm and only change what's necessary */ *ccp_alg = *base_alg; INIT_LIST_HEAD(&ccp_alg->entry); strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); alg = &ccp_alg->alg; alg->setkey = ccp_sha_setkey; halg = &alg->halg; base = &halg->base; snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name); snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s", def->drv_name); base->cra_init = ccp_hmac_sha_cra_init; base->cra_exit = ccp_hmac_sha_cra_exit; ret = crypto_register_ahash(alg); if (ret) { pr_err("%s ahash algorithm registration error (%d)\n", base->cra_name, ret); kfree(ccp_alg); return ret; } list_add(&ccp_alg->entry, head); return ret; } static int ccp_register_sha_alg(struct list_head *head, const struct ccp_sha_def *def) { struct ccp_crypto_ahash_alg *ccp_alg; struct ahash_alg *alg; struct hash_alg_common *halg; struct crypto_alg *base; int ret; ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); if (!ccp_alg) return -ENOMEM; INIT_LIST_HEAD(&ccp_alg->entry); ccp_alg->type = def->type; alg = &ccp_alg->alg; alg->init = ccp_sha_init; alg->update = ccp_sha_update; alg->final = ccp_sha_final; alg->finup = ccp_sha_finup; alg->digest = ccp_sha_digest; alg->export = ccp_sha_export; alg->import = ccp_sha_import; halg = &alg->halg; halg->digestsize = def->digest_size; halg->statesize = sizeof(struct ccp_sha_exp_ctx); base = &halg->base; snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->drv_name); base->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; base->cra_blocksize = def->block_size; base->cra_ctxsize = sizeof(struct ccp_ctx) + crypto_dma_padding(); base->cra_priority = CCP_CRA_PRIORITY; base->cra_init = ccp_sha_cra_init; base->cra_exit = ccp_sha_cra_exit; base->cra_module = THIS_MODULE; ret = crypto_register_ahash(alg); if (ret) { pr_err("%s ahash algorithm registration error (%d)\n", base->cra_name, ret); kfree(ccp_alg); return ret; } list_add(&ccp_alg->entry, head); ret = ccp_register_hmac_alg(head, def, ccp_alg); return ret; } int ccp_register_sha_algs(struct list_head *head) { int i, ret; unsigned int ccpversion = ccp_version(); for (i = 0; i < ARRAY_SIZE(sha_algs); i++) { if (sha_algs[i].version > ccpversion) continue; ret = ccp_register_sha_alg(head, &sha_algs[i]); if (ret) return ret; } return 0; }
linux-master
drivers/crypto/ccp/ccp-crypto-sha.c
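The ccp_sha_setkey() path above zero-pads the HMAC key to the block size (hashing it first if it is longer) and derives the inner and outer pads by XORing with the standard HMAC constants. The following standalone sketch illustrates just that pad derivation; BLOCK_SIZE, IPAD_VALUE, OPAD_VALUE and hmac_prepare_pads() are illustrative names rather than driver or kernel symbols, and the long-key pre-hash step is deliberately left out.

/*
 * Standalone sketch (not driver code) of the HMAC key preparation done in
 * ccp_sha_setkey(): zero-pad the key to the block size, then derive the
 * inner/outer pads by XORing with the usual HMAC constants. Keys longer
 * than the block size would first be hashed down to the digest size,
 * which this sketch does not implement.
 */
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 64		/* SHA-1/224/256 block size */
#define IPAD_VALUE 0x36		/* mirrors HMAC_IPAD_VALUE */
#define OPAD_VALUE 0x5c		/* mirrors HMAC_OPAD_VALUE */

static void hmac_prepare_pads(const unsigned char *key, size_t key_len,
			      unsigned char *ipad, unsigned char *opad)
{
	unsigned char padded[BLOCK_SIZE] = { 0 };	/* zero padding */
	size_t i;

	/* caller must have reduced longer keys to <= BLOCK_SIZE already */
	memcpy(padded, key, key_len);

	for (i = 0; i < BLOCK_SIZE; i++) {
		ipad[i] = padded[i] ^ IPAD_VALUE;
		opad[i] = padded[i] ^ OPAD_VALUE;
	}
}

int main(void)
{
	unsigned char ipad[BLOCK_SIZE], opad[BLOCK_SIZE];

	hmac_prepare_pads((const unsigned char *)"secret", 6, ipad, opad);
	printf("ipad[0]=0x%02x opad[0]=0x%02x\n", ipad[0], opad[0]);
	return 0;
}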
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) AES GCM crypto API support * * Copyright (C) 2016,2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook <[email protected]> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <crypto/internal/aead.h> #include <crypto/algapi.h> #include <crypto/aes.h> #include <crypto/ctr.h> #include <crypto/gcm.h> #include <crypto/scatterwalk.h> #include "ccp-crypto.h" static int ccp_aes_gcm_complete(struct crypto_async_request *async_req, int ret) { return ret; } static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int key_len) { struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm); switch (key_len) { case AES_KEYSIZE_128: ctx->u.aes.type = CCP_AES_TYPE_128; break; case AES_KEYSIZE_192: ctx->u.aes.type = CCP_AES_TYPE_192; break; case AES_KEYSIZE_256: ctx->u.aes.type = CCP_AES_TYPE_256; break; default: return -EINVAL; } ctx->u.aes.mode = CCP_AES_MODE_GCM; ctx->u.aes.key_len = key_len; memcpy(ctx->u.aes.key, key, key_len); sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); return 0; } static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { case 16: case 15: case 14: case 13: case 12: case 8: case 4: break; default: return -EINVAL; } return 0; } static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm); struct ccp_aes_req_ctx *rctx = aead_request_ctx_dma(req); struct scatterlist *iv_sg = NULL; unsigned int iv_len = 0; int i; int ret = 0; if (!ctx->u.aes.key_len) return -EINVAL; if (ctx->u.aes.mode != CCP_AES_MODE_GCM) return -EINVAL; if (!req->iv) return -EINVAL; /* * 5 parts: * plaintext/ciphertext input * AAD * key * IV * Destination+tag buffer */ /* Prepare the IV: 12 bytes + an integer (counter) */ memcpy(rctx->iv, req->iv, GCM_AES_IV_SIZE); for (i = 0; i < 3; i++) rctx->iv[i + GCM_AES_IV_SIZE] = 0; rctx->iv[AES_BLOCK_SIZE - 1] = 1; /* Set up a scatterlist for the IV */ iv_sg = &rctx->iv_sg; iv_len = AES_BLOCK_SIZE; sg_init_one(iv_sg, rctx->iv, iv_len); /* The AAD + plaintext are concatenated in the src buffer */ memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_AES; rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm); rctx->cmd.u.aes.type = ctx->u.aes.type; rctx->cmd.u.aes.mode = ctx->u.aes.mode; rctx->cmd.u.aes.action = encrypt; rctx->cmd.u.aes.key = &ctx->u.aes.key_sg; rctx->cmd.u.aes.key_len = ctx->u.aes.key_len; rctx->cmd.u.aes.iv = iv_sg; rctx->cmd.u.aes.iv_len = iv_len; rctx->cmd.u.aes.src = req->src; rctx->cmd.u.aes.src_len = req->cryptlen; rctx->cmd.u.aes.aad_len = req->assoclen; /* The cipher text + the tag are in the dst buffer */ rctx->cmd.u.aes.dst = req->dst; ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); return ret; } static int ccp_aes_gcm_encrypt(struct aead_request *req) { return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_ENCRYPT); } static int ccp_aes_gcm_decrypt(struct aead_request *req) { return ccp_aes_gcm_crypt(req, CCP_AES_ACTION_DECRYPT); } static int ccp_aes_gcm_cra_init(struct crypto_aead *tfm) { struct ccp_ctx *ctx = crypto_aead_ctx_dma(tfm); ctx->complete = ccp_aes_gcm_complete; ctx->u.aes.key_len = 0; crypto_aead_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx)); return 0; } static void ccp_aes_gcm_cra_exit(struct crypto_tfm *tfm) { } static struct 
aead_alg ccp_aes_gcm_defaults = { .setkey = ccp_aes_gcm_setkey, .setauthsize = ccp_aes_gcm_setauthsize, .encrypt = ccp_aes_gcm_encrypt, .decrypt = ccp_aes_gcm_decrypt, .init = ccp_aes_gcm_cra_init, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, .base = { .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct ccp_ctx) + CRYPTO_DMA_PADDING, .cra_priority = CCP_CRA_PRIORITY, .cra_exit = ccp_aes_gcm_cra_exit, .cra_module = THIS_MODULE, }, }; struct ccp_aes_aead_def { enum ccp_aes_mode mode; unsigned int version; const char *name; const char *driver_name; unsigned int blocksize; unsigned int ivsize; struct aead_alg *alg_defaults; }; static struct ccp_aes_aead_def aes_aead_algs[] = { { .mode = CCP_AES_MODE_GHASH, .version = CCP_VERSION(5, 0), .name = "gcm(aes)", .driver_name = "gcm-aes-ccp", .blocksize = 1, .ivsize = AES_BLOCK_SIZE, .alg_defaults = &ccp_aes_gcm_defaults, }, }; static int ccp_register_aes_aead(struct list_head *head, const struct ccp_aes_aead_def *def) { struct ccp_crypto_aead *ccp_aead; struct aead_alg *alg; int ret; ccp_aead = kzalloc(sizeof(*ccp_aead), GFP_KERNEL); if (!ccp_aead) return -ENOMEM; INIT_LIST_HEAD(&ccp_aead->entry); ccp_aead->mode = def->mode; /* Copy the defaults and override as necessary */ alg = &ccp_aead->alg; *alg = *def->alg_defaults; snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->driver_name); alg->base.cra_blocksize = def->blocksize; ret = crypto_register_aead(alg); if (ret) { pr_err("%s aead algorithm registration error (%d)\n", alg->base.cra_name, ret); kfree(ccp_aead); return ret; } list_add(&ccp_aead->entry, head); return 0; } int ccp_register_aes_aeads(struct list_head *head) { int i, ret; unsigned int ccpversion = ccp_version(); for (i = 0; i < ARRAY_SIZE(aes_aead_algs); i++) { if (aes_aead_algs[i].version > ccpversion) continue; ret = ccp_register_aes_aead(head, &aes_aead_algs[i]); if (ret) return ret; } return 0; }
linux-master
drivers/crypto/ccp/ccp-crypto-aes-galois.c
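ccp_aes_gcm_crypt() above builds the 16-byte counter block it hands to the CCP as the 12-byte GCM nonce followed by a 32-bit counter whose low byte starts at 1. Below is a minimal standalone sketch of that layout; gcm_build_counter_block() is an illustrative name, not a kernel function.

/*
 * Standalone sketch of the counter block ccp_aes_gcm_crypt() prepares:
 * the 12-byte GCM nonce followed by a 32-bit big-endian counter
 * initialised to 1 for the first data block.
 */
#include <stdio.h>
#include <string.h>

#define GCM_NONCE_SIZE	12
#define AES_BLOCK	16

static void gcm_build_counter_block(const unsigned char nonce[GCM_NONCE_SIZE],
				    unsigned char block[AES_BLOCK])
{
	memcpy(block, nonce, GCM_NONCE_SIZE);
	block[12] = 0;
	block[13] = 0;
	block[14] = 0;
	block[15] = 1;	/* counter starts at 1 */
}

int main(void)
{
	unsigned char nonce[GCM_NONCE_SIZE] = { 0 };
	unsigned char block[AES_BLOCK];
	int i;

	gcm_build_counter_block(nonce, block);
	for (i = 0; i < AES_BLOCK; i++)
		printf("%02x", block[i]);
	printf("\n");
	return 0;
}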
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support * * Copyright (C) 2013,2017 Advanced Micro Devices, Inc. * * Author: Gary R Hook <[email protected]> * Author: Tom Lendacky <[email protected]> */ #include <linux/module.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/scatterlist.h> #include <crypto/aes.h> #include <crypto/xts.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include "ccp-crypto.h" struct ccp_aes_xts_def { const char *name; const char *drv_name; }; static const struct ccp_aes_xts_def aes_xts_algs[] = { { .name = "xts(aes)", .drv_name = "xts-aes-ccp", }, }; struct ccp_unit_size_map { unsigned int size; u32 value; }; static struct ccp_unit_size_map xts_unit_sizes[] = { { .size = 16, .value = CCP_XTS_AES_UNIT_SIZE_16, }, { .size = 512, .value = CCP_XTS_AES_UNIT_SIZE_512, }, { .size = 1024, .value = CCP_XTS_AES_UNIT_SIZE_1024, }, { .size = 2048, .value = CCP_XTS_AES_UNIT_SIZE_2048, }, { .size = 4096, .value = CCP_XTS_AES_UNIT_SIZE_4096, }, }; static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret) { struct skcipher_request *req = skcipher_request_cast(async_req); struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); if (ret) return ret; memcpy(req->iv, rctx->iv, AES_BLOCK_SIZE); return 0; } static int ccp_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int key_len) { struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); unsigned int ccpversion = ccp_version(); int ret; ret = xts_verify_key(tfm, key, key_len); if (ret) return ret; /* Version 3 devices support 128-bit keys; version 5 devices can * accommodate 128- and 256-bit keys. */ switch (key_len) { case AES_KEYSIZE_128 * 2: memcpy(ctx->u.aes.key, key, key_len); break; case AES_KEYSIZE_256 * 2: if (ccpversion > CCP_VERSION(3, 0)) memcpy(ctx->u.aes.key, key, key_len); break; } ctx->u.aes.key_len = key_len / 2; sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len); } static int ccp_aes_xts_crypt(struct skcipher_request *req, unsigned int encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); struct ccp_aes_req_ctx *rctx = skcipher_request_ctx_dma(req); unsigned int ccpversion = ccp_version(); unsigned int fallback = 0; unsigned int unit; u32 unit_size; int ret; if (!ctx->u.aes.key_len) return -EINVAL; if (!req->iv) return -EINVAL; /* Check conditions under which the CCP can fulfill a request. The * device can handle input plaintext of a length that is a multiple * of the unit_size, bug the crypto implementation only supports * the unit_size being equal to the input length. This limits the * number of scenarios we can handle. */ unit_size = CCP_XTS_AES_UNIT_SIZE__LAST; for (unit = 0; unit < ARRAY_SIZE(xts_unit_sizes); unit++) { if (req->cryptlen == xts_unit_sizes[unit].size) { unit_size = unit; break; } } /* The CCP has restrictions on block sizes. Also, a version 3 device * only supports AES-128 operations; version 5 CCPs support both * AES-128 and -256 operations. 
*/ if (unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) fallback = 1; if ((ccpversion < CCP_VERSION(5, 0)) && (ctx->u.aes.key_len != AES_KEYSIZE_128)) fallback = 1; if ((ctx->u.aes.key_len != AES_KEYSIZE_128) && (ctx->u.aes.key_len != AES_KEYSIZE_256)) fallback = 1; if (fallback) { /* Use the fallback to process the request for any * unsupported unit sizes or key sizes */ skcipher_request_set_tfm(&rctx->fallback_req, ctx->u.aes.tfm_skcipher); skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, req->base.complete, req->base.data); skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst, req->cryptlen, req->iv); ret = encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) : crypto_skcipher_decrypt(&rctx->fallback_req); return ret; } memcpy(rctx->iv, req->iv, AES_BLOCK_SIZE); sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE); memset(&rctx->cmd, 0, sizeof(rctx->cmd)); INIT_LIST_HEAD(&rctx->cmd.entry); rctx->cmd.engine = CCP_ENGINE_XTS_AES_128; rctx->cmd.u.xts.type = CCP_AES_TYPE_128; rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT : CCP_AES_ACTION_DECRYPT; rctx->cmd.u.xts.unit_size = unit_size; rctx->cmd.u.xts.key = &ctx->u.aes.key_sg; rctx->cmd.u.xts.key_len = ctx->u.aes.key_len; rctx->cmd.u.xts.iv = &rctx->iv_sg; rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE; rctx->cmd.u.xts.src = req->src; rctx->cmd.u.xts.src_len = req->cryptlen; rctx->cmd.u.xts.dst = req->dst; ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); return ret; } static int ccp_aes_xts_encrypt(struct skcipher_request *req) { return ccp_aes_xts_crypt(req, 1); } static int ccp_aes_xts_decrypt(struct skcipher_request *req) { return ccp_aes_xts_crypt(req, 0); } static int ccp_aes_xts_init_tfm(struct crypto_skcipher *tfm) { struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); struct crypto_skcipher *fallback_tfm; ctx->complete = ccp_aes_xts_complete; ctx->u.aes.key_len = 0; fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback_tfm)) { pr_warn("could not load fallback driver xts(aes)\n"); return PTR_ERR(fallback_tfm); } ctx->u.aes.tfm_skcipher = fallback_tfm; crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct ccp_aes_req_ctx) + crypto_skcipher_reqsize(fallback_tfm)); return 0; } static void ccp_aes_xts_exit_tfm(struct crypto_skcipher *tfm) { struct ccp_ctx *ctx = crypto_skcipher_ctx_dma(tfm); crypto_free_skcipher(ctx->u.aes.tfm_skcipher); } static int ccp_register_aes_xts_alg(struct list_head *head, const struct ccp_aes_xts_def *def) { struct ccp_crypto_skcipher_alg *ccp_alg; struct skcipher_alg *alg; int ret; ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); if (!ccp_alg) return -ENOMEM; INIT_LIST_HEAD(&ccp_alg->entry); alg = &ccp_alg->alg; snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", def->drv_name); alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK; alg->base.cra_blocksize = AES_BLOCK_SIZE; alg->base.cra_ctxsize = sizeof(struct ccp_ctx) + crypto_dma_padding(); alg->base.cra_priority = CCP_CRA_PRIORITY; alg->base.cra_module = THIS_MODULE; alg->setkey = ccp_aes_xts_setkey; alg->encrypt = ccp_aes_xts_encrypt; alg->decrypt = ccp_aes_xts_decrypt; alg->min_keysize = AES_MIN_KEY_SIZE * 2; alg->max_keysize = AES_MAX_KEY_SIZE * 2; alg->ivsize = AES_BLOCK_SIZE; alg->init = ccp_aes_xts_init_tfm; alg->exit = ccp_aes_xts_exit_tfm; ret = crypto_register_skcipher(alg); if (ret) { pr_err("%s skcipher algorithm 
registration error (%d)\n", alg->base.cra_name, ret); kfree(ccp_alg); return ret; } list_add(&ccp_alg->entry, head); return 0; } int ccp_register_aes_xts_algs(struct list_head *head) { int i, ret; for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) { ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]); if (ret) return ret; } return 0; }
linux-master
drivers/crypto/ccp/ccp-crypto-aes-xts.c
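ccp_aes_xts_crypt() above offloads to the CCP only when the request length exactly matches one of the fixed XTS unit sizes and the key size is supported by the device generation; anything else is routed to the xts(aes) fallback. The sketch below reproduces only the size column of the driver's table and assumes a version 5 part; ccp_can_offload_xts() is an illustrative name.

/*
 * Standalone sketch of the offload decision in ccp_aes_xts_crypt(): the CCP
 * is used only when the request length exactly matches a supported XTS unit
 * size, otherwise the request is punted to the xts(aes) fallback.
 */
#include <stdbool.h>
#include <stdio.h>

static const unsigned int xts_unit_sizes[] = { 16, 512, 1024, 2048, 4096 };

static bool ccp_can_offload_xts(unsigned int cryptlen, unsigned int key_bits)
{
	size_t i;
	bool size_ok = false;

	for (i = 0; i < sizeof(xts_unit_sizes) / sizeof(xts_unit_sizes[0]); i++) {
		if (cryptlen == xts_unit_sizes[i]) {
			size_ok = true;
			break;
		}
	}

	/* version 3 CCPs only do AES-128; a v5 part is assumed here */
	return size_ok && (key_bits == 128 || key_bits == 256);
}

int main(void)
{
	printf("512B/128-bit: %d\n", ccp_can_offload_xts(512, 128));	/* 1 */
	printf("513B/128-bit: %d\n", ccp_can_offload_xts(513, 128));	/* 0 */
	return 0;
}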
// SPDX-License-Identifier: GPL-2.0-only /* * AES routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. * * Author: Marcelo Henrique Cerri <[email protected]> */ #include <linux/types.h> #include <linux/err.h> #include <linux/crypto.h> #include <linux/delay.h> #include <asm/simd.h> #include <asm/switch_to.h> #include <crypto/aes.h> #include <crypto/internal/cipher.h> #include <crypto/internal/simd.h> #include "aesp8-ppc.h" struct p8_aes_ctx { struct crypto_cipher *fallback; struct aes_key enc_key; struct aes_key dec_key; }; static int p8_aes_init(struct crypto_tfm *tfm) { const char *alg = crypto_tfm_alg_name(tfm); struct crypto_cipher *fallback; struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); fallback = crypto_alloc_cipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback)) { printk(KERN_ERR "Failed to allocate transformation for '%s': %ld\n", alg, PTR_ERR(fallback)); return PTR_ERR(fallback); } crypto_cipher_set_flags(fallback, crypto_cipher_get_flags((struct crypto_cipher *) tfm)); ctx->fallback = fallback; return 0; } static void p8_aes_exit(struct crypto_tfm *tfm) { struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); if (ctx->fallback) { crypto_free_cipher(ctx->fallback); ctx->fallback = NULL; } } static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen) { int ret; struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); preempt_disable(); pagefault_disable(); enable_kernel_vsx(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); ret |= crypto_cipher_setkey(ctx->fallback, key, keylen); return ret ? -EINVAL : 0; } static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); if (!crypto_simd_usable()) { crypto_cipher_encrypt_one(ctx->fallback, dst, src); } else { preempt_disable(); pagefault_disable(); enable_kernel_vsx(); aes_p8_encrypt(src, dst, &ctx->enc_key); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); } } static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); if (!crypto_simd_usable()) { crypto_cipher_decrypt_one(ctx->fallback, dst, src); } else { preempt_disable(); pagefault_disable(); enable_kernel_vsx(); aes_p8_decrypt(src, dst, &ctx->dec_key); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); } } struct crypto_alg p8_aes_alg = { .cra_name = "aes", .cra_driver_name = "p8_aes", .cra_module = THIS_MODULE, .cra_priority = 1000, .cra_type = NULL, .cra_flags = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_NEED_FALLBACK, .cra_alignmask = 0, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct p8_aes_ctx), .cra_init = p8_aes_init, .cra_exit = p8_aes_exit, .cra_cipher = { .cia_min_keysize = AES_MIN_KEY_SIZE, .cia_max_keysize = AES_MAX_KEY_SIZE, .cia_setkey = p8_aes_setkey, .cia_encrypt = p8_aes_encrypt, .cia_decrypt = p8_aes_decrypt, }, };
linux-master
drivers/crypto/vmx/aes.c
// SPDX-License-Identifier: GPL-2.0-only /* * AES CBC routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. * * Author: Marcelo Henrique Cerri <[email protected]> */ #include <asm/simd.h> #include <asm/switch_to.h> #include <crypto/aes.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include "aesp8-ppc.h" struct p8_aes_cbc_ctx { struct crypto_skcipher *fallback; struct aes_key enc_key; struct aes_key dec_key; }; static int p8_aes_cbc_init(struct crypto_skcipher *tfm) { struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *fallback; fallback = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); if (IS_ERR(fallback)) { pr_err("Failed to allocate cbc(aes) fallback: %ld\n", PTR_ERR(fallback)); return PTR_ERR(fallback); } crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + crypto_skcipher_reqsize(fallback)); ctx->fallback = fallback; return 0; } static void p8_aes_cbc_exit(struct crypto_skcipher *tfm) { struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); crypto_free_skcipher(ctx->fallback); } static int p8_aes_cbc_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); int ret; preempt_disable(); pagefault_disable(); enable_kernel_vsx(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); ret |= aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen); return ret ? -EINVAL : 0; } static int p8_aes_cbc_crypt(struct skcipher_request *req, int enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct p8_aes_cbc_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int ret; if (!crypto_simd_usable()) { struct skcipher_request *subreq = skcipher_request_ctx(req); *subreq = *req; skcipher_request_set_tfm(subreq, ctx->fallback); return enc ? crypto_skcipher_encrypt(subreq) : crypto_skcipher_decrypt(subreq); } ret = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes) != 0) { preempt_disable(); pagefault_disable(); enable_kernel_vsx(); aes_p8_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, AES_BLOCK_SIZE), enc ? &ctx->enc_key : &ctx->dec_key, walk.iv, enc); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } return ret; } static int p8_aes_cbc_encrypt(struct skcipher_request *req) { return p8_aes_cbc_crypt(req, 1); } static int p8_aes_cbc_decrypt(struct skcipher_request *req) { return p8_aes_cbc_crypt(req, 0); } struct skcipher_alg p8_aes_cbc_alg = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "p8_aes_cbc", .base.cra_module = THIS_MODULE, .base.cra_priority = 2000, .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct p8_aes_cbc_ctx), .setkey = p8_aes_cbc_setkey, .encrypt = p8_aes_cbc_encrypt, .decrypt = p8_aes_cbc_decrypt, .init = p8_aes_cbc_init, .exit = p8_aes_cbc_exit, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, };
linux-master
drivers/crypto/vmx/aes_cbc.c
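Once a cbc(aes) skcipher such as p8_aes_cbc above is registered, it can be exercised from userspace through the AF_ALG socket interface. A minimal sketch with error handling omitted; the kernel picks whichever registered cbc(aes) implementation has the highest priority, so this is not tied to any particular driver.

/*
 * Userspace sketch: encrypt one block with cbc(aes) via AF_ALG.
 * Error handling is omitted for brevity.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "skcipher",
		.salg_name = "cbc(aes)",
	};
	unsigned char key[16] = { 0 }, iv[16] = { 0 }, buf[16] = "fifteen bytes!!";
	char cbuf[CMSG_SPACE(sizeof(int)) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + 16)] = { 0 };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	struct af_alg_iv *algiv;
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	opfd = accept(tfmfd, NULL, 0);

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_OP;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	*(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type = ALG_SET_IV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct af_alg_iv) + 16);
	algiv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	algiv->ivlen = 16;
	memcpy(algiv->iv, iv, 16);

	sendmsg(opfd, &msg, 0);
	read(opfd, buf, sizeof(buf));	/* buf now holds the ciphertext */

	printf("first ciphertext byte: 0x%02x\n", buf[0]);
	close(opfd);
	close(tfmfd);
	return 0;
}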
// SPDX-License-Identifier: GPL-2.0-only /* * AES XTS routines supporting VMX In-core instructions on Power 8 * * Copyright (C) 2015 International Business Machines Inc. * * Author: Leonidas S. Barbosa <[email protected]> */ #include <asm/simd.h> #include <asm/switch_to.h> #include <crypto/aes.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include <crypto/xts.h> #include "aesp8-ppc.h" struct p8_aes_xts_ctx { struct crypto_skcipher *fallback; struct aes_key enc_key; struct aes_key dec_key; struct aes_key tweak_key; }; static int p8_aes_xts_init(struct crypto_skcipher *tfm) { struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *fallback; fallback = crypto_alloc_skcipher("xts(aes)", 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); if (IS_ERR(fallback)) { pr_err("Failed to allocate xts(aes) fallback: %ld\n", PTR_ERR(fallback)); return PTR_ERR(fallback); } crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + crypto_skcipher_reqsize(fallback)); ctx->fallback = fallback; return 0; } static void p8_aes_xts_exit(struct crypto_skcipher *tfm) { struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); crypto_free_skcipher(ctx->fallback); } static int p8_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); int ret; ret = xts_verify_key(tfm, key, keylen); if (ret) return ret; preempt_disable(); pagefault_disable(); enable_kernel_vsx(); ret = aes_p8_set_encrypt_key(key + keylen/2, (keylen/2) * 8, &ctx->tweak_key); ret |= aes_p8_set_encrypt_key(key, (keylen/2) * 8, &ctx->enc_key); ret |= aes_p8_set_decrypt_key(key, (keylen/2) * 8, &ctx->dec_key); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen); return ret ? -EINVAL : 0; } static int p8_aes_xts_crypt(struct skcipher_request *req, int enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct p8_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; u8 tweak[AES_BLOCK_SIZE]; int ret; if (req->cryptlen < AES_BLOCK_SIZE) return -EINVAL; if (!crypto_simd_usable() || (req->cryptlen % XTS_BLOCK_SIZE) != 0) { struct skcipher_request *subreq = skcipher_request_ctx(req); *subreq = *req; skcipher_request_set_tfm(subreq, ctx->fallback); return enc ? 
crypto_skcipher_encrypt(subreq) : crypto_skcipher_decrypt(subreq); } ret = skcipher_walk_virt(&walk, req, false); if (ret) return ret; preempt_disable(); pagefault_disable(); enable_kernel_vsx(); aes_p8_encrypt(walk.iv, tweak, &ctx->tweak_key); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); while ((nbytes = walk.nbytes) != 0) { preempt_disable(); pagefault_disable(); enable_kernel_vsx(); if (enc) aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, AES_BLOCK_SIZE), &ctx->enc_key, NULL, tweak); else aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr, round_down(nbytes, AES_BLOCK_SIZE), &ctx->dec_key, NULL, tweak); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); ret = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE); } return ret; } static int p8_aes_xts_encrypt(struct skcipher_request *req) { return p8_aes_xts_crypt(req, 1); } static int p8_aes_xts_decrypt(struct skcipher_request *req) { return p8_aes_xts_crypt(req, 0); } struct skcipher_alg p8_aes_xts_alg = { .base.cra_name = "xts(aes)", .base.cra_driver_name = "p8_aes_xts", .base.cra_module = THIS_MODULE, .base.cra_priority = 2000, .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct p8_aes_xts_ctx), .setkey = p8_aes_xts_setkey, .encrypt = p8_aes_xts_encrypt, .decrypt = p8_aes_xts_decrypt, .init = p8_aes_xts_init, .exit = p8_aes_xts_exit, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, };
linux-master
drivers/crypto/vmx/aes_xts.c
// SPDX-License-Identifier: GPL-2.0-only /* * Routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. * * Author: Marcelo Henrique Cerri <[email protected]> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/err.h> #include <linux/cpufeature.h> #include <linux/crypto.h> #include <asm/cputable.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include "aesp8-ppc.h" static int __init p8_init(void) { int ret; ret = crypto_register_shash(&p8_ghash_alg); if (ret) goto err; ret = crypto_register_alg(&p8_aes_alg); if (ret) goto err_unregister_ghash; ret = crypto_register_skcipher(&p8_aes_cbc_alg); if (ret) goto err_unregister_aes; ret = crypto_register_skcipher(&p8_aes_ctr_alg); if (ret) goto err_unregister_aes_cbc; ret = crypto_register_skcipher(&p8_aes_xts_alg); if (ret) goto err_unregister_aes_ctr; return 0; err_unregister_aes_ctr: crypto_unregister_skcipher(&p8_aes_ctr_alg); err_unregister_aes_cbc: crypto_unregister_skcipher(&p8_aes_cbc_alg); err_unregister_aes: crypto_unregister_alg(&p8_aes_alg); err_unregister_ghash: crypto_unregister_shash(&p8_ghash_alg); err: return ret; } static void __exit p8_exit(void) { crypto_unregister_skcipher(&p8_aes_xts_alg); crypto_unregister_skcipher(&p8_aes_ctr_alg); crypto_unregister_skcipher(&p8_aes_cbc_alg); crypto_unregister_alg(&p8_aes_alg); crypto_unregister_shash(&p8_ghash_alg); } module_cpu_feature_match(PPC_MODULE_FEATURE_VEC_CRYPTO, p8_init); module_exit(p8_exit); MODULE_AUTHOR("Marcelo Cerri<[email protected]>"); MODULE_DESCRIPTION("IBM VMX cryptographic acceleration instructions " "support on Power 8"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); MODULE_IMPORT_NS(CRYPTO_INTERNAL);
linux-master
drivers/crypto/vmx/vmx.c
// SPDX-License-Identifier: GPL-2.0-only /* * AES CTR routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015 International Business Machines Inc. * * Author: Marcelo Henrique Cerri <[email protected]> */ #include <asm/simd.h> #include <asm/switch_to.h> #include <crypto/aes.h> #include <crypto/internal/simd.h> #include <crypto/internal/skcipher.h> #include "aesp8-ppc.h" struct p8_aes_ctr_ctx { struct crypto_skcipher *fallback; struct aes_key enc_key; }; static int p8_aes_ctr_init(struct crypto_skcipher *tfm) { struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); struct crypto_skcipher *fallback; fallback = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); if (IS_ERR(fallback)) { pr_err("Failed to allocate ctr(aes) fallback: %ld\n", PTR_ERR(fallback)); return PTR_ERR(fallback); } crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) + crypto_skcipher_reqsize(fallback)); ctx->fallback = fallback; return 0; } static void p8_aes_ctr_exit(struct crypto_skcipher *tfm) { struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); crypto_free_skcipher(ctx->fallback); } static int p8_aes_ctr_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); int ret; preempt_disable(); pagefault_disable(); enable_kernel_vsx(); ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); ret |= crypto_skcipher_setkey(ctx->fallback, key, keylen); return ret ? -EINVAL : 0; } static void p8_aes_ctr_final(const struct p8_aes_ctr_ctx *ctx, struct skcipher_walk *walk) { u8 *ctrblk = walk->iv; u8 keystream[AES_BLOCK_SIZE]; u8 *src = walk->src.virt.addr; u8 *dst = walk->dst.virt.addr; unsigned int nbytes = walk->nbytes; preempt_disable(); pagefault_disable(); enable_kernel_vsx(); aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); crypto_xor_cpy(dst, keystream, src, nbytes); crypto_inc(ctrblk, AES_BLOCK_SIZE); } static int p8_aes_ctr_crypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); const struct p8_aes_ctr_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_walk walk; unsigned int nbytes; int ret; if (!crypto_simd_usable()) { struct skcipher_request *subreq = skcipher_request_ctx(req); *subreq = *req; skcipher_request_set_tfm(subreq, ctx->fallback); return crypto_skcipher_encrypt(subreq); } ret = skcipher_walk_virt(&walk, req, false); while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { preempt_disable(); pagefault_disable(); enable_kernel_vsx(); aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr, nbytes / AES_BLOCK_SIZE, &ctx->enc_key, walk.iv); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); do { crypto_inc(walk.iv, AES_BLOCK_SIZE); } while ((nbytes -= AES_BLOCK_SIZE) >= AES_BLOCK_SIZE); ret = skcipher_walk_done(&walk, nbytes); } if (nbytes) { p8_aes_ctr_final(ctx, &walk); ret = skcipher_walk_done(&walk, 0); } return ret; } struct skcipher_alg p8_aes_ctr_alg = { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "p8_aes_ctr", .base.cra_module = THIS_MODULE, .base.cra_priority = 2000, .base.cra_flags = CRYPTO_ALG_NEED_FALLBACK, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct p8_aes_ctr_ctx), .setkey = p8_aes_ctr_setkey, .encrypt = p8_aes_ctr_crypt, .decrypt = p8_aes_ctr_crypt, .init = p8_aes_ctr_init, .exit = p8_aes_ctr_exit, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = 
AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .chunksize = AES_BLOCK_SIZE, };
linux-master
drivers/crypto/vmx/aes_ctr.c
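p8_aes_ctr_final() above handles the trailing partial block by encrypting the counter once and XORing only nbytes of keystream, while crypto_inc() steps the counter as a big-endian integer. The standalone sketch below shows plain-C equivalents of those two helpers; ctr_inc() and xor_cpy() are illustrative names, not the kernel's crypto_inc()/crypto_xor_cpy().

/*
 * Standalone sketch of the two helpers the CTR tail path relies on:
 * a big-endian block-counter increment and an XOR-copy of a partial
 * keystream block. Illustration only.
 */
#include <stdio.h>
#include <stddef.h>

#define AES_BLOCK 16

static void ctr_inc(unsigned char ctr[AES_BLOCK])
{
	int i;

	/* big-endian increment: carry propagates from the last byte */
	for (i = AES_BLOCK - 1; i >= 0; i--)
		if (++ctr[i] != 0)
			break;
}

static void xor_cpy(unsigned char *dst, const unsigned char *keystream,
		    const unsigned char *src, size_t nbytes)
{
	size_t i;

	for (i = 0; i < nbytes; i++)
		dst[i] = keystream[i] ^ src[i];
}

int main(void)
{
	unsigned char ctr[AES_BLOCK] = { [15] = 0xff };
	unsigned char ks[5] = { 1, 2, 3, 4, 5 }, in[5] = "tail", out[5];

	ctr_inc(ctr);		/* ...00ff -> ...0100 */
	xor_cpy(out, ks, in, sizeof(in));
	printf("ctr[14]=%02x ctr[15]=%02x out[0]=%02x\n",
	       ctr[14], ctr[15], out[0]);
	return 0;
}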
// SPDX-License-Identifier: GPL-2.0 /* * GHASH routines supporting VMX instructions on the Power 8 * * Copyright (C) 2015, 2019 International Business Machines Inc. * * Author: Marcelo Henrique Cerri <[email protected]> * * Extended by Daniel Axtens <[email protected]> to replace the fallback * mechanism. The new approach is based on arm64 code, which is: * Copyright (C) 2014 - 2018 Linaro Ltd. <[email protected]> */ #include <linux/types.h> #include <linux/err.h> #include <linux/crypto.h> #include <linux/delay.h> #include <asm/simd.h> #include <asm/switch_to.h> #include <crypto/aes.h> #include <crypto/ghash.h> #include <crypto/scatterwalk.h> #include <crypto/internal/hash.h> #include <crypto/internal/simd.h> #include <crypto/b128ops.h> #include "aesp8-ppc.h" void gcm_init_p8(u128 htable[16], const u64 Xi[2]); void gcm_gmult_p8(u64 Xi[2], const u128 htable[16]); void gcm_ghash_p8(u64 Xi[2], const u128 htable[16], const u8 *in, size_t len); struct p8_ghash_ctx { /* key used by vector asm */ u128 htable[16]; /* key used by software fallback */ be128 key; }; struct p8_ghash_desc_ctx { u64 shash[2]; u8 buffer[GHASH_DIGEST_SIZE]; int bytes; }; static int p8_ghash_init(struct shash_desc *desc) { struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); dctx->bytes = 0; memset(dctx->shash, 0, GHASH_DIGEST_SIZE); return 0; } static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); if (keylen != GHASH_BLOCK_SIZE) return -EINVAL; preempt_disable(); pagefault_disable(); enable_kernel_vsx(); gcm_init_p8(ctx->htable, (const u64 *) key); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); memcpy(&ctx->key, key, GHASH_BLOCK_SIZE); return 0; } static inline void __ghash_block(struct p8_ghash_ctx *ctx, struct p8_ghash_desc_ctx *dctx) { if (crypto_simd_usable()) { preempt_disable(); pagefault_disable(); enable_kernel_vsx(); gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, GHASH_DIGEST_SIZE); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); } else { crypto_xor((u8 *)dctx->shash, dctx->buffer, GHASH_BLOCK_SIZE); gf128mul_lle((be128 *)dctx->shash, &ctx->key); } } static inline void __ghash_blocks(struct p8_ghash_ctx *ctx, struct p8_ghash_desc_ctx *dctx, const u8 *src, unsigned int srclen) { if (crypto_simd_usable()) { preempt_disable(); pagefault_disable(); enable_kernel_vsx(); gcm_ghash_p8(dctx->shash, ctx->htable, src, srclen); disable_kernel_vsx(); pagefault_enable(); preempt_enable(); } else { while (srclen >= GHASH_BLOCK_SIZE) { crypto_xor((u8 *)dctx->shash, src, GHASH_BLOCK_SIZE); gf128mul_lle((be128 *)dctx->shash, &ctx->key); srclen -= GHASH_BLOCK_SIZE; src += GHASH_BLOCK_SIZE; } } } static int p8_ghash_update(struct shash_desc *desc, const u8 *src, unsigned int srclen) { unsigned int len; struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); if (dctx->bytes) { if (dctx->bytes + srclen < GHASH_DIGEST_SIZE) { memcpy(dctx->buffer + dctx->bytes, src, srclen); dctx->bytes += srclen; return 0; } memcpy(dctx->buffer + dctx->bytes, src, GHASH_DIGEST_SIZE - dctx->bytes); __ghash_block(ctx, dctx); src += GHASH_DIGEST_SIZE - dctx->bytes; srclen -= GHASH_DIGEST_SIZE - dctx->bytes; dctx->bytes = 0; } len = srclen & ~(GHASH_DIGEST_SIZE - 1); if (len) { __ghash_blocks(ctx, dctx, src, len); src += len; srclen -= len; } if (srclen) { memcpy(dctx->buffer, src, srclen); dctx->bytes = srclen; } return 0; } static int 
p8_ghash_final(struct shash_desc *desc, u8 *out) { int i; struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); struct p8_ghash_desc_ctx *dctx = shash_desc_ctx(desc); if (dctx->bytes) { for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++) dctx->buffer[i] = 0; __ghash_block(ctx, dctx); dctx->bytes = 0; } memcpy(out, dctx->shash, GHASH_DIGEST_SIZE); return 0; } struct shash_alg p8_ghash_alg = { .digestsize = GHASH_DIGEST_SIZE, .init = p8_ghash_init, .update = p8_ghash_update, .final = p8_ghash_final, .setkey = p8_ghash_setkey, .descsize = sizeof(struct p8_ghash_desc_ctx) + sizeof(struct ghash_desc_ctx), .base = { .cra_name = "ghash", .cra_driver_name = "p8_ghash", .cra_priority = 1000, .cra_blocksize = GHASH_BLOCK_SIZE, .cra_ctxsize = sizeof(struct p8_ghash_ctx), .cra_module = THIS_MODULE, }, };
linux-master
drivers/crypto/vmx/ghash.c
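p8_ghash_update() above keeps a one-block buffer so the GHASH core only ever sees whole 16-byte blocks. The sketch below shows that buffering pattern in isolation; ghash_like_update() and process_block() are illustrative stand-ins (the real code calls gcm_ghash_p8() or the gf128mul_lle() fallback per block).

/*
 * Standalone sketch of the partial-block buffering used by p8_ghash_update():
 * bytes are accumulated in a one-block buffer, whole blocks are handed to the
 * per-block routine, and the remainder is kept for the next update.
 */
#include <stdio.h>
#include <string.h>

#define BLOCK 16

struct ghash_like_ctx {
	unsigned char buffer[BLOCK];
	unsigned int bytes;		/* bytes currently buffered */
	unsigned long blocks_done;	/* stand-in for the real hash state */
};

static void process_block(struct ghash_like_ctx *ctx, const unsigned char *blk)
{
	(void)blk;
	ctx->blocks_done++;	/* a real implementation would fold blk in */
}

static void ghash_like_update(struct ghash_like_ctx *ctx,
			      const unsigned char *src, unsigned int len)
{
	if (ctx->bytes) {
		unsigned int fill = BLOCK - ctx->bytes;

		if (len < fill) {
			memcpy(ctx->buffer + ctx->bytes, src, len);
			ctx->bytes += len;
			return;
		}
		memcpy(ctx->buffer + ctx->bytes, src, fill);
		process_block(ctx, ctx->buffer);
		src += fill;
		len -= fill;
		ctx->bytes = 0;
	}
	while (len >= BLOCK) {
		process_block(ctx, src);
		src += BLOCK;
		len -= BLOCK;
	}
	if (len) {
		memcpy(ctx->buffer, src, len);
		ctx->bytes = len;
	}
}

int main(void)
{
	struct ghash_like_ctx ctx = { 0 };
	unsigned char data[40] = { 0 };

	ghash_like_update(&ctx, data, 10);
	ghash_like_update(&ctx, data, 30);
	printf("blocks=%lu leftover=%u\n", ctx.blocks_done, ctx.bytes);	/* 2, 8 */
	return 0;
}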
/* * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. * * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Written and Maintained by: * Manoj Malviya ([email protected]) * Atul Gupta ([email protected]) * Jitendra Lulla ([email protected]) * Yeshaswi M R Gowda ([email protected]) * Harsh Jain ([email protected]) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/skbuff.h> #include <crypto/aes.h> #include <crypto/hash.h> #include "t4_msg.h" #include "chcr_core.h" #include "cxgb4_uld.h" static struct chcr_driver_data drv_data; typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input); static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input); static void *chcr_uld_add(const struct cxgb4_lld_info *lld); static int chcr_uld_state_change(void *handle, enum cxgb4_state state); static chcr_handler_func work_handlers[NUM_CPL_CMDS] = { [CPL_FW6_PLD] = cpl_fw6_pld_handler, }; static struct cxgb4_uld_info chcr_uld_info = { .name = DRV_MODULE_NAME, .nrxq = MAX_ULD_QSETS, /* Max ntxq will be derived from fw config file*/ .rxq_size = 1024, .add = chcr_uld_add, .state_change = chcr_uld_state_change, .rx_handler = chcr_uld_rx_handler, }; static void detach_work_fn(struct work_struct *work) { struct chcr_dev *dev; dev = container_of(work, struct chcr_dev, detach_work.work); if (atomic_read(&dev->inflight)) { dev->wqretry--; if (dev->wqretry) { pr_debug("Request Inflight Count %d\n", atomic_read(&dev->inflight)); schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM); } else { WARN(1, "CHCR:%d request Still Pending\n", atomic_read(&dev->inflight)); complete(&dev->detach_comp); } } else { complete(&dev->detach_comp); } } struct uld_ctx *assign_chcr_device(void) { struct uld_ctx *u_ctx = NULL; /* * When multiple devices are present in system select * device in round-robin fashion for crypto operations * Although One session must use the same device to * maintain request-response ordering. 
*/ mutex_lock(&drv_data.drv_mutex); if (!list_empty(&drv_data.act_dev)) { u_ctx = drv_data.last_dev; if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev)) drv_data.last_dev = list_first_entry(&drv_data.act_dev, struct uld_ctx, entry); else drv_data.last_dev = list_next_entry(drv_data.last_dev, entry); } mutex_unlock(&drv_data.drv_mutex); return u_ctx; } static void chcr_dev_add(struct uld_ctx *u_ctx) { struct chcr_dev *dev; dev = &u_ctx->dev; dev->state = CHCR_ATTACH; atomic_set(&dev->inflight, 0); mutex_lock(&drv_data.drv_mutex); list_move(&u_ctx->entry, &drv_data.act_dev); if (!drv_data.last_dev) drv_data.last_dev = u_ctx; mutex_unlock(&drv_data.drv_mutex); } static void chcr_dev_init(struct uld_ctx *u_ctx) { struct chcr_dev *dev; dev = &u_ctx->dev; spin_lock_init(&dev->lock_chcr_dev); INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn); init_completion(&dev->detach_comp); dev->state = CHCR_INIT; dev->wqretry = WQ_RETRY; atomic_inc(&drv_data.dev_count); atomic_set(&dev->inflight, 0); mutex_lock(&drv_data.drv_mutex); list_add_tail(&u_ctx->entry, &drv_data.inact_dev); mutex_unlock(&drv_data.drv_mutex); } static int chcr_dev_move(struct uld_ctx *u_ctx) { mutex_lock(&drv_data.drv_mutex); if (drv_data.last_dev == u_ctx) { if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev)) drv_data.last_dev = list_first_entry(&drv_data.act_dev, struct uld_ctx, entry); else drv_data.last_dev = list_next_entry(drv_data.last_dev, entry); } list_move(&u_ctx->entry, &drv_data.inact_dev); if (list_empty(&drv_data.act_dev)) drv_data.last_dev = NULL; atomic_dec(&drv_data.dev_count); mutex_unlock(&drv_data.drv_mutex); return 0; } static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input) { struct crypto_async_request *req; struct cpl_fw6_pld *fw6_pld; u32 ack_err_status = 0; int error_status = 0; fw6_pld = (struct cpl_fw6_pld *)input; req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu( fw6_pld->data[1]); ack_err_status = ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4)); if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status)) error_status = -EBADMSG; /* call completion callback with failure status */ if (req) { error_status = chcr_handle_resp(req, input, error_status); } else { pr_err("Incorrect request address from the firmware\n"); return -EFAULT; } if (error_status) atomic_inc(&adap->chcr_stats.error); return 0; } int chcr_send_wr(struct sk_buff *skb) { return cxgb4_crypto_send(skb->dev, skb); } static void *chcr_uld_add(const struct cxgb4_lld_info *lld) { struct uld_ctx *u_ctx; /* Create the device and add it in the device list */ pr_info_once("%s\n", DRV_DESC); if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE)) return ERR_PTR(-EOPNOTSUPP); /* Create the device and add it in the device list */ u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL); if (!u_ctx) { u_ctx = ERR_PTR(-ENOMEM); goto out; } u_ctx->lldi = *lld; chcr_dev_init(u_ctx); out: return u_ctx; } int chcr_uld_rx_handler(void *handle, const __be64 *rsp, const struct pkt_gl *pgl) { struct uld_ctx *u_ctx = (struct uld_ctx *)handle; struct chcr_dev *dev = &u_ctx->dev; struct adapter *adap = padap(dev); const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp; if (!work_handlers[rpl->opcode]) { pr_err("Unsupported opcode %d received\n", rpl->opcode); return 0; } if (!pgl) work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]); else work_handlers[rpl->opcode](adap, pgl->va); return 0; } static void chcr_detach_device(struct uld_ctx *u_ctx) { struct chcr_dev *dev = &u_ctx->dev; if (dev->state 
== CHCR_DETACH) { pr_debug("Detached Event received for already detached device\n"); return; } dev->state = CHCR_DETACH; if (atomic_read(&dev->inflight) != 0) { schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM); wait_for_completion(&dev->detach_comp); } // Move u_ctx to inactive_dev list chcr_dev_move(u_ctx); } static int chcr_uld_state_change(void *handle, enum cxgb4_state state) { struct uld_ctx *u_ctx = handle; int ret = 0; switch (state) { case CXGB4_STATE_UP: if (u_ctx->dev.state != CHCR_INIT) { // Already initialised. return 0; } chcr_dev_add(u_ctx); ret = start_crypto(); break; case CXGB4_STATE_DETACH: chcr_detach_device(u_ctx); if (!atomic_read(&drv_data.dev_count)) stop_crypto(); break; case CXGB4_STATE_START_RECOVERY: case CXGB4_STATE_DOWN: default: break; } return ret; } static int __init chcr_crypto_init(void) { INIT_LIST_HEAD(&drv_data.act_dev); INIT_LIST_HEAD(&drv_data.inact_dev); atomic_set(&drv_data.dev_count, 0); mutex_init(&drv_data.drv_mutex); drv_data.last_dev = NULL; cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info); return 0; } static void __exit chcr_crypto_exit(void) { struct uld_ctx *u_ctx, *tmp; struct adapter *adap; stop_crypto(); cxgb4_unregister_uld(CXGB4_ULD_CRYPTO); /* Remove all devices from list */ mutex_lock(&drv_data.drv_mutex); list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) { adap = padap(&u_ctx->dev); memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats)); list_del(&u_ctx->entry); kfree(u_ctx); } list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) { adap = padap(&u_ctx->dev); memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats)); list_del(&u_ctx->entry); kfree(u_ctx); } mutex_unlock(&drv_data.drv_mutex); } module_init(chcr_crypto_init); module_exit(chcr_crypto_exit); MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards."); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Chelsio Communications");
linux-master
drivers/crypto/chelsio/chcr_core.c
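assign_chcr_device() above hands out active devices round-robin under drv_mutex so requests spread across adapters while each selection stays atomic. Below is a rough userspace analogue of that pattern, using an array and a pthread mutex in place of the kernel list and mutex primitives; all names are illustrative.

/*
 * Standalone sketch of round-robin device selection: pick the current
 * device, advance (wrapping at the end of the list), all under a lock.
 * Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>

#define NDEV 3

static const char *devices[NDEV] = { "chcr0", "chcr1", "chcr2" };
static unsigned int last_dev;
static pthread_mutex_t drv_mutex = PTHREAD_MUTEX_INITIALIZER;

static const char *assign_device(void)
{
	const char *dev;

	pthread_mutex_lock(&drv_mutex);
	dev = devices[last_dev];
	last_dev = (last_dev + 1) % NDEV;	/* wrap, like list_is_last() */
	pthread_mutex_unlock(&drv_mutex);
	return dev;
}

int main(void)
{
	int i;

	for (i = 0; i < 5; i++)
		printf("request %d -> %s\n", i, assign_device());
	return 0;
}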
/* * This file is part of the Chelsio T6 Crypto driver for Linux. * * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Written and Maintained by: * Manoj Malviya ([email protected]) * Atul Gupta ([email protected]) * Jitendra Lulla ([email protected]) * Yeshaswi M R Gowda ([email protected]) * Harsh Jain ([email protected]) */ #define pr_fmt(fmt) "chcr:" fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/crypto.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/highmem.h> #include <linux/scatterlist.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/hash.h> #include <crypto/gcm.h> #include <crypto/sha1.h> #include <crypto/sha2.h> #include <crypto/authenc.h> #include <crypto/ctr.h> #include <crypto/gf128mul.h> #include <crypto/internal/aead.h> #include <crypto/null.h> #include <crypto/internal/skcipher.h> #include <crypto/aead.h> #include <crypto/scatterwalk.h> #include <crypto/internal/hash.h> #include "t4fw_api.h" #include "t4_msg.h" #include "chcr_core.h" #include "chcr_algo.h" #include "chcr_crypto.h" #define IV AES_BLOCK_SIZE static unsigned int sgl_ent_len[] = { 0, 0, 16, 24, 40, 48, 64, 72, 88, 96, 112, 120, 136, 144, 160, 168, 184, 192, 208, 216, 232, 240, 256, 264, 280, 288, 304, 312, 328, 336, 352, 360, 376 }; static unsigned int dsgl_ent_len[] = { 0, 32, 32, 48, 48, 64, 64, 80, 80, 112, 112, 128, 128, 144, 144, 160, 160, 192, 192, 208, 208, 224, 224, 240, 240, 272, 272, 288, 288, 304, 304, 320, 320 }; static u32 round_constant[11] = { 0x01000000, 0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000, 0x40000000, 0x80000000, 0x1B000000, 0x36000000, 0x6C000000 }; static int chcr_handle_cipher_resp(struct skcipher_request *req, unsigned char *input, int err); static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx) { return &ctx->crypto_ctx->aeadctx; } static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx) { return &ctx->crypto_ctx->ablkctx; } static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx) { return &ctx->crypto_ctx->hmacctx; } static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx) { return gctx->ctx->gcm; } static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx 
*gctx) { return gctx->ctx->authenc; } static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) { return container_of(ctx->dev, struct uld_ctx, dev); } static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx) { memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr)); } static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen, unsigned int entlen, unsigned int skip) { int nents = 0; unsigned int less; unsigned int skip_len = 0; while (sg && skip) { if (sg_dma_len(sg) <= skip) { skip -= sg_dma_len(sg); skip_len = 0; sg = sg_next(sg); } else { skip_len = skip; skip = 0; } } while (sg && reqlen) { less = min(reqlen, sg_dma_len(sg) - skip_len); nents += DIV_ROUND_UP(less, entlen); reqlen -= less; skip_len = 0; sg = sg_next(sg); } return nents; } static inline int get_aead_subtype(struct crypto_aead *aead) { struct aead_alg *alg = crypto_aead_alg(aead); struct chcr_alg_template *chcr_crypto_alg = container_of(alg, struct chcr_alg_template, alg.aead); return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; } void chcr_verify_tag(struct aead_request *req, u8 *input, int *err) { u8 temp[SHA512_DIGEST_SIZE]; struct crypto_aead *tfm = crypto_aead_reqtfm(req); int authsize = crypto_aead_authsize(tfm); struct cpl_fw6_pld *fw6_pld; int cmp = 0; fw6_pld = (struct cpl_fw6_pld *)input; if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) || (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) { cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize); } else { sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp, authsize, req->assoclen + req->cryptlen - authsize); cmp = crypto_memneq(temp, (fw6_pld + 1), authsize); } if (cmp) *err = -EBADMSG; else *err = 0; } static int chcr_inc_wrcount(struct chcr_dev *dev) { if (dev->state == CHCR_DETACH) return 1; atomic_inc(&dev->inflight); return 0; } static inline void chcr_dec_wrcount(struct chcr_dev *dev) { atomic_dec(&dev->inflight); } static inline int chcr_handle_aead_resp(struct aead_request *req, unsigned char *input, int err) { struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_dev *dev = a_ctx(tfm)->dev; chcr_aead_common_exit(req); if (reqctx->verify == VERIFY_SW) { chcr_verify_tag(req, input, &err); reqctx->verify = VERIFY_HW; } chcr_dec_wrcount(dev); aead_request_complete(req, err); return err; } static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength) { u32 temp; u32 w_ring[MAX_NK]; int i, j, k; u8 nr, nk; switch (keylength) { case AES_KEYLENGTH_128BIT: nk = KEYLENGTH_4BYTES; nr = NUMBER_OF_ROUNDS_10; break; case AES_KEYLENGTH_192BIT: nk = KEYLENGTH_6BYTES; nr = NUMBER_OF_ROUNDS_12; break; case AES_KEYLENGTH_256BIT: nk = KEYLENGTH_8BYTES; nr = NUMBER_OF_ROUNDS_14; break; default: return; } for (i = 0; i < nk; i++) w_ring[i] = get_unaligned_be32(&key[i * 4]); i = 0; temp = w_ring[nk - 1]; while (i + nk < (nr + 1) * 4) { if (!(i % nk)) { /* RotWord(temp) */ temp = (temp << 8) | (temp >> 24); temp = aes_ks_subword(temp); temp ^= round_constant[i / nk]; } else if (nk == 8 && (i % 4 == 0)) { temp = aes_ks_subword(temp); } w_ring[i % nk] ^= temp; temp = w_ring[i % nk]; i++; } i--; for (k = 0, j = i % nk; k < nk; k++) { put_unaligned_be32(w_ring[j], &dec_key[k * 4]); j--; if (j < 0) j += nk; } } static struct crypto_shash *chcr_alloc_shash(unsigned int ds) { struct crypto_shash *base_hash = ERR_PTR(-EINVAL); switch (ds) { case SHA1_DIGEST_SIZE: base_hash = 
crypto_alloc_shash("sha1", 0, 0); break; case SHA224_DIGEST_SIZE: base_hash = crypto_alloc_shash("sha224", 0, 0); break; case SHA256_DIGEST_SIZE: base_hash = crypto_alloc_shash("sha256", 0, 0); break; case SHA384_DIGEST_SIZE: base_hash = crypto_alloc_shash("sha384", 0, 0); break; case SHA512_DIGEST_SIZE: base_hash = crypto_alloc_shash("sha512", 0, 0); break; } return base_hash; } static int chcr_compute_partial_hash(struct shash_desc *desc, char *iopad, char *result_hash, int digest_size) { struct sha1_state sha1_st; struct sha256_state sha256_st; struct sha512_state sha512_st; int error; if (digest_size == SHA1_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?: crypto_shash_export(desc, (void *)&sha1_st); memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE); } else if (digest_size == SHA224_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: crypto_shash_export(desc, (void *)&sha256_st); memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); } else if (digest_size == SHA256_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: crypto_shash_export(desc, (void *)&sha256_st); memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); } else if (digest_size == SHA384_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: crypto_shash_export(desc, (void *)&sha512_st); memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); } else if (digest_size == SHA512_DIGEST_SIZE) { error = crypto_shash_init(desc) ?: crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: crypto_shash_export(desc, (void *)&sha512_st); memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); } else { error = -EINVAL; pr_err("Unknown digest size %d\n", digest_size); } return error; } static void chcr_change_order(char *buf, int ds) { int i; if (ds == SHA512_DIGEST_SIZE) { for (i = 0; i < (ds / sizeof(u64)); i++) *((__be64 *)buf + i) = cpu_to_be64(*((u64 *)buf + i)); } else { for (i = 0; i < (ds / sizeof(u32)); i++) *((__be32 *)buf + i) = cpu_to_be32(*((u32 *)buf + i)); } } static inline int is_hmac(struct crypto_tfm *tfm) { struct crypto_alg *alg = tfm->__crt_alg; struct chcr_alg_template *chcr_crypto_alg = container_of(__crypto_ahash_alg(alg), struct chcr_alg_template, alg.hash); if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC) return 1; return 0; } static inline void dsgl_walk_init(struct dsgl_walk *walk, struct cpl_rx_phys_dsgl *dsgl) { walk->dsgl = dsgl; walk->nents = 0; walk->to = (struct phys_sge_pairs *)(dsgl + 1); } static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid, int pci_chan_id) { struct cpl_rx_phys_dsgl *phys_cpl; phys_cpl = walk->dsgl; phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); phys_cpl->pcirlxorder_to_noofsgentr = htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) | CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) | CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | CPL_RX_PHYS_DSGL_DCAID_V(0) | CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents)); phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; phys_cpl->rss_hdr_int.qid = htons(qid); phys_cpl->rss_hdr_int.hash_val = 0; phys_cpl->rss_hdr_int.channel = pci_chan_id; } static inline void dsgl_walk_add_page(struct dsgl_walk *walk, size_t size, dma_addr_t addr) { int j; if (!size) return; j = walk->nents; walk->to->len[j % 8] = htons(size); walk->to->addr[j % 8] = cpu_to_be64(addr); j++; 
if ((j % 8) == 0) walk->to++; walk->nents = j; } static void dsgl_walk_add_sg(struct dsgl_walk *walk, struct scatterlist *sg, unsigned int slen, unsigned int skip) { int skip_len = 0; unsigned int left_size = slen, len = 0; unsigned int j = walk->nents; int offset, ent_len; if (!slen) return; while (sg && skip) { if (sg_dma_len(sg) <= skip) { skip -= sg_dma_len(sg); skip_len = 0; sg = sg_next(sg); } else { skip_len = skip; skip = 0; } } while (left_size && sg) { len = min_t(u32, left_size, sg_dma_len(sg) - skip_len); offset = 0; while (len) { ent_len = min_t(u32, len, CHCR_DST_SG_SIZE); walk->to->len[j % 8] = htons(ent_len); walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) + offset + skip_len); offset += ent_len; len -= ent_len; j++; if ((j % 8) == 0) walk->to++; } walk->last_sg = sg; walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) - skip_len) + skip_len; left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len); skip_len = 0; sg = sg_next(sg); } walk->nents = j; } static inline void ulptx_walk_init(struct ulptx_walk *walk, struct ulptx_sgl *ulp) { walk->sgl = ulp; walk->nents = 0; walk->pair_idx = 0; walk->pair = ulp->sge; walk->last_sg = NULL; walk->last_sg_len = 0; } static inline void ulptx_walk_end(struct ulptx_walk *walk) { walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | ULPTX_NSGE_V(walk->nents)); } static inline void ulptx_walk_add_page(struct ulptx_walk *walk, size_t size, dma_addr_t addr) { if (!size) return; if (walk->nents == 0) { walk->sgl->len0 = cpu_to_be32(size); walk->sgl->addr0 = cpu_to_be64(addr); } else { walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr); walk->pair->len[walk->pair_idx] = cpu_to_be32(size); walk->pair_idx = !walk->pair_idx; if (!walk->pair_idx) walk->pair++; } walk->nents++; } static void ulptx_walk_add_sg(struct ulptx_walk *walk, struct scatterlist *sg, unsigned int len, unsigned int skip) { int small; int skip_len = 0; unsigned int sgmin; if (!len) return; while (sg && skip) { if (sg_dma_len(sg) <= skip) { skip -= sg_dma_len(sg); skip_len = 0; sg = sg_next(sg); } else { skip_len = skip; skip = 0; } } WARN(!sg, "SG should not be null here\n"); if (sg && (walk->nents == 0)) { small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len); sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); walk->sgl->len0 = cpu_to_be32(sgmin); walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len); walk->nents++; len -= sgmin; walk->last_sg = sg; walk->last_sg_len = sgmin + skip_len; skip_len += sgmin; if (sg_dma_len(sg) == skip_len) { sg = sg_next(sg); skip_len = 0; } } while (sg && len) { small = min(sg_dma_len(sg) - skip_len, len); sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE); walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin); walk->pair->addr[walk->pair_idx] = cpu_to_be64(sg_dma_address(sg) + skip_len); walk->pair_idx = !walk->pair_idx; walk->nents++; if (!walk->pair_idx) walk->pair++; len -= sgmin; skip_len += sgmin; walk->last_sg = sg; walk->last_sg_len = skip_len; if (sg_dma_len(sg) == skip_len) { sg = sg_next(sg); skip_len = 0; } } } static inline int get_cryptoalg_subtype(struct crypto_skcipher *tfm) { struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct chcr_alg_template *chcr_crypto_alg = container_of(alg, struct chcr_alg_template, alg.skcipher); return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; } static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) { struct adapter *adap = netdev2adap(dev); struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; struct 
sge_uld_txq *txq; int ret = 0; local_bh_disable(); txq = &txq_info->uldtxq[idx]; spin_lock(&txq->sendq.lock); if (txq->full) ret = -1; spin_unlock(&txq->sendq.lock); local_bh_enable(); return ret; } static int generate_copy_rrkey(struct ablk_ctx *ablkctx, struct _key_ctx *key_ctx) { if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len); } else { memcpy(key_ctx->key, ablkctx->key + (ablkctx->enckey_len >> 1), ablkctx->enckey_len >> 1); memcpy(key_ctx->key + (ablkctx->enckey_len >> 1), ablkctx->rrkey, ablkctx->enckey_len >> 1); } return 0; } static int chcr_hash_ent_in_wr(struct scatterlist *src, unsigned int minsg, unsigned int space, unsigned int srcskip) { int srclen = 0; int srcsg = minsg; int soffset = 0, sless; if (sg_dma_len(src) == srcskip) { src = sg_next(src); srcskip = 0; } while (src && space > (sgl_ent_len[srcsg + 1])) { sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip, CHCR_SRC_SG_SIZE); srclen += sless; soffset += sless; srcsg++; if (sg_dma_len(src) == (soffset + srcskip)) { src = sg_next(src); soffset = 0; srcskip = 0; } } return srclen; } static int chcr_sg_ent_in_wr(struct scatterlist *src, struct scatterlist *dst, unsigned int minsg, unsigned int space, unsigned int srcskip, unsigned int dstskip) { int srclen = 0, dstlen = 0; int srcsg = minsg, dstsg = minsg; int offset = 0, soffset = 0, less, sless = 0; if (sg_dma_len(src) == srcskip) { src = sg_next(src); srcskip = 0; } if (sg_dma_len(dst) == dstskip) { dst = sg_next(dst); dstskip = 0; } while (src && dst && space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) { sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset, CHCR_SRC_SG_SIZE); srclen += sless; srcsg++; offset = 0; while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) && space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) { if (srclen <= dstlen) break; less = min_t(unsigned int, sg_dma_len(dst) - offset - dstskip, CHCR_DST_SG_SIZE); dstlen += less; offset += less; if ((offset + dstskip) == sg_dma_len(dst)) { dst = sg_next(dst); offset = 0; } dstsg++; dstskip = 0; } soffset += sless; if ((soffset + srcskip) == sg_dma_len(src)) { src = sg_next(src); srcskip = 0; soffset = 0; } } return min(srclen, dstlen); } static int chcr_cipher_fallback(struct crypto_skcipher *cipher, struct skcipher_request *req, u8 *iv, unsigned short op_type) { struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); int err; skcipher_request_set_tfm(&reqctx->fallback_req, cipher); skcipher_request_set_callback(&reqctx->fallback_req, req->base.flags, req->base.complete, req->base.data); skcipher_request_set_crypt(&reqctx->fallback_req, req->src, req->dst, req->cryptlen, iv); err = op_type ? 
crypto_skcipher_decrypt(&reqctx->fallback_req) : crypto_skcipher_encrypt(&reqctx->fallback_req); return err; } static inline int get_qidxs(struct crypto_async_request *req, unsigned int *txqidx, unsigned int *rxqidx) { struct crypto_tfm *tfm = req->tfm; int ret = 0; switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: { struct aead_request *aead_req = container_of(req, struct aead_request, base); struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(aead_req); *txqidx = reqctx->txqidx; *rxqidx = reqctx->rxqidx; break; } case CRYPTO_ALG_TYPE_SKCIPHER: { struct skcipher_request *sk_req = container_of(req, struct skcipher_request, base); struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(sk_req); *txqidx = reqctx->txqidx; *rxqidx = reqctx->rxqidx; break; } case CRYPTO_ALG_TYPE_AHASH: { struct ahash_request *ahash_req = container_of(req, struct ahash_request, base); struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(ahash_req); *txqidx = reqctx->txqidx; *rxqidx = reqctx->rxqidx; break; } default: ret = -EINVAL; /* should never get here */ BUG(); break; } return ret; } static inline void create_wreq(struct chcr_context *ctx, struct chcr_wr *chcr_req, struct crypto_async_request *req, unsigned int imm, int hash_sz, unsigned int len16, unsigned int sc_len, unsigned int lcb) { struct uld_ctx *u_ctx = ULD_CTX(ctx); unsigned int tx_channel_id, rx_channel_id; unsigned int txqidx = 0, rxqidx = 0; unsigned int qid, fid, portno; get_qidxs(req, &txqidx, &rxqidx); qid = u_ctx->lldi.rxq_ids[rxqidx]; fid = u_ctx->lldi.rxq_ids[0]; portno = rxqidx / ctx->rxq_perchan; tx_channel_id = txqidx / ctx->txq_perchan; rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[portno]); chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE; chcr_req->wreq.pld_size_hash_size = htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); chcr_req->wreq.len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16))); chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req); chcr_req->wreq.rx_chid_to_rx_q_id = FILL_WR_RX_Q_ID(rx_channel_id, qid, !!lcb, txqidx); chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(tx_channel_id, fid); chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) - ((sizeof(chcr_req->wreq)) >> 4))); chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm); chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + sizeof(chcr_req->key_ctx) + sc_len); } /** * create_cipher_wr - form the WR for cipher operations * @wrparam: Container for create_cipher_wr()'s parameters */ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req); struct chcr_context *ctx = c_ctx(tfm); struct uld_ctx *u_ctx = ULD_CTX(ctx); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); struct sk_buff *skb = NULL; struct chcr_wr *chcr_req; struct cpl_rx_phys_dsgl *phys_cpl; struct ulptx_sgl *ulptx; struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(wrparam->req); unsigned int temp = 0, transhdr_len, dst_size; int error; int nents; unsigned int kctx_len; gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; struct adapter *adap = padap(ctx->dev); unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE, reqctx->dst_ofst); dst_size = get_space_for_phys_dsgl(nents); kctx_len = roundup(ablkctx->enckey_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes, CHCR_SRC_SG_SIZE, reqctx->src_ofst); temp = reqctx->imm ? roundup(wrparam->bytes, 16) : (sgl_len(nents) * 8); transhdr_len += temp; transhdr_len = roundup(transhdr_len, 16); skb = alloc_skb(SGE_MAX_WR_LEN, flags); if (!skb) { error = -ENOMEM; goto err; } chcr_req = __skb_put_zero(skb, transhdr_len); chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1); chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes); chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0); chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0); chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0, ablkctx->ciph_mode, 0, 0, IV >> 1); chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, 0, 1, dst_size); chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr; if ((reqctx->op == CHCR_DECRYPT_OP) && (!(get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_CTR)) && (!(get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) { generate_copy_rrkey(ablkctx, &chcr_req->key_ctx); } else { if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) || (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) { memcpy(chcr_req->key_ctx.key, ablkctx->key, ablkctx->enckey_len); } else { memcpy(chcr_req->key_ctx.key, ablkctx->key + (ablkctx->enckey_len >> 1), ablkctx->enckey_len >> 1); memcpy(chcr_req->key_ctx.key + (ablkctx->enckey_len >> 1), ablkctx->key, ablkctx->enckey_len >> 1); } } phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size); chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam); chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid); atomic_inc(&adap->chcr_stats.cipher_rqst); temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV + (reqctx->imm ? 
(wrparam->bytes) : 0); create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0, transhdr_len, temp, ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC); reqctx->skb = skb; if (reqctx->op && (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC)) sg_pcopy_to_buffer(wrparam->req->src, sg_nents(wrparam->req->src), wrparam->req->iv, 16, reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE); return skb; err: return ERR_PTR(error); } static inline int chcr_keyctx_ck_size(unsigned int keylen) { int ck_size = 0; if (keylen == AES_KEYSIZE_128) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; else if (keylen == AES_KEYSIZE_192) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; else if (keylen == AES_KEYSIZE_256) ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; else ck_size = 0; return ck_size; } static int chcr_cipher_fallback_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); return crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen); } static int chcr_aes_cbc_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); unsigned int ck_size, context_size; u16 alignment = 0; int err; err = chcr_cipher_fallback_setkey(cipher, key, keylen); if (err) goto badkey_err; ck_size = chcr_keyctx_ck_size(keylen); alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0; memcpy(ablkctx->key, key, keylen); ablkctx->enckey_len = keylen; get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3); context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + keylen + alignment) >> 4; ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0, 0, context_size); ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; return 0; badkey_err: ablkctx->enckey_len = 0; return err; } static int chcr_aes_ctr_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); unsigned int ck_size, context_size; u16 alignment = 0; int err; err = chcr_cipher_fallback_setkey(cipher, key, keylen); if (err) goto badkey_err; ck_size = chcr_keyctx_ck_size(keylen); alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0; memcpy(ablkctx->key, key, keylen); ablkctx->enckey_len = keylen; context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + keylen + alignment) >> 4; ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0, 0, context_size); ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; return 0; badkey_err: ablkctx->enckey_len = 0; return err; } static int chcr_aes_rfc3686_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); unsigned int ck_size, context_size; u16 alignment = 0; int err; if (keylen < CTR_RFC3686_NONCE_SIZE) return -EINVAL; memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); keylen -= CTR_RFC3686_NONCE_SIZE; err = chcr_cipher_fallback_setkey(cipher, key, keylen); if (err) goto badkey_err; ck_size = chcr_keyctx_ck_size(keylen); alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 
8 : 0; memcpy(ablkctx->key, key, keylen); ablkctx->enckey_len = keylen; context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + keylen + alignment) >> 4; ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0, 0, context_size); ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; return 0; badkey_err: ablkctx->enckey_len = 0; return err; } static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add) { unsigned int size = AES_BLOCK_SIZE; __be32 *b = (__be32 *)(dstiv + size); u32 c, prev; memcpy(dstiv, srciv, AES_BLOCK_SIZE); for (; size >= 4; size -= 4) { prev = be32_to_cpu(*--b); c = prev + add; *b = cpu_to_be32(c); if (prev < c) break; add = 1; } } static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes) { __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE); u64 c; u32 temp = be32_to_cpu(*--b); temp = ~temp; c = (u64)temp + 1; /* number of blocks that can be processed without counter overflow */ if ((bytes / AES_BLOCK_SIZE) >= c) bytes = c * AES_BLOCK_SIZE; return bytes; } static int chcr_update_tweak(struct skcipher_request *req, u8 *iv, u32 isfinal) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); struct crypto_aes_ctx aes; int ret, i; u8 *key; unsigned int keylen; int round = reqctx->last_req_len / AES_BLOCK_SIZE; int round8 = round / 8; memcpy(iv, reqctx->iv, AES_BLOCK_SIZE); keylen = ablkctx->enckey_len / 2; key = ablkctx->key + keylen; /* For a 192 bit key remove the padded zeroes which were * added in chcr_xts_setkey */ if (KEY_CONTEXT_CK_SIZE_G(ntohl(ablkctx->key_ctx_hdr)) == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ret = aes_expandkey(&aes, key, keylen - 8); else ret = aes_expandkey(&aes, key, keylen); if (ret) return ret; aes_encrypt(&aes, iv, iv); for (i = 0; i < round8; i++) gf128mul_x8_ble((le128 *)iv, (le128 *)iv); for (i = 0; i < (round % 8); i++) gf128mul_x_ble((le128 *)iv, (le128 *)iv); if (!isfinal) aes_decrypt(&aes, iv, iv); memzero_explicit(&aes, sizeof(aes)); return 0; } static int chcr_update_cipher_iv(struct skcipher_request *req, struct cpl_fw6_pld *fw6_pld, u8 *iv) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); int subtype = get_cryptoalg_subtype(tfm); int ret = 0; if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) ctr_add_iv(iv, req->iv, (reqctx->processed / AES_BLOCK_SIZE)); else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed / AES_BLOCK_SIZE) + 1); else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) ret = chcr_update_tweak(req, iv, 0); else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { if (reqctx->op) /* Updated before sending the last WR */ memcpy(iv, req->iv, AES_BLOCK_SIZE); else memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); } return ret; } /* We need a separate function for the final IV because in rfc3686 the initial * counter starts from 1 and the IV buffer is only 8 bytes, which remains constant * for subsequent update requests */ static int chcr_final_cipher_iv(struct skcipher_request *req, struct cpl_fw6_pld *fw6_pld, u8 *iv) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); int subtype = get_cryptoalg_subtype(tfm); int ret = 0; if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) ctr_add_iv(iv, req->iv, DIV_ROUND_UP(reqctx->processed, AES_BLOCK_SIZE)); else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS) { if (!reqctx->partial_req) memcpy(iv, reqctx->iv,
AES_BLOCK_SIZE); else ret = chcr_update_tweak(req, iv, 1); } else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) { /* Already updated for decrypt */ if (!reqctx->op) memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE); } return ret; } static int chcr_handle_cipher_resp(struct skcipher_request *req, unsigned char *input, int err) { struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input; struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); struct chcr_dev *dev = c_ctx(tfm)->dev; struct chcr_context *ctx = c_ctx(tfm); struct adapter *adap = padap(ctx->dev); struct cipher_wr_param wrparam; struct sk_buff *skb; int bytes; if (err) goto unmap; if (req->cryptlen == reqctx->processed) { chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); err = chcr_final_cipher_iv(req, fw6_pld, req->iv); goto complete; } if (!reqctx->imm) { bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0, CIP_SPACE_LEFT(ablkctx->enckey_len), reqctx->src_ofst, reqctx->dst_ofst); if ((bytes + reqctx->processed) >= req->cryptlen) bytes = req->cryptlen - reqctx->processed; else bytes = rounddown(bytes, 16); } else { /* CTR mode counter overflow */ bytes = req->cryptlen - reqctx->processed; } err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv); if (err) goto unmap; if (unlikely(bytes == 0)) { chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); memcpy(req->iv, reqctx->init_iv, IV); atomic_inc(&adap->chcr_stats.fallback); err = chcr_cipher_fallback(ablkctx->sw_cipher, req, req->iv, reqctx->op); goto complete; } if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_CTR) bytes = adjust_ctr_overflow(reqctx->iv, bytes); wrparam.qid = u_ctx->lldi.rxq_ids[reqctx->rxqidx]; wrparam.req = req; wrparam.bytes = bytes; skb = create_cipher_wr(&wrparam); if (IS_ERR(skb)) { pr_err("%s : Failed to form WR. 
No memory\n", __func__); err = PTR_ERR(skb); goto unmap; } skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); chcr_send_wr(skb); reqctx->last_req_len = bytes; reqctx->processed += bytes; if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags == CRYPTO_TFM_REQ_MAY_SLEEP ) { complete(&ctx->cbc_aes_aio_done); } return 0; unmap: chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); complete: if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags == CRYPTO_TFM_REQ_MAY_SLEEP ) { complete(&ctx->cbc_aes_aio_done); } chcr_dec_wrcount(dev); skcipher_request_complete(req, err); return err; } static int process_cipher(struct skcipher_request *req, unsigned short qid, struct sk_buff **skb, unsigned short op_type) { struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm)); struct adapter *adap = padap(c_ctx(tfm)->dev); struct cipher_wr_param wrparam; int bytes, err = -EINVAL; int subtype; reqctx->processed = 0; reqctx->partial_req = 0; if (!req->iv) goto error; subtype = get_cryptoalg_subtype(tfm); if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || (req->cryptlen == 0) || (req->cryptlen % crypto_skcipher_blocksize(tfm))) { if (req->cryptlen == 0 && subtype != CRYPTO_ALG_SUB_TYPE_XTS) goto fallback; else if (req->cryptlen % crypto_skcipher_blocksize(tfm) && subtype == CRYPTO_ALG_SUB_TYPE_XTS) goto fallback; pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n", ablkctx->enckey_len, req->cryptlen, ivsize); goto error; } err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); if (err) goto error; if (req->cryptlen < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) + AES_MIN_KEY_SIZE + sizeof(struct cpl_rx_phys_dsgl) + /*Min dsgl size*/ 32))) { /* Can be sent as Imm*/ unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len; dnents = sg_nents_xlen(req->dst, req->cryptlen, CHCR_DST_SG_SIZE, 0); phys_dsgl = get_space_for_phys_dsgl(dnents); kctx_len = roundup(ablkctx->enckey_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); reqctx->imm = (transhdr_len + IV + req->cryptlen) <= SGE_MAX_WR_LEN; bytes = IV + req->cryptlen; } else { reqctx->imm = 0; } if (!reqctx->imm) { bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0, CIP_SPACE_LEFT(ablkctx->enckey_len), 0, 0); if ((bytes + reqctx->processed) >= req->cryptlen) bytes = req->cryptlen - reqctx->processed; else bytes = rounddown(bytes, 16); } else { bytes = req->cryptlen; } if (subtype == CRYPTO_ALG_SUB_TYPE_CTR) { bytes = adjust_ctr_overflow(req->iv, bytes); } if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) { memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE); memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE); /* initialize counter portion of counter block */ *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); memcpy(reqctx->init_iv, reqctx->iv, IV); } else { memcpy(reqctx->iv, req->iv, IV); memcpy(reqctx->init_iv, req->iv, IV); } if (unlikely(bytes == 0)) { chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); fallback: atomic_inc(&adap->chcr_stats.fallback); err = chcr_cipher_fallback(ablkctx->sw_cipher, req, subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686 ? 
reqctx->iv : req->iv, op_type); goto error; } reqctx->op = op_type; reqctx->srcsg = req->src; reqctx->dstsg = req->dst; reqctx->src_ofst = 0; reqctx->dst_ofst = 0; wrparam.qid = qid; wrparam.req = req; wrparam.bytes = bytes; *skb = create_cipher_wr(&wrparam); if (IS_ERR(*skb)) { err = PTR_ERR(*skb); goto unmap; } reqctx->processed = bytes; reqctx->last_req_len = bytes; reqctx->partial_req = !!(req->cryptlen - reqctx->processed); return 0; unmap: chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req); error: return err; } static int chcr_aes_encrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); struct chcr_dev *dev = c_ctx(tfm)->dev; struct sk_buff *skb = NULL; int err; struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); struct chcr_context *ctx = c_ctx(tfm); unsigned int cpu; cpu = get_cpu(); reqctx->txqidx = cpu % ctx->ntxq; reqctx->rxqidx = cpu % ctx->nrxq; put_cpu(); err = chcr_inc_wrcount(dev); if (err) return -ENXIO; if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], reqctx->txqidx) && (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { err = -ENOSPC; goto error; } err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], &skb, CHCR_ENCRYPT_OP); if (err || !skb) return err; skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); chcr_send_wr(skb); if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_CBC && req->base.flags == CRYPTO_TFM_REQ_MAY_SLEEP ) { reqctx->partial_req = 1; wait_for_completion(&ctx->cbc_aes_aio_done); } return -EINPROGRESS; error: chcr_dec_wrcount(dev); return err; } static int chcr_aes_decrypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm)); struct chcr_dev *dev = c_ctx(tfm)->dev; struct sk_buff *skb = NULL; int err; struct chcr_context *ctx = c_ctx(tfm); unsigned int cpu; cpu = get_cpu(); reqctx->txqidx = cpu % ctx->ntxq; reqctx->rxqidx = cpu % ctx->nrxq; put_cpu(); err = chcr_inc_wrcount(dev); if (err) return -ENXIO; if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], reqctx->txqidx) && (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) return -ENOSPC; err = process_cipher(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], &skb, CHCR_DECRYPT_OP); if (err || !skb) return err; skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); chcr_send_wr(skb); return -EINPROGRESS; } static int chcr_device_init(struct chcr_context *ctx) { struct uld_ctx *u_ctx = NULL; int txq_perchan, ntxq; int err = 0, rxq_perchan; if (!ctx->dev) { u_ctx = assign_chcr_device(); if (!u_ctx) { err = -ENXIO; pr_err("chcr device assignment fails\n"); goto out; } ctx->dev = &u_ctx->dev; ntxq = u_ctx->lldi.ntxq; rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; txq_perchan = ntxq / u_ctx->lldi.nchan; ctx->ntxq = ntxq; ctx->nrxq = u_ctx->lldi.nrxq; ctx->rxq_perchan = rxq_perchan; ctx->txq_perchan = txq_perchan; } out: return err; } static int chcr_init_tfm(struct crypto_skcipher *tfm) { struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct chcr_context *ctx = crypto_skcipher_ctx(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); ablkctx->sw_cipher = crypto_alloc_skcipher(alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ablkctx->sw_cipher)) { pr_err("failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ablkctx->sw_cipher); } 
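/* cbc_aes_aio_done is waited on in chcr_aes_encrypt() and completed in chcr_handle_cipher_resp() for CBC-AES requests submitted with CRYPTO_TFM_REQ_MAY_SLEEP. */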
init_completion(&ctx->cbc_aes_aio_done); crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) + crypto_skcipher_reqsize(ablkctx->sw_cipher)); return chcr_device_init(ctx); } static int chcr_rfc3686_init(struct crypto_skcipher *tfm) { struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct chcr_context *ctx = crypto_skcipher_ctx(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); /*RFC3686 initialises IV counter value to 1, rfc3686(ctr(aes)) * cannot be used as fallback in chcr_handle_cipher_response */ ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ablkctx->sw_cipher)) { pr_err("failed to allocate fallback for %s\n", alg->base.cra_name); return PTR_ERR(ablkctx->sw_cipher); } crypto_skcipher_set_reqsize(tfm, sizeof(struct chcr_skcipher_req_ctx) + crypto_skcipher_reqsize(ablkctx->sw_cipher)); return chcr_device_init(ctx); } static void chcr_exit_tfm(struct crypto_skcipher *tfm) { struct chcr_context *ctx = crypto_skcipher_ctx(tfm); struct ablk_ctx *ablkctx = ABLK_CTX(ctx); crypto_free_skcipher(ablkctx->sw_cipher); } static int get_alg_config(struct algo_param *params, unsigned int auth_size) { switch (auth_size) { case SHA1_DIGEST_SIZE: params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1; params->result_size = SHA1_DIGEST_SIZE; break; case SHA224_DIGEST_SIZE: params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224; params->result_size = SHA256_DIGEST_SIZE; break; case SHA256_DIGEST_SIZE: params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256; params->result_size = SHA256_DIGEST_SIZE; break; case SHA384_DIGEST_SIZE: params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384; params->result_size = SHA512_DIGEST_SIZE; break; case SHA512_DIGEST_SIZE: params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512; params->result_size = SHA512_DIGEST_SIZE; break; default: pr_err("ERROR, unsupported digest size\n"); return -EINVAL; } return 0; } static inline void chcr_free_shash(struct crypto_shash *base_hash) { crypto_free_shash(base_hash); } /** * create_hash_wr - Create hash work request * @req: Cipher req base * @param: Container for create_hash_wr()'s parameters */ static struct sk_buff *create_hash_wr(struct ahash_request *req, struct hash_wr_param *param) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct chcr_context *ctx = h_ctx(tfm); struct hmac_ctx *hmacctx = HMAC_CTX(ctx); struct sk_buff *skb = NULL; struct uld_ctx *u_ctx = ULD_CTX(ctx); struct chcr_wr *chcr_req; struct ulptx_sgl *ulptx; unsigned int nents = 0, transhdr_len; unsigned int temp = 0; gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; struct adapter *adap = padap(h_ctx(tfm)->dev); int error = 0; unsigned int rx_channel_id = req_ctx->rxqidx / ctx->rxq_perchan; rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len); req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len + param->sg_len) <= SGE_MAX_WR_LEN; nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len, CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst); nents += param->bfr_len ? 1 : 0; transhdr_len += req_ctx->hctx_wr.imm ? 
roundup(param->bfr_len + param->sg_len, 16) : (sgl_len(nents) * 8); transhdr_len = roundup(transhdr_len, 16); skb = alloc_skb(transhdr_len, flags); if (!skb) return ERR_PTR(-ENOMEM); chcr_req = __skb_put_zero(skb, transhdr_len); chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 0); chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len); chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0); chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0); chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode, param->opad_needed, 0); chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0); memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash, param->alg_prm.result_size); if (param->opad_needed) memcpy(chcr_req->key_ctx.key + ((param->alg_prm.result_size <= 32) ? 32 : CHCR_HASH_MAX_DIGEST_SIZE), hmacctx->opad, param->alg_prm.result_size); chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY, param->alg_prm.mk_size, 0, param->opad_needed, ((param->kctx_len + sizeof(chcr_req->key_ctx)) >> 4)); chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1); ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len + DUMMY_BYTES); if (param->bfr_len != 0) { req_ctx->hctx_wr.dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr, param->bfr_len, DMA_TO_DEVICE); if (dma_mapping_error(&u_ctx->lldi.pdev->dev, req_ctx->hctx_wr. dma_addr)) { error = -ENOMEM; goto err; } req_ctx->hctx_wr.dma_len = param->bfr_len; } else { req_ctx->hctx_wr.dma_addr = 0; } chcr_add_hash_src_ent(req, ulptx, param); /* Request upto max wr size */ temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ? (param->sg_len + param->bfr_len) : 0); atomic_inc(&adap->chcr_stats.digest_rqst); create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm, param->hash_size, transhdr_len, temp, 0); req_ctx->hctx_wr.skb = skb; return skb; err: kfree_skb(skb); return ERR_PTR(error); } static int chcr_ahash_update(struct ahash_request *req) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm)); struct chcr_context *ctx = h_ctx(rtfm); struct chcr_dev *dev = h_ctx(rtfm)->dev; struct sk_buff *skb; u8 remainder = 0, bs; unsigned int nbytes = req->nbytes; struct hash_wr_param params; int error; unsigned int cpu; cpu = get_cpu(); req_ctx->txqidx = cpu % ctx->ntxq; req_ctx->rxqidx = cpu % ctx->nrxq; put_cpu(); bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); if (nbytes + req_ctx->reqlen >= bs) { remainder = (nbytes + req_ctx->reqlen) % bs; nbytes = nbytes + req_ctx->reqlen - remainder; } else { sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr + req_ctx->reqlen, nbytes, 0); req_ctx->reqlen += nbytes; return 0; } error = chcr_inc_wrcount(dev); if (error) return -ENXIO; /* Detach state for CHCR means lldi or padap is freed. 
Increasing * inflight count for dev guarantees that lldi and padap is valid */ if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], req_ctx->txqidx) && (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { error = -ENOSPC; goto err; } chcr_init_hctx_per_wr(req_ctx); error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); if (error) { error = -ENOMEM; goto err; } get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); params.kctx_len = roundup(params.alg_prm.result_size, 16); params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, HASH_SPACE_LEFT(params.kctx_len), 0); if (params.sg_len > req->nbytes) params.sg_len = req->nbytes; params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) - req_ctx->reqlen; params.opad_needed = 0; params.more = 1; params.last = 0; params.bfr_len = req_ctx->reqlen; params.scmd1 = 0; req_ctx->hctx_wr.srcsg = req->src; params.hash_size = params.alg_prm.result_size; req_ctx->data_len += params.sg_len + params.bfr_len; skb = create_hash_wr(req, &params); if (IS_ERR(skb)) { error = PTR_ERR(skb); goto unmap; } req_ctx->hctx_wr.processed += params.sg_len; if (remainder) { /* Swap buffers */ swap(req_ctx->reqbfr, req_ctx->skbfr); sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr, remainder, req->nbytes - remainder); } req_ctx->reqlen = remainder; skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); chcr_send_wr(skb); return -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); err: chcr_dec_wrcount(dev); return error; } static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1) { memset(bfr_ptr, 0, bs); *bfr_ptr = 0x80; if (bs == 64) *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3); else *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3); } static int chcr_ahash_final(struct ahash_request *req) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); struct chcr_dev *dev = h_ctx(rtfm)->dev; struct hash_wr_param params; struct sk_buff *skb; struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm)); struct chcr_context *ctx = h_ctx(rtfm); u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); int error; unsigned int cpu; cpu = get_cpu(); req_ctx->txqidx = cpu % ctx->ntxq; req_ctx->rxqidx = cpu % ctx->nrxq; put_cpu(); error = chcr_inc_wrcount(dev); if (error) return -ENXIO; chcr_init_hctx_per_wr(req_ctx); if (is_hmac(crypto_ahash_tfm(rtfm))) params.opad_needed = 1; else params.opad_needed = 0; params.sg_len = 0; req_ctx->hctx_wr.isfinal = 1; get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); params.kctx_len = roundup(params.alg_prm.result_size, 16); if (is_hmac(crypto_ahash_tfm(rtfm))) { params.opad_needed = 1; params.kctx_len *= 2; } else { params.opad_needed = 0; } req_ctx->hctx_wr.result = 1; params.bfr_len = req_ctx->reqlen; req_ctx->data_len += params.bfr_len + params.sg_len; req_ctx->hctx_wr.srcsg = req->src; if (req_ctx->reqlen == 0) { create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); params.last = 0; params.more = 1; params.scmd1 = 0; params.bfr_len = bs; } else { params.scmd1 = req_ctx->data_len; params.last = 1; params.more = 0; } params.hash_size = crypto_ahash_digestsize(rtfm); skb = create_hash_wr(req, &params); if (IS_ERR(skb)) { error = PTR_ERR(skb); goto err; } req_ctx->reqlen = 0; skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); chcr_send_wr(skb); return -EINPROGRESS; err: chcr_dec_wrcount(dev); return error; } static int 
chcr_ahash_finup(struct ahash_request *req) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); struct chcr_dev *dev = h_ctx(rtfm)->dev; struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm)); struct chcr_context *ctx = h_ctx(rtfm); struct sk_buff *skb; struct hash_wr_param params; u8 bs; int error; unsigned int cpu; cpu = get_cpu(); req_ctx->txqidx = cpu % ctx->ntxq; req_ctx->rxqidx = cpu % ctx->nrxq; put_cpu(); bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); error = chcr_inc_wrcount(dev); if (error) return -ENXIO; if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], req_ctx->txqidx) && (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { error = -ENOSPC; goto err; } chcr_init_hctx_per_wr(req_ctx); error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); if (error) { error = -ENOMEM; goto err; } get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); params.kctx_len = roundup(params.alg_prm.result_size, 16); if (is_hmac(crypto_ahash_tfm(rtfm))) { params.kctx_len *= 2; params.opad_needed = 1; } else { params.opad_needed = 0; } params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, HASH_SPACE_LEFT(params.kctx_len), 0); if (params.sg_len < req->nbytes) { if (is_hmac(crypto_ahash_tfm(rtfm))) { params.kctx_len /= 2; params.opad_needed = 0; } params.last = 0; params.more = 1; params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) - req_ctx->reqlen; params.hash_size = params.alg_prm.result_size; params.scmd1 = 0; } else { params.last = 1; params.more = 0; params.sg_len = req->nbytes; params.hash_size = crypto_ahash_digestsize(rtfm); params.scmd1 = req_ctx->data_len + req_ctx->reqlen + params.sg_len; } params.bfr_len = req_ctx->reqlen; req_ctx->data_len += params.bfr_len + params.sg_len; req_ctx->hctx_wr.result = 1; req_ctx->hctx_wr.srcsg = req->src; if ((req_ctx->reqlen + req->nbytes) == 0) { create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); params.last = 0; params.more = 1; params.scmd1 = 0; params.bfr_len = bs; } skb = create_hash_wr(req, &params); if (IS_ERR(skb)) { error = PTR_ERR(skb); goto unmap; } req_ctx->reqlen = 0; req_ctx->hctx_wr.processed += params.sg_len; skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); chcr_send_wr(skb); return -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); err: chcr_dec_wrcount(dev); return error; } static int chcr_ahash_digest(struct ahash_request *req) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); struct chcr_dev *dev = h_ctx(rtfm)->dev; struct uld_ctx *u_ctx = ULD_CTX(h_ctx(rtfm)); struct chcr_context *ctx = h_ctx(rtfm); struct sk_buff *skb; struct hash_wr_param params; u8 bs; int error; unsigned int cpu; cpu = get_cpu(); req_ctx->txqidx = cpu % ctx->ntxq; req_ctx->rxqidx = cpu % ctx->nrxq; put_cpu(); rtfm->init(req); bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); error = chcr_inc_wrcount(dev); if (error) return -ENXIO; if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], req_ctx->txqidx) && (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)))) { error = -ENOSPC; goto err; } chcr_init_hctx_per_wr(req_ctx); error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req); if (error) { error = -ENOMEM; goto err; } get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); params.kctx_len = roundup(params.alg_prm.result_size, 16); if (is_hmac(crypto_ahash_tfm(rtfm))) { params.kctx_len *= 2; params.opad_needed = 1; } else { 
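/* Plain (non-HMAC) digest: no opad needed, the key context carries only the partial hash. */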
params.opad_needed = 0; } params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen, HASH_SPACE_LEFT(params.kctx_len), 0); if (params.sg_len < req->nbytes) { if (is_hmac(crypto_ahash_tfm(rtfm))) { params.kctx_len /= 2; params.opad_needed = 0; } params.last = 0; params.more = 1; params.scmd1 = 0; params.sg_len = rounddown(params.sg_len, bs); params.hash_size = params.alg_prm.result_size; } else { params.sg_len = req->nbytes; params.hash_size = crypto_ahash_digestsize(rtfm); params.last = 1; params.more = 0; params.scmd1 = req->nbytes + req_ctx->data_len; } params.bfr_len = 0; req_ctx->hctx_wr.result = 1; req_ctx->hctx_wr.srcsg = req->src; req_ctx->data_len += params.bfr_len + params.sg_len; if (req->nbytes == 0) { create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len); params.more = 1; params.bfr_len = bs; } skb = create_hash_wr(req, &params); if (IS_ERR(skb)) { error = PTR_ERR(skb); goto unmap; } req_ctx->hctx_wr.processed += params.sg_len; skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, req_ctx->txqidx); chcr_send_wr(skb); return -EINPROGRESS; unmap: chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); err: chcr_dec_wrcount(dev); return error; } static int chcr_ahash_continue(struct ahash_request *req) { struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr; struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); struct chcr_context *ctx = h_ctx(rtfm); struct uld_ctx *u_ctx = ULD_CTX(ctx); struct sk_buff *skb; struct hash_wr_param params; u8 bs; int error; unsigned int cpu; cpu = get_cpu(); reqctx->txqidx = cpu % ctx->ntxq; reqctx->rxqidx = cpu % ctx->nrxq; put_cpu(); bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); params.kctx_len = roundup(params.alg_prm.result_size, 16); if (is_hmac(crypto_ahash_tfm(rtfm))) { params.kctx_len *= 2; params.opad_needed = 1; } else { params.opad_needed = 0; } params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0, HASH_SPACE_LEFT(params.kctx_len), hctx_wr->src_ofst); if ((params.sg_len + hctx_wr->processed) > req->nbytes) params.sg_len = req->nbytes - hctx_wr->processed; if (!hctx_wr->result || ((params.sg_len + hctx_wr->processed) < req->nbytes)) { if (is_hmac(crypto_ahash_tfm(rtfm))) { params.kctx_len /= 2; params.opad_needed = 0; } params.last = 0; params.more = 1; params.sg_len = rounddown(params.sg_len, bs); params.hash_size = params.alg_prm.result_size; params.scmd1 = 0; } else { params.last = 1; params.more = 0; params.hash_size = crypto_ahash_digestsize(rtfm); params.scmd1 = reqctx->data_len + params.sg_len; } params.bfr_len = 0; reqctx->data_len += params.sg_len; skb = create_hash_wr(req, &params); if (IS_ERR(skb)) { error = PTR_ERR(skb); goto err; } hctx_wr->processed += params.sg_len; skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); chcr_send_wr(skb); return 0; err: return error; } static inline void chcr_handle_ahash_resp(struct ahash_request *req, unsigned char *input, int err) { struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr; int digestsize, updated_digestsize; struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm)); struct chcr_dev *dev = h_ctx(tfm)->dev; if (input == NULL) goto out; digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req)); updated_digestsize = digestsize; if (digestsize == SHA224_DIGEST_SIZE) updated_digestsize = 
SHA256_DIGEST_SIZE; else if (digestsize == SHA384_DIGEST_SIZE) updated_digestsize = SHA512_DIGEST_SIZE; if (hctx_wr->dma_addr) { dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr, hctx_wr->dma_len, DMA_TO_DEVICE); hctx_wr->dma_addr = 0; } if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) == req->nbytes)) { if (hctx_wr->result == 1) { hctx_wr->result = 0; memcpy(req->result, input + sizeof(struct cpl_fw6_pld), digestsize); } else { memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld), updated_digestsize); } goto unmap; } memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld), updated_digestsize); err = chcr_ahash_continue(req); if (err) goto unmap; return; unmap: if (hctx_wr->is_sg_map) chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req); out: chcr_dec_wrcount(dev); ahash_request_complete(req, err); } /* * chcr_handle_resp - Unmap the DMA buffers associated with the request * @req: crypto request */ int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, int err) { struct crypto_tfm *tfm = req->tfm; struct chcr_context *ctx = crypto_tfm_ctx(tfm); struct adapter *adap = padap(ctx->dev); switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: err = chcr_handle_aead_resp(aead_request_cast(req), input, err); break; case CRYPTO_ALG_TYPE_SKCIPHER: chcr_handle_cipher_resp(skcipher_request_cast(req), input, err); break; case CRYPTO_ALG_TYPE_AHASH: chcr_handle_ahash_resp(ahash_request_cast(req), input, err); } atomic_inc(&adap->chcr_stats.complete); return err; } static int chcr_ahash_export(struct ahash_request *areq, void *out) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); struct chcr_ahash_req_ctx *state = out; state->reqlen = req_ctx->reqlen; state->data_len = req_ctx->data_len; memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen); memcpy(state->partial_hash, req_ctx->partial_hash, CHCR_HASH_MAX_DIGEST_SIZE); chcr_init_hctx_per_wr(state); return 0; } static int chcr_ahash_import(struct ahash_request *areq, const void *in) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in; req_ctx->reqlen = state->reqlen; req_ctx->data_len = state->data_len; req_ctx->reqbfr = req_ctx->bfr1; req_ctx->skbfr = req_ctx->bfr2; memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128); memcpy(req_ctx->partial_hash, state->partial_hash, CHCR_HASH_MAX_DIGEST_SIZE); chcr_init_hctx_per_wr(req_ctx); return 0; } static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm)); unsigned int digestsize = crypto_ahash_digestsize(tfm); unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); unsigned int i, err = 0, updated_digestsize; SHASH_DESC_ON_STACK(shash, hmacctx->base_hash); /* use the key to calculate the ipad and opad. ipad will be sent with the * first request's data.
opad will be sent with the final hash result * ipad in hmacctx->ipad and opad in hmacctx->opad location */ shash->tfm = hmacctx->base_hash; if (keylen > bs) { err = crypto_shash_digest(shash, key, keylen, hmacctx->ipad); if (err) goto out; keylen = digestsize; } else { memcpy(hmacctx->ipad, key, keylen); } memset(hmacctx->ipad + keylen, 0, bs - keylen); unsafe_memcpy(hmacctx->opad, hmacctx->ipad, bs, "fortified memcpy causes -Wrestrict warning"); for (i = 0; i < bs / sizeof(int); i++) { *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA; *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA; } updated_digestsize = digestsize; if (digestsize == SHA224_DIGEST_SIZE) updated_digestsize = SHA256_DIGEST_SIZE; else if (digestsize == SHA384_DIGEST_SIZE) updated_digestsize = SHA512_DIGEST_SIZE; err = chcr_compute_partial_hash(shash, hmacctx->ipad, hmacctx->ipad, digestsize); if (err) goto out; chcr_change_order(hmacctx->ipad, updated_digestsize); err = chcr_compute_partial_hash(shash, hmacctx->opad, hmacctx->opad, digestsize); if (err) goto out; chcr_change_order(hmacctx->opad, updated_digestsize); out: return err; } static int chcr_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int key_len) { struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher)); unsigned short context_size = 0; int err; err = chcr_cipher_fallback_setkey(cipher, key, key_len); if (err) goto badkey_err; memcpy(ablkctx->key, key, key_len); ablkctx->enckey_len = key_len; get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2); context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4; /* Both keys for xts must be aligned to 16 byte boundary * by padding with zeros. So for 24 byte keys padding 8 zeroes. */ if (key_len == 48) { context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len + 16) >> 4; memmove(ablkctx->key + 32, ablkctx->key + 24, 24); memset(ablkctx->key + 24, 0, 8); memset(ablkctx->key + 56, 0, 8); ablkctx->enckey_len = 64; ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_CIPHER_KEY_SIZE_192, CHCR_KEYCTX_NO_KEY, 1, 0, context_size); } else { ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ? 
CHCR_KEYCTX_CIPHER_KEY_SIZE_128 : CHCR_KEYCTX_CIPHER_KEY_SIZE_256, CHCR_KEYCTX_NO_KEY, 1, 0, context_size); } ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; return 0; badkey_err: ablkctx->enckey_len = 0; return err; } static int chcr_sha_init(struct ahash_request *areq) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); int digestsize = crypto_ahash_digestsize(tfm); req_ctx->data_len = 0; req_ctx->reqlen = 0; req_ctx->reqbfr = req_ctx->bfr1; req_ctx->skbfr = req_ctx->bfr2; copy_hash_init_values(req_ctx->partial_hash, digestsize); return 0; } static int chcr_sha_cra_init(struct crypto_tfm *tfm) { crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct chcr_ahash_req_ctx)); return chcr_device_init(crypto_tfm_ctx(tfm)); } static int chcr_hmac_init(struct ahash_request *areq) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq); struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm)); unsigned int digestsize = crypto_ahash_digestsize(rtfm); unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); chcr_sha_init(areq); req_ctx->data_len = bs; if (is_hmac(crypto_ahash_tfm(rtfm))) { if (digestsize == SHA224_DIGEST_SIZE) memcpy(req_ctx->partial_hash, hmacctx->ipad, SHA256_DIGEST_SIZE); else if (digestsize == SHA384_DIGEST_SIZE) memcpy(req_ctx->partial_hash, hmacctx->ipad, SHA512_DIGEST_SIZE); else memcpy(req_ctx->partial_hash, hmacctx->ipad, digestsize); } return 0; } static int chcr_hmac_cra_init(struct crypto_tfm *tfm) { struct chcr_context *ctx = crypto_tfm_ctx(tfm); struct hmac_ctx *hmacctx = HMAC_CTX(ctx); unsigned int digestsize = crypto_ahash_digestsize(__crypto_ahash_cast(tfm)); crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct chcr_ahash_req_ctx)); hmacctx->base_hash = chcr_alloc_shash(digestsize); if (IS_ERR(hmacctx->base_hash)) return PTR_ERR(hmacctx->base_hash); return chcr_device_init(crypto_tfm_ctx(tfm)); } static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) { struct chcr_context *ctx = crypto_tfm_ctx(tfm); struct hmac_ctx *hmacctx = HMAC_CTX(ctx); if (hmacctx->base_hash) { chcr_free_shash(hmacctx->base_hash); hmacctx->base_hash = NULL; } } inline void chcr_aead_common_exit(struct aead_request *req) { struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm)); chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op); } static int chcr_aead_common_init(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); unsigned int authsize = crypto_aead_authsize(tfm); int error = -EINVAL; /* validate key size */ if (aeadctx->enckey_len == 0) goto err; if (reqctx->op && req->cryptlen < authsize) goto err; if (reqctx->b0_len) reqctx->scratch_pad = reqctx->iv + IV; else reqctx->scratch_pad = NULL; error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, reqctx->op); if (error) { error = -ENOMEM; goto err; } return 0; err: return error; } static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents, int aadmax, int wrlen, unsigned short op_type) { unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); if (((req->cryptlen - (op_type ? 
authsize : 0)) == 0) || dst_nents > MAX_DSGL_ENT || (req->assoclen > aadmax) || (wrlen > SGE_MAX_WR_LEN)) return 1; return 0; } static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); struct aead_request *subreq = aead_request_ctx_dma(req); aead_request_set_tfm(subreq, aeadctx->sw_cipher); aead_request_set_callback(subreq, req->base.flags, req->base.complete, req->base.data); aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); aead_request_set_ad(subreq, req->assoclen); return op_type ? crypto_aead_decrypt(subreq) : crypto_aead_encrypt(subreq); } static struct sk_buff *create_authenc_wr(struct aead_request *req, unsigned short qid, int size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_context *ctx = a_ctx(tfm); struct uld_ctx *u_ctx = ULD_CTX(ctx); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct sk_buff *skb = NULL; struct chcr_wr *chcr_req; struct cpl_rx_phys_dsgl *phys_cpl; struct ulptx_sgl *ulptx; unsigned int transhdr_len; unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm); unsigned int kctx_len = 0, dnents, snents; unsigned int authsize = crypto_aead_authsize(tfm); int error = -EINVAL; u8 *ivptr; int null = 0; gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; struct adapter *adap = padap(ctx->dev); unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); if (req->cryptlen == 0) return NULL; reqctx->b0_len = 0; error = chcr_aead_common_init(req); if (error) return ERR_PTR(error); if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { null = 1; } dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen + (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0); dnents += MIN_AUTH_SG; // For IV snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, CHCR_SRC_SG_SIZE, 0); dst_size = get_space_for_phys_dsgl(dnents); kctx_len = (KEY_CONTEXT_CTX_LEN_G(ntohl(aeadctx->key_ctx_hdr)) << 4) - sizeof(chcr_req->key_ctx); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) < SGE_MAX_WR_LEN; temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) : (sgl_len(snents) * 8); transhdr_len += temp; transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, transhdr_len, reqctx->op)) { atomic_inc(&adap->chcr_stats.fallback); chcr_aead_common_exit(req); return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } skb = alloc_skb(transhdr_len, flags); if (!skb) { error = -ENOMEM; goto err; } chcr_req = __skb_put_zero(skb, transhdr_len); temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; /* * Input order is AAD,IV and Payload. where IV should be included as * the part of authdata. All other fields should be filled according * to the hardware spec */ chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1); chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen); chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( null ? 0 : 1 + IV, null ? 
0 : IV + req->assoclen, req->assoclen + IV + 1, (temp & 0x1F0) >> 4); chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT( temp & 0xF, null ? 0 : req->assoclen + IV + 1, temp, temp); if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL || subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA) temp = CHCR_SCMD_CIPHER_MODE_AES_CTR; else temp = CHCR_SCMD_CIPHER_MODE_AES_CBC; chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0, temp, actx->auth_mode, aeadctx->hmac_ctrl, IV >> 1); chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, 0, dst_size); chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; if (reqctx->op == CHCR_ENCRYPT_OP || subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); else memcpy(chcr_req->key_ctx.key, actx->dec_rrkey, aeadctx->enckey_len); memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16)); phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); ivptr = (u8 *)(phys_cpl + 1) + dst_size; ulptx = (struct ulptx_sgl *)(ivptr + IV); if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE); memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE); *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); } else { memcpy(ivptr, req->iv, IV); } chcr_add_aead_dst_ent(req, phys_cpl, qid); chcr_add_aead_src_ent(req, ulptx); atomic_inc(&adap->chcr_stats.cipher_rqst); temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0); create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, transhdr_len, temp, 0); reqctx->skb = skb; return skb; err: chcr_aead_common_exit(req); return ERR_PTR(error); } int chcr_aead_dma_map(struct device *dev, struct aead_request *req, unsigned short op_type) { int error; struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned int authsize = crypto_aead_authsize(tfm); int src_len, dst_len; /* calculate and handle src and dst sg length separately * for inplace and out-of place operations */ if (req->src == req->dst) { src_len = req->assoclen + req->cryptlen + (op_type ? 0 : authsize); dst_len = src_len; } else { src_len = req->assoclen + req->cryptlen; dst_len = req->assoclen + req->cryptlen + (op_type ? 
-authsize : authsize); } if (!req->cryptlen || !src_len || !dst_len) return 0; reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len), DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, reqctx->iv_dma)) return -ENOMEM; if (reqctx->b0_len) reqctx->b0_dma = reqctx->iv_dma + IV; else reqctx->b0_dma = 0; if (req->src == req->dst) { error = dma_map_sg(dev, req->src, sg_nents_for_len(req->src, src_len), DMA_BIDIRECTIONAL); if (!error) goto err; } else { error = dma_map_sg(dev, req->src, sg_nents_for_len(req->src, src_len), DMA_TO_DEVICE); if (!error) goto err; error = dma_map_sg(dev, req->dst, sg_nents_for_len(req->dst, dst_len), DMA_FROM_DEVICE); if (!error) { dma_unmap_sg(dev, req->src, sg_nents_for_len(req->src, src_len), DMA_TO_DEVICE); goto err; } } return 0; err: dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL); return -ENOMEM; } void chcr_aead_dma_unmap(struct device *dev, struct aead_request *req, unsigned short op_type) { struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); unsigned int authsize = crypto_aead_authsize(tfm); int src_len, dst_len; /* calculate and handle src and dst sg length separately * for inplace and out-of place operations */ if (req->src == req->dst) { src_len = req->assoclen + req->cryptlen + (op_type ? 0 : authsize); dst_len = src_len; } else { src_len = req->assoclen + req->cryptlen; dst_len = req->assoclen + req->cryptlen + (op_type ? -authsize : authsize); } if (!req->cryptlen || !src_len || !dst_len) return; dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len), DMA_BIDIRECTIONAL); if (req->src == req->dst) { dma_unmap_sg(dev, req->src, sg_nents_for_len(req->src, src_len), DMA_BIDIRECTIONAL); } else { dma_unmap_sg(dev, req->src, sg_nents_for_len(req->src, src_len), DMA_TO_DEVICE); dma_unmap_sg(dev, req->dst, sg_nents_for_len(req->dst, dst_len), DMA_FROM_DEVICE); } } void chcr_add_aead_src_ent(struct aead_request *req, struct ulptx_sgl *ulptx) { struct ulptx_walk ulp_walk; struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); if (reqctx->imm) { u8 *buf = (u8 *)ulptx; if (reqctx->b0_len) { memcpy(buf, reqctx->scratch_pad, reqctx->b0_len); buf += reqctx->b0_len; } sg_pcopy_to_buffer(req->src, sg_nents(req->src), buf, req->cryptlen + req->assoclen, 0); } else { ulptx_walk_init(&ulp_walk, ulptx); if (reqctx->b0_len) ulptx_walk_add_page(&ulp_walk, reqctx->b0_len, reqctx->b0_dma); ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen + req->assoclen, 0); ulptx_walk_end(&ulp_walk); } } void chcr_add_aead_dst_ent(struct aead_request *req, struct cpl_rx_phys_dsgl *phys_cpl, unsigned short qid) { struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct dsgl_walk dsgl_walk; unsigned int authsize = crypto_aead_authsize(tfm); struct chcr_context *ctx = a_ctx(tfm); struct uld_ctx *u_ctx = ULD_CTX(ctx); u32 temp; unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); dsgl_walk_init(&dsgl_walk, phys_cpl); dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma); temp = req->assoclen + req->cryptlen + (reqctx->op ? 
-authsize : authsize); dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0); dsgl_walk_end(&dsgl_walk, qid, rx_channel_id); } void chcr_add_cipher_src_ent(struct skcipher_request *req, void *ulptx, struct cipher_wr_param *wrparam) { struct ulptx_walk ulp_walk; struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); u8 *buf = ulptx; memcpy(buf, reqctx->iv, IV); buf += IV; if (reqctx->imm) { sg_pcopy_to_buffer(req->src, sg_nents(req->src), buf, wrparam->bytes, reqctx->processed); } else { ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf); ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes, reqctx->src_ofst); reqctx->srcsg = ulp_walk.last_sg; reqctx->src_ofst = ulp_walk.last_sg_len; ulptx_walk_end(&ulp_walk); } } void chcr_add_cipher_dst_ent(struct skcipher_request *req, struct cpl_rx_phys_dsgl *phys_cpl, struct cipher_wr_param *wrparam, unsigned short qid) { struct chcr_skcipher_req_ctx *reqctx = skcipher_request_ctx(req); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(wrparam->req); struct chcr_context *ctx = c_ctx(tfm); struct uld_ctx *u_ctx = ULD_CTX(ctx); struct dsgl_walk dsgl_walk; unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); dsgl_walk_init(&dsgl_walk, phys_cpl); dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes, reqctx->dst_ofst); reqctx->dstsg = dsgl_walk.last_sg; reqctx->dst_ofst = dsgl_walk.last_sg_len; dsgl_walk_end(&dsgl_walk, qid, rx_channel_id); } void chcr_add_hash_src_ent(struct ahash_request *req, struct ulptx_sgl *ulptx, struct hash_wr_param *param) { struct ulptx_walk ulp_walk; struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req); if (reqctx->hctx_wr.imm) { u8 *buf = (u8 *)ulptx; if (param->bfr_len) { memcpy(buf, reqctx->reqbfr, param->bfr_len); buf += param->bfr_len; } sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg, sg_nents(reqctx->hctx_wr.srcsg), buf, param->sg_len, 0); } else { ulptx_walk_init(&ulp_walk, ulptx); if (param->bfr_len) ulptx_walk_add_page(&ulp_walk, param->bfr_len, reqctx->hctx_wr.dma_addr); ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg, param->sg_len, reqctx->hctx_wr.src_ofst); reqctx->hctx_wr.srcsg = ulp_walk.last_sg; reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len; ulptx_walk_end(&ulp_walk); } } int chcr_hash_dma_map(struct device *dev, struct ahash_request *req) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); int error = 0; if (!req->nbytes) return 0; error = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); if (!error) return -ENOMEM; req_ctx->hctx_wr.is_sg_map = 1; return 0; } void chcr_hash_dma_unmap(struct device *dev, struct ahash_request *req) { struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); if (!req->nbytes) return; dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); req_ctx->hctx_wr.is_sg_map = 0; } int chcr_cipher_dma_map(struct device *dev, struct skcipher_request *req) { int error; if (req->src == req->dst) { error = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); if (!error) goto err; } else { error = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); if (!error) goto err; error = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); if (!error) { dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); goto err; } } return 0; err: return -ENOMEM; } void chcr_cipher_dma_unmap(struct device *dev, struct skcipher_request *req) { if (req->src == req->dst) { dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); } 
else { dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); } } static int set_msg_len(u8 *block, unsigned int msglen, int csize) { __be32 data; memset(block, 0, csize); block += csize; if (csize >= 4) csize = 4; else if (msglen > (unsigned int)(1 << (8 * csize))) return -EOVERFLOW; data = cpu_to_be32(msglen); memcpy(block - csize, (u8 *)&data + 4 - csize, csize); return 0; } static int generate_b0(struct aead_request *req, u8 *ivptr, unsigned short op_type) { unsigned int l, lp, m; int rc; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); u8 *b0 = reqctx->scratch_pad; m = crypto_aead_authsize(aead); memcpy(b0, ivptr, 16); lp = b0[0]; l = lp + 1; /* set m, bits 3-5 */ *b0 |= (8 * ((m - 2) / 2)); /* set adata, bit 6, if associated data is used */ if (req->assoclen) *b0 |= 64; rc = set_msg_len(b0 + 16 - l, (op_type == CHCR_DECRYPT_OP) ? req->cryptlen - m : req->cryptlen, l); return rc; } static inline int crypto_ccm_check_iv(const u8 *iv) { /* 2 <= L <= 8, so 1 <= L' <= 7. */ if (iv[0] < 1 || iv[0] > 7) return -EINVAL; return 0; } static int ccm_format_packet(struct aead_request *req, u8 *ivptr, unsigned int sub_type, unsigned short op_type, unsigned int assoclen) { struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); int rc = 0; if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { ivptr[0] = 3; memcpy(ivptr + 1, &aeadctx->salt[0], 3); memcpy(ivptr + 4, req->iv, 8); memset(ivptr + 12, 0, 4); } else { memcpy(ivptr, req->iv, 16); } if (assoclen) put_unaligned_be16(assoclen, &reqctx->scratch_pad[16]); rc = generate_b0(req, ivptr, op_type); /* zero the ctr value */ memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1); return rc; } static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl, unsigned int dst_size, struct aead_request *req, unsigned short op_type) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_context *ctx = a_ctx(tfm); struct uld_ctx *u_ctx = ULD_CTX(ctx); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM; unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC; unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; unsigned int ccm_xtra; unsigned int tag_offset = 0, auth_offset = 0; unsigned int assoclen; rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) assoclen = req->assoclen - 8; else assoclen = req->assoclen; ccm_xtra = CCM_B0_SIZE + ((assoclen) ? CCM_AAD_FIELD_SIZE : 0); auth_offset = req->cryptlen ? (req->assoclen + IV + 1 + ccm_xtra) : 0; if (op_type == CHCR_DECRYPT_OP) { if (crypto_aead_authsize(tfm) != req->cryptlen) tag_offset = crypto_aead_authsize(tfm); else auth_offset = 0; } sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(rx_channel_id, 2, 1); sec_cpl->pldlen = htonl(req->assoclen + IV + req->cryptlen + ccm_xtra); /* For CCM there will always be a b0 block. So AAD start will be 1 always */ sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( 1 + IV, IV + assoclen + ccm_xtra, req->assoclen + IV + 1 + ccm_xtra, 0); sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, auth_offset, tag_offset, (op_type == CHCR_ENCRYPT_OP) ?
0 : crypto_aead_authsize(tfm)); sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type == CHCR_ENCRYPT_OP) ? 0 : 1, cipher_mode, mac_mode, aeadctx->hmac_ctrl, IV >> 1); sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, 0, dst_size); } static int aead_ccm_validate_input(unsigned short op_type, struct aead_request *req, struct chcr_aead_ctx *aeadctx, unsigned int sub_type) { if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) { if (crypto_ccm_check_iv(req->iv)) { pr_err("CCM: IV check fails\n"); return -EINVAL; } } else { if (req->assoclen != 16 && req->assoclen != 20) { pr_err("RFC4309: Invalid AAD length %d\n", req->assoclen); return -EINVAL; } } return 0; } static struct sk_buff *create_aead_ccm_wr(struct aead_request *req, unsigned short qid, int size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct sk_buff *skb = NULL; struct chcr_wr *chcr_req; struct cpl_rx_phys_dsgl *phys_cpl; struct ulptx_sgl *ulptx; unsigned int transhdr_len; unsigned int dst_size = 0, kctx_len, dnents, temp, snents; unsigned int sub_type, assoclen = req->assoclen; unsigned int authsize = crypto_aead_authsize(tfm); int error = -EINVAL; u8 *ivptr; gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; struct adapter *adap = padap(a_ctx(tfm)->dev); sub_type = get_aead_subtype(tfm); if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) assoclen -= 8; reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0); error = chcr_aead_common_init(req); if (error) return ERR_PTR(error); error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type); if (error) goto err; dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen + (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0); dnents += MIN_CCM_SG; // For IV and B0 dst_size = get_space_for_phys_dsgl(dnents); snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, CHCR_SRC_SG_SIZE, 0); snents += MIN_CCM_SG; //For B0 kctx_len = roundup(aeadctx->enckey_len, 16) * 2; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen + reqctx->b0_len) <= SGE_MAX_WR_LEN; temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen + reqctx->b0_len, 16) : (sgl_len(snents) * 8); transhdr_len += temp; transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE - reqctx->b0_len, transhdr_len, reqctx->op)) { atomic_inc(&adap->chcr_stats.fallback); chcr_aead_common_exit(req); return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } skb = alloc_skb(transhdr_len, flags); if (!skb) { error = -ENOMEM; goto err; } chcr_req = __skb_put_zero(skb, transhdr_len); fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op); chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), aeadctx->key, aeadctx->enckey_len); phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); ivptr = (u8 *)(phys_cpl + 1) + dst_size; ulptx = (struct ulptx_sgl *)(ivptr + IV); error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen); if (error) goto dstmap_fail; chcr_add_aead_dst_ent(req, phys_cpl, qid); chcr_add_aead_src_ent(req, ulptx); atomic_inc(&adap->chcr_stats.aead_rqst); temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + kctx_len + (reqctx->imm ? 
(req->assoclen + req->cryptlen + reqctx->b0_len) : 0); create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0, transhdr_len, temp, 0); reqctx->skb = skb; return skb; dstmap_fail: kfree_skb(skb); err: chcr_aead_common_exit(req); return ERR_PTR(error); } static struct sk_buff *create_gcm_wr(struct aead_request *req, unsigned short qid, int size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_context *ctx = a_ctx(tfm); struct uld_ctx *u_ctx = ULD_CTX(ctx); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct sk_buff *skb = NULL; struct chcr_wr *chcr_req; struct cpl_rx_phys_dsgl *phys_cpl; struct ulptx_sgl *ulptx; unsigned int transhdr_len, dnents = 0, snents; unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen; unsigned int authsize = crypto_aead_authsize(tfm); int error = -EINVAL; u8 *ivptr; gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; struct adapter *adap = padap(ctx->dev); unsigned int rx_channel_id = reqctx->rxqidx / ctx->rxq_perchan; rx_channel_id = cxgb4_port_e2cchan(u_ctx->lldi.ports[rx_channel_id]); if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) assoclen = req->assoclen - 8; reqctx->b0_len = 0; error = chcr_aead_common_init(req); if (error) return ERR_PTR(error); dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen + (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0); snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen, CHCR_SRC_SG_SIZE, 0); dnents += MIN_GCM_SG; // For IV dst_size = get_space_for_phys_dsgl(dnents); kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size); reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <= SGE_MAX_WR_LEN; temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) : (sgl_len(snents) * 8); transhdr_len += temp; transhdr_len = roundup(transhdr_len, 16); if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE, transhdr_len, reqctx->op)) { atomic_inc(&adap->chcr_stats.fallback); chcr_aead_common_exit(req); return ERR_PTR(chcr_aead_fallback(req, reqctx->op)); } skb = alloc_skb(transhdr_len, flags); if (!skb) { error = -ENOMEM; goto err; } chcr_req = __skb_put_zero(skb, transhdr_len); //Offset of tag from end temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize; chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR( rx_channel_id, 2, 1); chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen); chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI( assoclen ? 1 + IV : 0, assoclen ? IV + assoclen : 0, req->assoclen + IV + 1, 0); chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1, temp, temp); chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op == CHCR_ENCRYPT_OP) ? 
1 : 0, CHCR_SCMD_CIPHER_MODE_AES_GCM, CHCR_SCMD_AUTH_MODE_GHASH, aeadctx->hmac_ctrl, IV >> 1); chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0, 0, dst_size); chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr; memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len); memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE); phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len); ivptr = (u8 *)(phys_cpl + 1) + dst_size; /* prepare a 16 byte iv */ /* S A L T | IV | 0x00000001 */ if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) { memcpy(ivptr, aeadctx->salt, 4); memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE); } else { memcpy(ivptr, req->iv, GCM_AES_IV_SIZE); } put_unaligned_be32(0x01, &ivptr[12]); ulptx = (struct ulptx_sgl *)(ivptr + 16); chcr_add_aead_dst_ent(req, phys_cpl, qid); chcr_add_aead_src_ent(req, ulptx); atomic_inc(&adap->chcr_stats.aead_rqst); temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV + kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0); create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size, transhdr_len, temp, reqctx->verify); reqctx->skb = skb; return skb; err: chcr_aead_common_exit(req); return ERR_PTR(error); } static int chcr_aead_cra_init(struct crypto_aead *tfm) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); struct aead_alg *alg = crypto_aead_alg(tfm); aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); if (IS_ERR(aeadctx->sw_cipher)) return PTR_ERR(aeadctx->sw_cipher); crypto_aead_set_reqsize_dma( tfm, max(sizeof(struct chcr_aead_reqctx), sizeof(struct aead_request) + crypto_aead_reqsize(aeadctx->sw_cipher))); return chcr_device_init(a_ctx(tfm)); } static void chcr_aead_cra_exit(struct crypto_aead *tfm) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); crypto_free_aead(aeadctx->sw_cipher); } static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP; aeadctx->mayverify = VERIFY_HW; return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); } static int chcr_authenc_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); u32 maxauth = crypto_aead_maxauthsize(tfm); /*SHA1 authsize in ipsec is 12 instead of 10 i.e maxauthsize / 2 is not * true for sha1. 
authsize == 12 condition should be before * authsize == (maxauth >> 1) */ if (authsize == ICV_4) { aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; aeadctx->mayverify = VERIFY_HW; } else if (authsize == ICV_6) { aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; aeadctx->mayverify = VERIFY_HW; } else if (authsize == ICV_10) { aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; aeadctx->mayverify = VERIFY_HW; } else if (authsize == ICV_12) { aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; aeadctx->mayverify = VERIFY_HW; } else if (authsize == ICV_14) { aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; aeadctx->mayverify = VERIFY_HW; } else if (authsize == (maxauth >> 1)) { aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; aeadctx->mayverify = VERIFY_HW; } else if (authsize == maxauth) { aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; aeadctx->mayverify = VERIFY_HW; } else { aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; aeadctx->mayverify = VERIFY_SW; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); } static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); switch (authsize) { case ICV_4: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; aeadctx->mayverify = VERIFY_HW; break; case ICV_8: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; aeadctx->mayverify = VERIFY_HW; break; case ICV_12: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; aeadctx->mayverify = VERIFY_HW; break; case ICV_14: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; aeadctx->mayverify = VERIFY_HW; break; case ICV_16: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; aeadctx->mayverify = VERIFY_HW; break; case ICV_13: case ICV_15: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; aeadctx->mayverify = VERIFY_SW; break; default: return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); } static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); switch (authsize) { case ICV_8: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; aeadctx->mayverify = VERIFY_HW; break; case ICV_12: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; aeadctx->mayverify = VERIFY_HW; break; case ICV_16: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; aeadctx->mayverify = VERIFY_HW; break; default: return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); } static int chcr_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm)); switch (authsize) { case ICV_4: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1; aeadctx->mayverify = VERIFY_HW; break; case ICV_6: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2; aeadctx->mayverify = VERIFY_HW; break; case ICV_8: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2; aeadctx->mayverify = VERIFY_HW; break; case ICV_10: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366; aeadctx->mayverify = VERIFY_HW; break; case ICV_12: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT; aeadctx->mayverify = VERIFY_HW; break; case ICV_14: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3; aeadctx->mayverify = VERIFY_HW; break; case ICV_16: aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC; aeadctx->mayverify = VERIFY_HW; break; default: return -EINVAL; } return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize); } static int chcr_ccm_common_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct chcr_aead_ctx *aeadctx = 
AEAD_CTX(a_ctx(aead)); unsigned char ck_size, mk_size; int key_ctx_size = 0; key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2; if (keylen == AES_KEYSIZE_128) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; } else if (keylen == AES_KEYSIZE_192) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192; } else if (keylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; } else { aeadctx->enckey_len = 0; return -EINVAL; } aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0, key_ctx_size >> 4); memcpy(aeadctx->key, key, keylen); aeadctx->enckey_len = keylen; return 0; } static int chcr_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); int error; crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); if (error) return error; return chcr_ccm_common_setkey(aead, key, keylen); } static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); int error; if (keylen < 3) { aeadctx->enckey_len = 0; return -EINVAL; } crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); if (error) return error; keylen -= 3; memcpy(aeadctx->salt, key + keylen, 3); return chcr_ccm_common_setkey(aead, key, keylen); } static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead)); struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx); unsigned int ck_size; int ret = 0, key_ctx_size = 0; struct crypto_aes_ctx aes; aeadctx->enckey_len = 0; crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) & CRYPTO_TFM_REQ_MASK); ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); if (ret) goto out; if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 && keylen > 3) { keylen -= 4; /* nonce/salt is present in the last 4 bytes */ memcpy(aeadctx->salt, key + keylen, 4); } if (keylen == AES_KEYSIZE_128) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; } else if (keylen == AES_KEYSIZE_192) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; } else if (keylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { pr_err("GCM: Invalid key length %d\n", keylen); ret = -EINVAL; goto out; } memcpy(aeadctx->key, key, keylen); aeadctx->enckey_len = keylen; key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) + AEAD_H_SIZE; aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_MAC_KEY_SIZE_128, 0, 0, key_ctx_size >> 4); /* Calculate the H = CIPH(K, 0 repeated 16 times). 
* It will go in key context */ ret = aes_expandkey(&aes, key, keylen); if (ret) { aeadctx->enckey_len = 0; goto out; } memset(gctx->ghash_h, 0, AEAD_H_SIZE); aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h); memzero_explicit(&aes, sizeof(aes)); out: return ret; } static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key, unsigned int keylen) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc)); struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); /* it contains auth and cipher key both*/ struct crypto_authenc_keys keys; unsigned int bs, subtype; unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize; int err = 0, i, key_ctx_len = 0; unsigned char ck_size = 0; unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 }; struct crypto_shash *base_hash = ERR_PTR(-EINVAL); struct algo_param param; int align; u8 *o_ptr = NULL; crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); if (err) goto out; if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto out; if (get_alg_config(&param, max_authsize)) { pr_err("Unsupported digest size\n"); goto out; } subtype = get_aead_subtype(authenc); if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) goto out; memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; } if (keys.enckeylen == AES_KEYSIZE_128) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; } else if (keys.enckeylen == AES_KEYSIZE_192) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; } else if (keys.enckeylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { pr_err("Unsupported cipher key\n"); goto out; } /* Copy only the encryption key. We use the authkey to generate h(ipad) and * h(opad), so the authkey is not needed again. authkeylen is the size of the * hash digest.
*/ memcpy(aeadctx->key, keys.enckey, keys.enckeylen); aeadctx->enckey_len = keys.enckeylen; if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) { get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, aeadctx->enckey_len << 3); } base_hash = chcr_alloc_shash(max_authsize); if (IS_ERR(base_hash)) { pr_err("Base driver cannot be loaded\n"); goto out; } { SHASH_DESC_ON_STACK(shash, base_hash); shash->tfm = base_hash; bs = crypto_shash_blocksize(base_hash); align = KEYCTX_ALIGN_PAD(max_authsize); o_ptr = actx->h_iopad + param.result_size + align; if (keys.authkeylen > bs) { err = crypto_shash_digest(shash, keys.authkey, keys.authkeylen, o_ptr); if (err) { pr_err("Base driver cannot be loaded\n"); goto out; } keys.authkeylen = max_authsize; } else memcpy(o_ptr, keys.authkey, keys.authkeylen); /* Compute the ipad-digest*/ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); memcpy(pad, o_ptr, keys.authkeylen); for (i = 0; i < bs >> 2; i++) *((unsigned int *)pad + i) ^= IPAD_DATA; if (chcr_compute_partial_hash(shash, pad, actx->h_iopad, max_authsize)) goto out; /* Compute the opad-digest */ memset(pad + keys.authkeylen, 0, bs - keys.authkeylen); memcpy(pad, o_ptr, keys.authkeylen); for (i = 0; i < bs >> 2; i++) *((unsigned int *)pad + i) ^= OPAD_DATA; if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize)) goto out; /* convert the ipad and opad digest to network order */ chcr_change_order(actx->h_iopad, param.result_size); chcr_change_order(o_ptr, param.result_size); key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16) + (param.result_size + align) * 2; aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size, 0, 1, key_ctx_len >> 4); actx->auth_mode = param.auth_mode; chcr_free_shash(base_hash); memzero_explicit(&keys, sizeof(keys)); return 0; } out: aeadctx->enckey_len = 0; memzero_explicit(&keys, sizeof(keys)); if (!IS_ERR(base_hash)) chcr_free_shash(base_hash); return -EINVAL; } static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc, const u8 *key, unsigned int keylen) { struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc)); struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx); struct crypto_authenc_keys keys; int err; /* it contains auth and cipher key both*/ unsigned int subtype; int key_ctx_len = 0; unsigned char ck_size = 0; crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc) & CRYPTO_TFM_REQ_MASK); err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen); if (err) goto out; if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto out; subtype = get_aead_subtype(authenc); if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) { if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE) goto out; memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); keys.enckeylen -= CTR_RFC3686_NONCE_SIZE; } if (keys.enckeylen == AES_KEYSIZE_128) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; } else if (keys.enckeylen == AES_KEYSIZE_192) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; } else if (keys.enckeylen == AES_KEYSIZE_256) { ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; } else { pr_err("Unsupported cipher key %d\n", keys.enckeylen); goto out; } memcpy(aeadctx->key, keys.enckey, keys.enckeylen); aeadctx->enckey_len = keys.enckeylen; if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA || subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) { get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key, 
aeadctx->enckey_len << 3); } key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16); aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0, 0, key_ctx_len >> 4); actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP; memzero_explicit(&keys, sizeof(keys)); return 0; out: aeadctx->enckey_len = 0; memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } static int chcr_aead_op(struct aead_request *req, int size, create_wr_t create_wr_fn) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct chcr_context *ctx = a_ctx(tfm); struct uld_ctx *u_ctx = ULD_CTX(ctx); struct sk_buff *skb; struct chcr_dev *cdev; cdev = a_ctx(tfm)->dev; if (!cdev) { pr_err("%s : No crypto device.\n", __func__); return -ENXIO; } if (chcr_inc_wrcount(cdev)) { /* Detach state for CHCR means lldi or padap is freed. * We cannot increment fallback here. */ return chcr_aead_fallback(req, reqctx->op); } if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], reqctx->txqidx) && (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))) { chcr_dec_wrcount(cdev); return -ENOSPC; } if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 && crypto_ipsec_check_assoclen(req->assoclen) != 0) { pr_err("RFC4106: Invalid value of assoclen %d\n", req->assoclen); return -EINVAL; } /* Form a WR from req */ skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[reqctx->rxqidx], size); if (IS_ERR_OR_NULL(skb)) { chcr_dec_wrcount(cdev); return PTR_ERR_OR_ZERO(skb); } skb->dev = u_ctx->lldi.ports[0]; set_wr_txq(skb, CPL_PRIORITY_DATA, reqctx->txqidx); chcr_send_wr(skb); return -EINPROGRESS; } static int chcr_aead_encrypt(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); struct chcr_context *ctx = a_ctx(tfm); unsigned int cpu; cpu = get_cpu(); reqctx->txqidx = cpu % ctx->ntxq; reqctx->rxqidx = cpu % ctx->nrxq; put_cpu(); reqctx->verify = VERIFY_HW; reqctx->op = CHCR_ENCRYPT_OP; switch (get_aead_subtype(tfm)) { case CRYPTO_ALG_SUB_TYPE_CTR_SHA: case CRYPTO_ALG_SUB_TYPE_CBC_SHA: case CRYPTO_ALG_SUB_TYPE_CBC_NULL: case CRYPTO_ALG_SUB_TYPE_CTR_NULL: return chcr_aead_op(req, 0, create_authenc_wr); case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: return chcr_aead_op(req, 0, create_aead_ccm_wr); default: return chcr_aead_op(req, 0, create_gcm_wr); } } static int chcr_aead_decrypt(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct chcr_context *ctx = a_ctx(tfm); struct chcr_aead_ctx *aeadctx = AEAD_CTX(ctx); struct chcr_aead_reqctx *reqctx = aead_request_ctx_dma(req); int size; unsigned int cpu; cpu = get_cpu(); reqctx->txqidx = cpu % ctx->ntxq; reqctx->rxqidx = cpu % ctx->nrxq; put_cpu(); if (aeadctx->mayverify == VERIFY_SW) { size = crypto_aead_maxauthsize(tfm); reqctx->verify = VERIFY_SW; } else { size = 0; reqctx->verify = VERIFY_HW; } reqctx->op = CHCR_DECRYPT_OP; switch (get_aead_subtype(tfm)) { case CRYPTO_ALG_SUB_TYPE_CBC_SHA: case CRYPTO_ALG_SUB_TYPE_CTR_SHA: case CRYPTO_ALG_SUB_TYPE_CBC_NULL: case CRYPTO_ALG_SUB_TYPE_CTR_NULL: return chcr_aead_op(req, size, create_authenc_wr); case CRYPTO_ALG_SUB_TYPE_AEAD_CCM: case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309: return chcr_aead_op(req, size, create_aead_ccm_wr); default: return chcr_aead_op(req, size, create_gcm_wr); } } static struct chcr_alg_template driver_algs[] = { /* AES-CBC */ { .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC, .is_registered = 0, .alg.skcipher = { 
.base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-chcr", .base.cra_blocksize = AES_BLOCK_SIZE, .init = chcr_init_tfm, .exit = chcr_exit_tfm, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = chcr_aes_cbc_setkey, .encrypt = chcr_aes_encrypt, .decrypt = chcr_aes_decrypt, } }, { .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS, .is_registered = 0, .alg.skcipher = { .base.cra_name = "xts(aes)", .base.cra_driver_name = "xts-aes-chcr", .base.cra_blocksize = AES_BLOCK_SIZE, .init = chcr_init_tfm, .exit = chcr_exit_tfm, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = chcr_aes_xts_setkey, .encrypt = chcr_aes_encrypt, .decrypt = chcr_aes_decrypt, } }, { .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR, .is_registered = 0, .alg.skcipher = { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "ctr-aes-chcr", .base.cra_blocksize = 1, .init = chcr_init_tfm, .exit = chcr_exit_tfm, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = chcr_aes_ctr_setkey, .encrypt = chcr_aes_encrypt, .decrypt = chcr_aes_decrypt, } }, { .type = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR_RFC3686, .is_registered = 0, .alg.skcipher = { .base.cra_name = "rfc3686(ctr(aes))", .base.cra_driver_name = "rfc3686-ctr-aes-chcr", .base.cra_blocksize = 1, .init = chcr_rfc3686_init, .exit = chcr_exit_tfm, .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .ivsize = CTR_RFC3686_IV_SIZE, .setkey = chcr_aes_rfc3686_setkey, .encrypt = chcr_aes_encrypt, .decrypt = chcr_aes_decrypt, } }, /* SHA */ { .type = CRYPTO_ALG_TYPE_AHASH, .is_registered = 0, .alg.hash = { .halg.digestsize = SHA1_DIGEST_SIZE, .halg.base = { .cra_name = "sha1", .cra_driver_name = "sha1-chcr", .cra_blocksize = SHA1_BLOCK_SIZE, } } }, { .type = CRYPTO_ALG_TYPE_AHASH, .is_registered = 0, .alg.hash = { .halg.digestsize = SHA256_DIGEST_SIZE, .halg.base = { .cra_name = "sha256", .cra_driver_name = "sha256-chcr", .cra_blocksize = SHA256_BLOCK_SIZE, } } }, { .type = CRYPTO_ALG_TYPE_AHASH, .is_registered = 0, .alg.hash = { .halg.digestsize = SHA224_DIGEST_SIZE, .halg.base = { .cra_name = "sha224", .cra_driver_name = "sha224-chcr", .cra_blocksize = SHA224_BLOCK_SIZE, } } }, { .type = CRYPTO_ALG_TYPE_AHASH, .is_registered = 0, .alg.hash = { .halg.digestsize = SHA384_DIGEST_SIZE, .halg.base = { .cra_name = "sha384", .cra_driver_name = "sha384-chcr", .cra_blocksize = SHA384_BLOCK_SIZE, } } }, { .type = CRYPTO_ALG_TYPE_AHASH, .is_registered = 0, .alg.hash = { .halg.digestsize = SHA512_DIGEST_SIZE, .halg.base = { .cra_name = "sha512", .cra_driver_name = "sha512-chcr", .cra_blocksize = SHA512_BLOCK_SIZE, } } }, /* HMAC */ { .type = CRYPTO_ALG_TYPE_HMAC, .is_registered = 0, .alg.hash = { .halg.digestsize = SHA1_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha1)", .cra_driver_name = "hmac-sha1-chcr", .cra_blocksize = SHA1_BLOCK_SIZE, } } }, { .type = CRYPTO_ALG_TYPE_HMAC, .is_registered = 0, .alg.hash = { .halg.digestsize = SHA224_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha224)", .cra_driver_name = "hmac-sha224-chcr", .cra_blocksize = SHA224_BLOCK_SIZE, } } }, { .type = CRYPTO_ALG_TYPE_HMAC, .is_registered = 0, .alg.hash = { .halg.digestsize = SHA256_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha256)", .cra_driver_name = "hmac-sha256-chcr", .cra_blocksize = SHA256_BLOCK_SIZE, } } }, { .type = CRYPTO_ALG_TYPE_HMAC, 
.is_registered = 0, .alg.hash = { .halg.digestsize = SHA384_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha384)", .cra_driver_name = "hmac-sha384-chcr", .cra_blocksize = SHA384_BLOCK_SIZE, } } }, { .type = CRYPTO_ALG_TYPE_HMAC, .is_registered = 0, .alg.hash = { .halg.digestsize = SHA512_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha512)", .cra_driver_name = "hmac-sha512-chcr", .cra_blocksize = SHA512_BLOCK_SIZE, } } }, /* Add AEAD Algorithms */ { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM, .is_registered = 0, .alg.aead = { .base = { .cra_name = "gcm(aes)", .cra_driver_name = "gcm-aes-chcr", .cra_blocksize = 1, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_gcm_ctx), }, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = GHASH_DIGEST_SIZE, .setkey = chcr_gcm_setkey, .setauthsize = chcr_gcm_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106, .is_registered = 0, .alg.aead = { .base = { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "rfc4106-gcm-aes-chcr", .cra_blocksize = 1, .cra_priority = CHCR_AEAD_PRIORITY + 1, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_gcm_ctx), }, .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = GHASH_DIGEST_SIZE, .setkey = chcr_gcm_setkey, .setauthsize = chcr_4106_4309_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM, .is_registered = 0, .alg.aead = { .base = { .cra_name = "ccm(aes)", .cra_driver_name = "ccm-aes-chcr", .cra_blocksize = 1, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx), }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = GHASH_DIGEST_SIZE, .setkey = chcr_aead_ccm_setkey, .setauthsize = chcr_ccm_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309, .is_registered = 0, .alg.aead = { .base = { .cra_name = "rfc4309(ccm(aes))", .cra_driver_name = "rfc4309-ccm-aes-chcr", .cra_blocksize = 1, .cra_priority = CHCR_AEAD_PRIORITY + 1, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx), }, .ivsize = 8, .maxauthsize = GHASH_DIGEST_SIZE, .setkey = chcr_aead_rfc4309_setkey, .setauthsize = chcr_4106_4309_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, .is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "authenc-hmac-sha1-cbc-aes-chcr", .cra_blocksize = AES_BLOCK_SIZE, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .setkey = chcr_authenc_setkey, .setauthsize = chcr_authenc_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, .is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "authenc-hmac-sha256-cbc-aes-chcr", .cra_blocksize = AES_BLOCK_SIZE, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, .setkey = chcr_authenc_setkey, .setauthsize = chcr_authenc_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, .is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha224),cbc(aes))", .cra_driver_name = "authenc-hmac-sha224-cbc-aes-chcr", 
.cra_blocksize = AES_BLOCK_SIZE, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, .setkey = chcr_authenc_setkey, .setauthsize = chcr_authenc_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, .is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha384),cbc(aes))", .cra_driver_name = "authenc-hmac-sha384-cbc-aes-chcr", .cra_blocksize = AES_BLOCK_SIZE, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, .setkey = chcr_authenc_setkey, .setauthsize = chcr_authenc_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA, .is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "authenc-hmac-sha512-cbc-aes-chcr", .cra_blocksize = AES_BLOCK_SIZE, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, .setkey = chcr_authenc_setkey, .setauthsize = chcr_authenc_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL, .is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(digest_null,cbc(aes))", .cra_driver_name = "authenc-digest_null-cbc-aes-chcr", .cra_blocksize = AES_BLOCK_SIZE, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = AES_BLOCK_SIZE, .maxauthsize = 0, .setkey = chcr_aead_digest_null_setkey, .setauthsize = chcr_authenc_null_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, .is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-chcr", .cra_blocksize = 1, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, .setkey = chcr_authenc_setkey, .setauthsize = chcr_authenc_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, .is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-chcr", .cra_blocksize = 1, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, .setkey = chcr_authenc_setkey, .setauthsize = chcr_authenc_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, .is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha224-rfc3686-ctr-aes-chcr", .cra_blocksize = 1, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, .setkey = chcr_authenc_setkey, .setauthsize = chcr_authenc_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, 
.is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha384-rfc3686-ctr-aes-chcr", .cra_blocksize = 1, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, .setkey = chcr_authenc_setkey, .setauthsize = chcr_authenc_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA, .is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha512-rfc3686-ctr-aes-chcr", .cra_blocksize = 1, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, .setkey = chcr_authenc_setkey, .setauthsize = chcr_authenc_setauthsize, } }, { .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL, .is_registered = 0, .alg.aead = { .base = { .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))", .cra_driver_name = "authenc-digest_null-rfc3686-ctr-aes-chcr", .cra_blocksize = 1, .cra_priority = CHCR_AEAD_PRIORITY, .cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct chcr_aead_ctx) + sizeof(struct chcr_authenc_ctx), }, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = 0, .setkey = chcr_aead_digest_null_setkey, .setauthsize = chcr_authenc_null_setauthsize, } }, }; /* * chcr_unregister_alg - Deregister crypto algorithms with * kernel framework. */ static int chcr_unregister_alg(void) { int i; for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_SKCIPHER: if (driver_algs[i].is_registered && refcount_read( &driver_algs[i].alg.skcipher.base.cra_refcnt) == 1) { crypto_unregister_skcipher( &driver_algs[i].alg.skcipher); driver_algs[i].is_registered = 0; } break; case CRYPTO_ALG_TYPE_AEAD: if (driver_algs[i].is_registered && refcount_read( &driver_algs[i].alg.aead.base.cra_refcnt) == 1) { crypto_unregister_aead( &driver_algs[i].alg.aead); driver_algs[i].is_registered = 0; } break; case CRYPTO_ALG_TYPE_AHASH: if (driver_algs[i].is_registered && refcount_read( &driver_algs[i].alg.hash.halg.base.cra_refcnt) == 1) { crypto_unregister_ahash( &driver_algs[i].alg.hash); driver_algs[i].is_registered = 0; } break; } } return 0; } #define SZ_AHASH_CTX sizeof(struct chcr_context) #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx)) #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx) /* * chcr_register_alg - Register crypto algorithms with kernel framework. 
*/ static int chcr_register_alg(void) { struct crypto_alg ai; struct ahash_alg *a_hash; int err = 0, i; char *name = NULL; for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { if (driver_algs[i].is_registered) continue; switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_SKCIPHER: driver_algs[i].alg.skcipher.base.cra_priority = CHCR_CRA_PRIORITY; driver_algs[i].alg.skcipher.base.cra_module = THIS_MODULE; driver_algs[i].alg.skcipher.base.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK; driver_algs[i].alg.skcipher.base.cra_ctxsize = sizeof(struct chcr_context) + sizeof(struct ablk_ctx); driver_algs[i].alg.skcipher.base.cra_alignmask = 0; err = crypto_register_skcipher(&driver_algs[i].alg.skcipher); name = driver_algs[i].alg.skcipher.base.cra_driver_name; break; case CRYPTO_ALG_TYPE_AEAD: driver_algs[i].alg.aead.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ALLOCATES_MEMORY; driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt; driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt; driver_algs[i].alg.aead.init = chcr_aead_cra_init; driver_algs[i].alg.aead.exit = chcr_aead_cra_exit; driver_algs[i].alg.aead.base.cra_module = THIS_MODULE; err = crypto_register_aead(&driver_algs[i].alg.aead); name = driver_algs[i].alg.aead.base.cra_driver_name; break; case CRYPTO_ALG_TYPE_AHASH: a_hash = &driver_algs[i].alg.hash; a_hash->update = chcr_ahash_update; a_hash->final = chcr_ahash_final; a_hash->finup = chcr_ahash_finup; a_hash->digest = chcr_ahash_digest; a_hash->export = chcr_ahash_export; a_hash->import = chcr_ahash_import; a_hash->halg.statesize = SZ_AHASH_REQ_CTX; a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY; a_hash->halg.base.cra_module = THIS_MODULE; a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; a_hash->halg.base.cra_alignmask = 0; a_hash->halg.base.cra_exit = NULL; if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) { a_hash->halg.base.cra_init = chcr_hmac_cra_init; a_hash->halg.base.cra_exit = chcr_hmac_cra_exit; a_hash->init = chcr_hmac_init; a_hash->setkey = chcr_ahash_setkey; a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX; } else { a_hash->init = chcr_sha_init; a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX; a_hash->halg.base.cra_init = chcr_sha_cra_init; } err = crypto_register_ahash(&driver_algs[i].alg.hash); ai = driver_algs[i].alg.hash.halg.base; name = ai.cra_driver_name; break; } if (err) { pr_err("%s : Algorithm registration failed\n", name); goto register_err; } else { driver_algs[i].is_registered = 1; } } return 0; register_err: chcr_unregister_alg(); return err; } /* * start_crypto - Register the crypto algorithms. * This should be called once when the first device comes up. After this the * kernel will start calling driver APIs for crypto operations. */ int start_crypto(void) { return chcr_register_alg(); } /* * stop_crypto - Deregister all the crypto algorithms with the kernel. * This should be called once when the last device goes down. After this the * kernel will not call the driver API for crypto operations. */ int stop_crypto(void) { chcr_unregister_alg(); return 0; }
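/*
 * Illustrative sketch (not part of the upstream driver): once start_crypto()
 * has registered the algorithms above, a kernel consumer reaches them through
 * the generic crypto API by algorithm name, and the core may pick this driver
 * (e.g. "cbc-aes-chcr") when its cra_priority wins. The function below is a
 * minimal, hedged example of requesting "cbc(aes)" and running one synchronous
 * encryption; the key, IV, buffer size and the function name itself are
 * assumptions made for illustration only.
 */
#if 0	/* example only, never compiled as part of chcr */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int chcr_usage_sketch(void)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[AES_KEYSIZE_128] = { 0 };	/* hypothetical key material */
	u8 iv[AES_BLOCK_SIZE] = { 0 };		/* hypothetical IV */
	u8 *buf;
	int err;

	/* The crypto core resolves the generic name to the best provider. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !req) {
		err = -ENOMEM;
		goto out;
	}

	err = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (err)
		goto out;

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* chcr completes asynchronously; crypto_wait_req() absorbs -EINPROGRESS. */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return err;
}
#endif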
linux-master
drivers/crypto/chelsio/chcr_algo.c
// SPDX-License-Identifier: GPL-2.0-only /* * This file is part of STM32 Crypto driver for Linux. * * Copyright (C) 2017, STMicroelectronics - All Rights Reserved * Author(s): Lionel DEBIEVE <[email protected]> for STMicroelectronics. */ #include <crypto/engine.h> #include <crypto/internal/hash.h> #include <crypto/md5.h> #include <crypto/scatterwalk.h> #include <crypto/sha1.h> #include <crypto/sha2.h> #include <crypto/sha3.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/string.h> #define HASH_CR 0x00 #define HASH_DIN 0x04 #define HASH_STR 0x08 #define HASH_UX500_HREG(x) (0x0c + ((x) * 0x04)) #define HASH_IMR 0x20 #define HASH_SR 0x24 #define HASH_CSR(x) (0x0F8 + ((x) * 0x04)) #define HASH_HREG(x) (0x310 + ((x) * 0x04)) #define HASH_HWCFGR 0x3F0 #define HASH_VER 0x3F4 #define HASH_ID 0x3F8 /* Control Register */ #define HASH_CR_INIT BIT(2) #define HASH_CR_DMAE BIT(3) #define HASH_CR_DATATYPE_POS 4 #define HASH_CR_MODE BIT(6) #define HASH_CR_ALGO_POS 7 #define HASH_CR_MDMAT BIT(13) #define HASH_CR_DMAA BIT(14) #define HASH_CR_LKEY BIT(16) /* Interrupt */ #define HASH_DINIE BIT(0) #define HASH_DCIE BIT(1) /* Interrupt Mask */ #define HASH_MASK_CALC_COMPLETION BIT(0) #define HASH_MASK_DATA_INPUT BIT(1) /* Status Flags */ #define HASH_SR_DATA_INPUT_READY BIT(0) #define HASH_SR_OUTPUT_READY BIT(1) #define HASH_SR_DMA_ACTIVE BIT(2) #define HASH_SR_BUSY BIT(3) /* STR Register */ #define HASH_STR_NBLW_MASK GENMASK(4, 0) #define HASH_STR_DCAL BIT(8) /* HWCFGR Register */ #define HASH_HWCFG_DMA_MASK GENMASK(3, 0) /* Context swap register */ #define HASH_CSR_NB_SHA256_HMAC 54 #define HASH_CSR_NB_SHA256 38 #define HASH_CSR_NB_SHA512_HMAC 103 #define HASH_CSR_NB_SHA512 91 #define HASH_CSR_NB_SHA3_HMAC 88 #define HASH_CSR_NB_SHA3 72 #define HASH_CSR_NB_MAX HASH_CSR_NB_SHA512_HMAC #define HASH_FLAGS_INIT BIT(0) #define HASH_FLAGS_OUTPUT_READY BIT(1) #define HASH_FLAGS_CPU BIT(2) #define HASH_FLAGS_DMA_ACTIVE BIT(3) #define HASH_FLAGS_HMAC_INIT BIT(4) #define HASH_FLAGS_HMAC_FINAL BIT(5) #define HASH_FLAGS_HMAC_KEY BIT(6) #define HASH_FLAGS_SHA3_MODE BIT(7) #define HASH_FLAGS_FINAL BIT(15) #define HASH_FLAGS_FINUP BIT(16) #define HASH_FLAGS_ALGO_MASK GENMASK(20, 17) #define HASH_FLAGS_ALGO_SHIFT 17 #define HASH_FLAGS_ERRORS BIT(21) #define HASH_FLAGS_EMPTY BIT(22) #define HASH_FLAGS_HMAC BIT(23) #define HASH_OP_UPDATE 1 #define HASH_OP_FINAL 2 #define HASH_BURST_LEVEL 4 enum stm32_hash_data_format { HASH_DATA_32_BITS = 0x0, HASH_DATA_16_BITS = 0x1, HASH_DATA_8_BITS = 0x2, HASH_DATA_1_BIT = 0x3 }; #define HASH_BUFLEN (SHA3_224_BLOCK_SIZE + 4) #define HASH_MAX_KEY_SIZE (SHA512_BLOCK_SIZE * 8) enum stm32_hash_algo { HASH_SHA1 = 0, HASH_MD5 = 1, HASH_SHA224 = 2, HASH_SHA256 = 3, HASH_SHA3_224 = 4, HASH_SHA3_256 = 5, HASH_SHA3_384 = 6, HASH_SHA3_512 = 7, HASH_SHA384 = 12, HASH_SHA512 = 15, }; enum ux500_hash_algo { HASH_SHA256_UX500 = 0, HASH_SHA1_UX500 = 1, }; #define HASH_AUTOSUSPEND_DELAY 50 struct stm32_hash_ctx { struct stm32_hash_dev *hdev; struct crypto_shash *xtfm; unsigned long flags; u8 key[HASH_MAX_KEY_SIZE]; int keylen; }; struct stm32_hash_state { u32 flags; u16 bufcnt; u16 blocklen; u8 buffer[HASH_BUFLEN] __aligned(4); /* hash state */ u32 hw_context[3 + HASH_CSR_NB_MAX]; }; struct stm32_hash_request_ctx 
{ struct stm32_hash_dev *hdev; unsigned long op; u8 digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32)); size_t digcnt; /* DMA */ struct scatterlist *sg; unsigned int offset; unsigned int total; struct scatterlist sg_key; dma_addr_t dma_addr; size_t dma_ct; int nents; u8 data_type; struct stm32_hash_state state; }; struct stm32_hash_algs_info { struct ahash_engine_alg *algs_list; size_t size; }; struct stm32_hash_pdata { const int alg_shift; const struct stm32_hash_algs_info *algs_info; size_t algs_info_size; bool has_sr; bool has_mdmat; bool broken_emptymsg; bool ux500; }; struct stm32_hash_dev { struct list_head list; struct device *dev; struct clk *clk; struct reset_control *rst; void __iomem *io_base; phys_addr_t phys_base; u32 dma_mode; bool polled; struct ahash_request *req; struct crypto_engine *engine; unsigned long flags; struct dma_chan *dma_lch; struct completion dma_completion; const struct stm32_hash_pdata *pdata; }; struct stm32_hash_drv { struct list_head dev_list; spinlock_t lock; /* List protection access */ }; static struct stm32_hash_drv stm32_hash = { .dev_list = LIST_HEAD_INIT(stm32_hash.dev_list), .lock = __SPIN_LOCK_UNLOCKED(stm32_hash.lock), }; static void stm32_hash_dma_callback(void *param); static inline u32 stm32_hash_read(struct stm32_hash_dev *hdev, u32 offset) { return readl_relaxed(hdev->io_base + offset); } static inline void stm32_hash_write(struct stm32_hash_dev *hdev, u32 offset, u32 value) { writel_relaxed(value, hdev->io_base + offset); } static inline int stm32_hash_wait_busy(struct stm32_hash_dev *hdev) { u32 status; /* The Ux500 lacks the special status register, we poll the DCAL bit instead */ if (!hdev->pdata->has_sr) return readl_relaxed_poll_timeout(hdev->io_base + HASH_STR, status, !(status & HASH_STR_DCAL), 10, 10000); return readl_relaxed_poll_timeout(hdev->io_base + HASH_SR, status, !(status & HASH_SR_BUSY), 10, 10000); } static void stm32_hash_set_nblw(struct stm32_hash_dev *hdev, int length) { u32 reg; reg = stm32_hash_read(hdev, HASH_STR); reg &= ~(HASH_STR_NBLW_MASK); reg |= (8U * ((length) % 4U)); stm32_hash_write(hdev, HASH_STR, reg); } static int stm32_hash_write_key(struct stm32_hash_dev *hdev) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); u32 reg; int keylen = ctx->keylen; void *key = ctx->key; if (keylen) { stm32_hash_set_nblw(hdev, keylen); while (keylen > 0) { stm32_hash_write(hdev, HASH_DIN, *(u32 *)key); keylen -= 4; key += 4; } reg = stm32_hash_read(hdev, HASH_STR); reg |= HASH_STR_DCAL; stm32_hash_write(hdev, HASH_STR, reg); return -EINPROGRESS; } return 0; } static void stm32_hash_write_ctrl(struct stm32_hash_dev *hdev) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); struct stm32_hash_state *state = &rctx->state; u32 alg = (state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT; u32 reg = HASH_CR_INIT; if (!(hdev->flags & HASH_FLAGS_INIT)) { if (hdev->pdata->ux500) { reg |= ((alg & BIT(0)) << HASH_CR_ALGO_POS); } else { if (hdev->pdata->alg_shift == HASH_CR_ALGO_POS) reg |= ((alg & BIT(1)) << 17) | ((alg & BIT(0)) << HASH_CR_ALGO_POS); else reg |= alg << hdev->pdata->alg_shift; } reg |= (rctx->data_type << HASH_CR_DATATYPE_POS); if (state->flags & HASH_FLAGS_HMAC) { hdev->flags |= HASH_FLAGS_HMAC; reg |= HASH_CR_MODE; if (ctx->keylen > crypto_ahash_blocksize(tfm)) reg |= HASH_CR_LKEY; } if (!hdev->polled) 
stm32_hash_write(hdev, HASH_IMR, HASH_DCIE); stm32_hash_write(hdev, HASH_CR, reg); hdev->flags |= HASH_FLAGS_INIT; /* * After first block + 1 words are fill up, * we only need to fill 1 block to start partial computation */ rctx->state.blocklen -= sizeof(u32); dev_dbg(hdev->dev, "Write Control %x\n", reg); } } static void stm32_hash_append_sg(struct stm32_hash_request_ctx *rctx) { struct stm32_hash_state *state = &rctx->state; size_t count; while ((state->bufcnt < state->blocklen) && rctx->total) { count = min(rctx->sg->length - rctx->offset, rctx->total); count = min_t(size_t, count, state->blocklen - state->bufcnt); if (count <= 0) { if ((rctx->sg->length == 0) && !sg_is_last(rctx->sg)) { rctx->sg = sg_next(rctx->sg); continue; } else { break; } } scatterwalk_map_and_copy(state->buffer + state->bufcnt, rctx->sg, rctx->offset, count, 0); state->bufcnt += count; rctx->offset += count; rctx->total -= count; if (rctx->offset == rctx->sg->length) { rctx->sg = sg_next(rctx->sg); if (rctx->sg) rctx->offset = 0; else rctx->total = 0; } } } static int stm32_hash_xmit_cpu(struct stm32_hash_dev *hdev, const u8 *buf, size_t length, int final) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct stm32_hash_state *state = &rctx->state; unsigned int count, len32; const u32 *buffer = (const u32 *)buf; u32 reg; if (final) { hdev->flags |= HASH_FLAGS_FINAL; /* Do not process empty messages if hw is buggy. */ if (!(hdev->flags & HASH_FLAGS_INIT) && !length && hdev->pdata->broken_emptymsg) { state->flags |= HASH_FLAGS_EMPTY; return 0; } } len32 = DIV_ROUND_UP(length, sizeof(u32)); dev_dbg(hdev->dev, "%s: length: %zd, final: %x len32 %i\n", __func__, length, final, len32); hdev->flags |= HASH_FLAGS_CPU; stm32_hash_write_ctrl(hdev); if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; if ((hdev->flags & HASH_FLAGS_HMAC) && (!(hdev->flags & HASH_FLAGS_HMAC_KEY))) { hdev->flags |= HASH_FLAGS_HMAC_KEY; stm32_hash_write_key(hdev); if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; } for (count = 0; count < len32; count++) stm32_hash_write(hdev, HASH_DIN, buffer[count]); if (final) { if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; stm32_hash_set_nblw(hdev, length); reg = stm32_hash_read(hdev, HASH_STR); reg |= HASH_STR_DCAL; stm32_hash_write(hdev, HASH_STR, reg); if (hdev->flags & HASH_FLAGS_HMAC) { if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; stm32_hash_write_key(hdev); } return -EINPROGRESS; } return 0; } static int hash_swap_reg(struct stm32_hash_request_ctx *rctx) { struct stm32_hash_state *state = &rctx->state; switch ((state->flags & HASH_FLAGS_ALGO_MASK) >> HASH_FLAGS_ALGO_SHIFT) { case HASH_MD5: case HASH_SHA1: case HASH_SHA224: case HASH_SHA256: if (state->flags & HASH_FLAGS_HMAC) return HASH_CSR_NB_SHA256_HMAC; else return HASH_CSR_NB_SHA256; break; case HASH_SHA384: case HASH_SHA512: if (state->flags & HASH_FLAGS_HMAC) return HASH_CSR_NB_SHA512_HMAC; else return HASH_CSR_NB_SHA512; break; case HASH_SHA3_224: case HASH_SHA3_256: case HASH_SHA3_384: case HASH_SHA3_512: if (state->flags & HASH_FLAGS_HMAC) return HASH_CSR_NB_SHA3_HMAC; else return HASH_CSR_NB_SHA3; break; default: return -EINVAL; } } static int stm32_hash_update_cpu(struct stm32_hash_dev *hdev) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct stm32_hash_state *state = &rctx->state; u32 *preg = state->hw_context; int bufcnt, err = 0, final; int i, swap_reg; dev_dbg(hdev->dev, "%s flags %x\n", __func__, state->flags); final = state->flags & HASH_FLAGS_FINAL; while ((rctx->total >= 
state->blocklen) || (state->bufcnt + rctx->total >= state->blocklen)) { stm32_hash_append_sg(rctx); bufcnt = state->bufcnt; state->bufcnt = 0; err = stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 0); if (err) return err; } stm32_hash_append_sg(rctx); if (final) { bufcnt = state->bufcnt; state->bufcnt = 0; return stm32_hash_xmit_cpu(hdev, state->buffer, bufcnt, 1); } if (!(hdev->flags & HASH_FLAGS_INIT)) return 0; if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; swap_reg = hash_swap_reg(rctx); if (!hdev->pdata->ux500) *preg++ = stm32_hash_read(hdev, HASH_IMR); *preg++ = stm32_hash_read(hdev, HASH_STR); *preg++ = stm32_hash_read(hdev, HASH_CR); for (i = 0; i < swap_reg; i++) *preg++ = stm32_hash_read(hdev, HASH_CSR(i)); state->flags |= HASH_FLAGS_INIT; return err; } static int stm32_hash_xmit_dma(struct stm32_hash_dev *hdev, struct scatterlist *sg, int length, int mdma) { struct dma_async_tx_descriptor *in_desc; dma_cookie_t cookie; u32 reg; int err; in_desc = dmaengine_prep_slave_sg(hdev->dma_lch, sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!in_desc) { dev_err(hdev->dev, "dmaengine_prep_slave error\n"); return -ENOMEM; } reinit_completion(&hdev->dma_completion); in_desc->callback = stm32_hash_dma_callback; in_desc->callback_param = hdev; hdev->flags |= HASH_FLAGS_FINAL; hdev->flags |= HASH_FLAGS_DMA_ACTIVE; reg = stm32_hash_read(hdev, HASH_CR); if (hdev->pdata->has_mdmat) { if (mdma) reg |= HASH_CR_MDMAT; else reg &= ~HASH_CR_MDMAT; } reg |= HASH_CR_DMAE; stm32_hash_write(hdev, HASH_CR, reg); stm32_hash_set_nblw(hdev, length); cookie = dmaengine_submit(in_desc); err = dma_submit_error(cookie); if (err) return -ENOMEM; dma_async_issue_pending(hdev->dma_lch); if (!wait_for_completion_timeout(&hdev->dma_completion, msecs_to_jiffies(100))) err = -ETIMEDOUT; if (dma_async_is_tx_complete(hdev->dma_lch, cookie, NULL, NULL) != DMA_COMPLETE) err = -ETIMEDOUT; if (err) { dev_err(hdev->dev, "DMA Error %i\n", err); dmaengine_terminate_all(hdev->dma_lch); return err; } return -EINPROGRESS; } static void stm32_hash_dma_callback(void *param) { struct stm32_hash_dev *hdev = param; complete(&hdev->dma_completion); } static int stm32_hash_hmac_dma_send(struct stm32_hash_dev *hdev) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(hdev->req); struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); int err; if (ctx->keylen < rctx->state.blocklen || hdev->dma_mode == 1) { err = stm32_hash_write_key(hdev); if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; } else { if (!(hdev->flags & HASH_FLAGS_HMAC_KEY)) sg_init_one(&rctx->sg_key, ctx->key, ALIGN(ctx->keylen, sizeof(u32))); rctx->dma_ct = dma_map_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE); if (rctx->dma_ct == 0) { dev_err(hdev->dev, "dma_map_sg error\n"); return -ENOMEM; } err = stm32_hash_xmit_dma(hdev, &rctx->sg_key, ctx->keylen, 0); dma_unmap_sg(hdev->dev, &rctx->sg_key, 1, DMA_TO_DEVICE); } return err; } static int stm32_hash_dma_init(struct stm32_hash_dev *hdev) { struct dma_slave_config dma_conf; struct dma_chan *chan; int err; memset(&dma_conf, 0, sizeof(dma_conf)); dma_conf.direction = DMA_MEM_TO_DEV; dma_conf.dst_addr = hdev->phys_base + HASH_DIN; dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; dma_conf.src_maxburst = HASH_BURST_LEVEL; dma_conf.dst_maxburst = HASH_BURST_LEVEL; dma_conf.device_fc = false; chan = dma_request_chan(hdev->dev, "in"); if (IS_ERR(chan)) return PTR_ERR(chan); hdev->dma_lch = chan; err = dmaengine_slave_config(hdev->dma_lch, 
&dma_conf); if (err) { dma_release_channel(hdev->dma_lch); hdev->dma_lch = NULL; dev_err(hdev->dev, "Couldn't configure DMA slave.\n"); return err; } init_completion(&hdev->dma_completion); return 0; } static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); u32 *buffer = (void *)rctx->state.buffer; struct scatterlist sg[1], *tsg; int err = 0, reg, ncp = 0; unsigned int i, len = 0, bufcnt = 0; bool is_last = false; rctx->sg = hdev->req->src; rctx->total = hdev->req->nbytes; rctx->nents = sg_nents(rctx->sg); if (rctx->nents < 0) return -EINVAL; stm32_hash_write_ctrl(hdev); if (hdev->flags & HASH_FLAGS_HMAC) { err = stm32_hash_hmac_dma_send(hdev); if (err != -EINPROGRESS) return err; } for_each_sg(rctx->sg, tsg, rctx->nents, i) { sg[0] = *tsg; len = sg->length; if (sg_is_last(sg) || (bufcnt + sg[0].length) >= rctx->total) { sg->length = rctx->total - bufcnt; is_last = true; if (hdev->dma_mode == 1) { len = (ALIGN(sg->length, 16) - 16); ncp = sg_pcopy_to_buffer( rctx->sg, rctx->nents, rctx->state.buffer, sg->length - len, rctx->total - sg->length + len); sg->length = len; } else { if (!(IS_ALIGNED(sg->length, sizeof(u32)))) { len = sg->length; sg->length = ALIGN(sg->length, sizeof(u32)); } } } rctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_TO_DEVICE); if (rctx->dma_ct == 0) { dev_err(hdev->dev, "dma_map_sg error\n"); return -ENOMEM; } err = stm32_hash_xmit_dma(hdev, sg, len, !is_last); bufcnt += sg[0].length; dma_unmap_sg(hdev->dev, sg, 1, DMA_TO_DEVICE); if (err == -ENOMEM) return err; if (is_last) break; } if (hdev->dma_mode == 1) { if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; reg = stm32_hash_read(hdev, HASH_CR); reg &= ~HASH_CR_DMAE; reg |= HASH_CR_DMAA; stm32_hash_write(hdev, HASH_CR, reg); if (ncp) { memset(buffer + ncp, 0, DIV_ROUND_UP(ncp, sizeof(u32)) - ncp); writesl(hdev->io_base + HASH_DIN, buffer, DIV_ROUND_UP(ncp, sizeof(u32))); } stm32_hash_set_nblw(hdev, ncp); reg = stm32_hash_read(hdev, HASH_STR); reg |= HASH_STR_DCAL; stm32_hash_write(hdev, HASH_STR, reg); err = -EINPROGRESS; } if (hdev->flags & HASH_FLAGS_HMAC) { if (stm32_hash_wait_busy(hdev)) return -ETIMEDOUT; err = stm32_hash_hmac_dma_send(hdev); } return err; } static struct stm32_hash_dev *stm32_hash_find_dev(struct stm32_hash_ctx *ctx) { struct stm32_hash_dev *hdev = NULL, *tmp; spin_lock_bh(&stm32_hash.lock); if (!ctx->hdev) { list_for_each_entry(tmp, &stm32_hash.dev_list, list) { hdev = tmp; break; } ctx->hdev = hdev; } else { hdev = ctx->hdev; } spin_unlock_bh(&stm32_hash.lock); return hdev; } static bool stm32_hash_dma_aligned_data(struct ahash_request *req) { struct scatterlist *sg; struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); int i; if (!hdev->dma_lch || req->nbytes <= rctx->state.blocklen) return false; if (sg_nents(req->src) > 1) { if (hdev->dma_mode == 1) return false; for_each_sg(req->src, sg, sg_nents(req->src), i) { if ((!IS_ALIGNED(sg->length, sizeof(u32))) && (!sg_is_last(sg))) return false; } } if (req->src->offset % 4) return false; return true; } static int stm32_hash_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); struct stm32_hash_state *state = &rctx->state; bool 
sha3_mode = ctx->flags & HASH_FLAGS_SHA3_MODE; rctx->hdev = hdev; state->flags = HASH_FLAGS_CPU; if (sha3_mode) state->flags |= HASH_FLAGS_SHA3_MODE; rctx->digcnt = crypto_ahash_digestsize(tfm); switch (rctx->digcnt) { case MD5_DIGEST_SIZE: state->flags |= HASH_MD5 << HASH_FLAGS_ALGO_SHIFT; break; case SHA1_DIGEST_SIZE: if (hdev->pdata->ux500) state->flags |= HASH_SHA1_UX500 << HASH_FLAGS_ALGO_SHIFT; else state->flags |= HASH_SHA1 << HASH_FLAGS_ALGO_SHIFT; break; case SHA224_DIGEST_SIZE: if (sha3_mode) state->flags |= HASH_SHA3_224 << HASH_FLAGS_ALGO_SHIFT; else state->flags |= HASH_SHA224 << HASH_FLAGS_ALGO_SHIFT; break; case SHA256_DIGEST_SIZE: if (sha3_mode) { state->flags |= HASH_SHA3_256 << HASH_FLAGS_ALGO_SHIFT; } else { if (hdev->pdata->ux500) state->flags |= HASH_SHA256_UX500 << HASH_FLAGS_ALGO_SHIFT; else state->flags |= HASH_SHA256 << HASH_FLAGS_ALGO_SHIFT; } break; case SHA384_DIGEST_SIZE: if (sha3_mode) state->flags |= HASH_SHA3_384 << HASH_FLAGS_ALGO_SHIFT; else state->flags |= HASH_SHA384 << HASH_FLAGS_ALGO_SHIFT; break; case SHA512_DIGEST_SIZE: if (sha3_mode) state->flags |= HASH_SHA3_512 << HASH_FLAGS_ALGO_SHIFT; else state->flags |= HASH_SHA512 << HASH_FLAGS_ALGO_SHIFT; break; default: return -EINVAL; } rctx->state.bufcnt = 0; rctx->state.blocklen = crypto_ahash_blocksize(tfm) + sizeof(u32); if (rctx->state.blocklen > HASH_BUFLEN) { dev_err(hdev->dev, "Error, block too large"); return -EINVAL; } rctx->total = 0; rctx->offset = 0; rctx->data_type = HASH_DATA_8_BITS; if (ctx->flags & HASH_FLAGS_HMAC) state->flags |= HASH_FLAGS_HMAC; dev_dbg(hdev->dev, "%s Flags %x\n", __func__, state->flags); return 0; } static int stm32_hash_update_req(struct stm32_hash_dev *hdev) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); struct stm32_hash_state *state = &rctx->state; if (!(state->flags & HASH_FLAGS_CPU)) return stm32_hash_dma_send(hdev); return stm32_hash_update_cpu(hdev); } static int stm32_hash_final_req(struct stm32_hash_dev *hdev) { struct ahash_request *req = hdev->req; struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_state *state = &rctx->state; int buflen = state->bufcnt; if (state->flags & HASH_FLAGS_FINUP) return stm32_hash_update_req(hdev); state->bufcnt = 0; return stm32_hash_xmit_cpu(hdev, state->buffer, buflen, 1); } static void stm32_hash_emptymsg_fallback(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct stm32_hash_ctx *ctx = crypto_ahash_ctx(ahash); struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_dev *hdev = rctx->hdev; int ret; dev_dbg(hdev->dev, "use fallback message size 0 key size %d\n", ctx->keylen); if (!ctx->xtfm) { dev_err(hdev->dev, "no fallback engine\n"); return; } if (ctx->keylen) { ret = crypto_shash_setkey(ctx->xtfm, ctx->key, ctx->keylen); if (ret) { dev_err(hdev->dev, "failed to set key ret=%d\n", ret); return; } } ret = crypto_shash_tfm_digest(ctx->xtfm, NULL, 0, rctx->digest); if (ret) dev_err(hdev->dev, "shash digest error\n"); } static void stm32_hash_copy_hash(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_state *state = &rctx->state; struct stm32_hash_dev *hdev = rctx->hdev; __be32 *hash = (void *)rctx->digest; unsigned int i, hashsize; if (hdev->pdata->broken_emptymsg && (state->flags & HASH_FLAGS_EMPTY)) return stm32_hash_emptymsg_fallback(req); hashsize = crypto_ahash_digestsize(tfm); for (i = 0; i < 
hashsize / sizeof(u32); i++) { if (hdev->pdata->ux500) hash[i] = cpu_to_be32(stm32_hash_read(hdev, HASH_UX500_HREG(i))); else hash[i] = cpu_to_be32(stm32_hash_read(hdev, HASH_HREG(i))); } } static int stm32_hash_finish(struct ahash_request *req) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); u32 reg; reg = stm32_hash_read(rctx->hdev, HASH_SR); reg &= ~HASH_SR_OUTPUT_READY; stm32_hash_write(rctx->hdev, HASH_SR, reg); if (!req->result) return -EINVAL; memcpy(req->result, rctx->digest, rctx->digcnt); return 0; } static void stm32_hash_finish_req(struct ahash_request *req, int err) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_dev *hdev = rctx->hdev; if (!err && (HASH_FLAGS_FINAL & hdev->flags)) { stm32_hash_copy_hash(req); err = stm32_hash_finish(req); } pm_runtime_mark_last_busy(hdev->dev); pm_runtime_put_autosuspend(hdev->dev); crypto_finalize_hash_request(hdev->engine, req, err); } static int stm32_hash_handle_queue(struct stm32_hash_dev *hdev, struct ahash_request *req) { return crypto_transfer_hash_request_to_engine(hdev->engine, req); } static int stm32_hash_one_request(struct crypto_engine *engine, void *areq) { struct ahash_request *req = container_of(areq, struct ahash_request, base); struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); struct stm32_hash_state *state = &rctx->state; int swap_reg; int err = 0; if (!hdev) return -ENODEV; dev_dbg(hdev->dev, "processing new req, op: %lu, nbytes %d\n", rctx->op, req->nbytes); pm_runtime_get_sync(hdev->dev); hdev->req = req; hdev->flags = 0; swap_reg = hash_swap_reg(rctx); if (state->flags & HASH_FLAGS_INIT) { u32 *preg = rctx->state.hw_context; u32 reg; int i; if (!hdev->pdata->ux500) stm32_hash_write(hdev, HASH_IMR, *preg++); stm32_hash_write(hdev, HASH_STR, *preg++); stm32_hash_write(hdev, HASH_CR, *preg); reg = *preg++ | HASH_CR_INIT; stm32_hash_write(hdev, HASH_CR, reg); for (i = 0; i < swap_reg; i++) stm32_hash_write(hdev, HASH_CSR(i), *preg++); hdev->flags |= HASH_FLAGS_INIT; if (state->flags & HASH_FLAGS_HMAC) hdev->flags |= HASH_FLAGS_HMAC | HASH_FLAGS_HMAC_KEY; } if (rctx->op == HASH_OP_UPDATE) err = stm32_hash_update_req(hdev); else if (rctx->op == HASH_OP_FINAL) err = stm32_hash_final_req(hdev); /* If we have an IRQ, wait for that, else poll for completion */ if (err == -EINPROGRESS && hdev->polled) { if (stm32_hash_wait_busy(hdev)) err = -ETIMEDOUT; else { hdev->flags |= HASH_FLAGS_OUTPUT_READY; err = 0; } } if (err != -EINPROGRESS) /* done task will not finish it, so do it here */ stm32_hash_finish_req(req, err); return 0; } static int stm32_hash_enqueue(struct ahash_request *req, unsigned int op) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_ctx *ctx = crypto_tfm_ctx(req->base.tfm); struct stm32_hash_dev *hdev = ctx->hdev; rctx->op = op; return stm32_hash_handle_queue(hdev, req); } static int stm32_hash_update(struct ahash_request *req) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_state *state = &rctx->state; if (!req->nbytes || !(state->flags & HASH_FLAGS_CPU)) return 0; rctx->total = req->nbytes; rctx->sg = req->src; rctx->offset = 0; if ((state->bufcnt + rctx->total < state->blocklen)) { stm32_hash_append_sg(rctx); return 0; } return stm32_hash_enqueue(req, HASH_OP_UPDATE); } static int stm32_hash_final(struct ahash_request *req) { struct stm32_hash_request_ctx 
*rctx = ahash_request_ctx(req); struct stm32_hash_state *state = &rctx->state; state->flags |= HASH_FLAGS_FINAL; return stm32_hash_enqueue(req, HASH_OP_FINAL); } static int stm32_hash_finup(struct ahash_request *req) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); struct stm32_hash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(req)); struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); struct stm32_hash_state *state = &rctx->state; if (!req->nbytes) goto out; state->flags |= HASH_FLAGS_FINUP; rctx->total = req->nbytes; rctx->sg = req->src; rctx->offset = 0; if (hdev->dma_lch && stm32_hash_dma_aligned_data(req)) state->flags &= ~HASH_FLAGS_CPU; out: return stm32_hash_final(req); } static int stm32_hash_digest(struct ahash_request *req) { return stm32_hash_init(req) ?: stm32_hash_finup(req); } static int stm32_hash_export(struct ahash_request *req, void *out) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); memcpy(out, &rctx->state, sizeof(rctx->state)); return 0; } static int stm32_hash_import(struct ahash_request *req, const void *in) { struct stm32_hash_request_ctx *rctx = ahash_request_ctx(req); stm32_hash_init(req); memcpy(&rctx->state, in, sizeof(rctx->state)); return 0; } static int stm32_hash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct stm32_hash_ctx *ctx = crypto_ahash_ctx(tfm); if (keylen <= HASH_MAX_KEY_SIZE) { memcpy(ctx->key, key, keylen); ctx->keylen = keylen; } else { return -ENOMEM; } return 0; } static int stm32_hash_init_fallback(struct crypto_tfm *tfm) { struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm); struct stm32_hash_dev *hdev = stm32_hash_find_dev(ctx); const char *name = crypto_tfm_alg_name(tfm); struct crypto_shash *xtfm; /* The fallback is only needed on Ux500 */ if (!hdev->pdata->ux500) return 0; xtfm = crypto_alloc_shash(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(xtfm)) { dev_err(hdev->dev, "failed to allocate %s fallback\n", name); return PTR_ERR(xtfm); } dev_info(hdev->dev, "allocated %s fallback\n", name); ctx->xtfm = xtfm; return 0; } static int stm32_hash_cra_init_algs(struct crypto_tfm *tfm, u32 algs_flags) { struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm); crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct stm32_hash_request_ctx)); ctx->keylen = 0; if (algs_flags) ctx->flags |= algs_flags; return stm32_hash_init_fallback(tfm); } static int stm32_hash_cra_init(struct crypto_tfm *tfm) { return stm32_hash_cra_init_algs(tfm, 0); } static int stm32_hash_cra_hmac_init(struct crypto_tfm *tfm) { return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_HMAC); } static int stm32_hash_cra_sha3_init(struct crypto_tfm *tfm) { return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE); } static int stm32_hash_cra_sha3_hmac_init(struct crypto_tfm *tfm) { return stm32_hash_cra_init_algs(tfm, HASH_FLAGS_SHA3_MODE | HASH_FLAGS_HMAC); } static void stm32_hash_cra_exit(struct crypto_tfm *tfm) { struct stm32_hash_ctx *ctx = crypto_tfm_ctx(tfm); if (ctx->xtfm) crypto_free_shash(ctx->xtfm); } static irqreturn_t stm32_hash_irq_thread(int irq, void *dev_id) { struct stm32_hash_dev *hdev = dev_id; if (HASH_FLAGS_CPU & hdev->flags) { if (HASH_FLAGS_OUTPUT_READY & hdev->flags) { hdev->flags &= ~HASH_FLAGS_OUTPUT_READY; goto finish; } } else if (HASH_FLAGS_DMA_ACTIVE & hdev->flags) { hdev->flags &= ~HASH_FLAGS_DMA_ACTIVE; goto finish; } return IRQ_HANDLED; finish: /* Finish current request */ stm32_hash_finish_req(hdev->req, 0); return IRQ_HANDLED; } static irqreturn_t stm32_hash_irq_handler(int irq, void 
*dev_id) { struct stm32_hash_dev *hdev = dev_id; u32 reg; reg = stm32_hash_read(hdev, HASH_SR); if (reg & HASH_SR_OUTPUT_READY) { hdev->flags |= HASH_FLAGS_OUTPUT_READY; /* Disable IT*/ stm32_hash_write(hdev, HASH_IMR, 0); return IRQ_WAKE_THREAD; } return IRQ_NONE; } static struct ahash_engine_alg algs_md5[] = { { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "md5", .cra_driver_name = "stm32-md5", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.setkey = stm32_hash_setkey, .base.halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "hmac(md5)", .cra_driver_name = "stm32-hmac-md5", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, } }; static struct ahash_engine_alg algs_sha1[] = { { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "sha1", .cra_driver_name = "stm32-sha1", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.setkey = stm32_hash_setkey, .base.halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "hmac(sha1)", .cra_driver_name = "stm32-hmac-sha1", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, }; static struct ahash_engine_alg algs_sha224[] = { { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, 
.base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "sha224", .cra_driver_name = "stm32-sha224", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.setkey = stm32_hash_setkey, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "hmac(sha224)", .cra_driver_name = "stm32-hmac-sha224", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, }; static struct ahash_engine_alg algs_sha256[] = { { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "sha256", .cra_driver_name = "stm32-sha256", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.setkey = stm32_hash_setkey, .base.halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "hmac(sha256)", .cra_driver_name = "stm32-hmac-sha256", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, }; static struct ahash_engine_alg algs_sha384_sha512[] = { { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "sha384", .cra_driver_name = "stm32-sha384", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct 
stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.setkey = stm32_hash_setkey, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "hmac(sha384)", .cra_driver_name = "stm32-hmac-sha384", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "sha512", .cra_driver_name = "stm32-sha512", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.setkey = stm32_hash_setkey, .base.halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "hmac(sha512)", .cra_driver_name = "stm32-hmac-sha512", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, }; static struct ahash_engine_alg algs_sha3[] = { { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = SHA3_224_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "sha3-224", .cra_driver_name = "stm32-sha3-224", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA3_224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_sha3_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.setkey = stm32_hash_setkey, 
.base.halg = { .digestsize = SHA3_224_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "hmac(sha3-224)", .cra_driver_name = "stm32-hmac-sha3-224", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA3_224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_sha3_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = SHA3_256_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "sha3-256", .cra_driver_name = "stm32-sha3-256", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA3_256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_sha3_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.setkey = stm32_hash_setkey, .base.halg = { .digestsize = SHA3_256_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "hmac(sha3-256)", .cra_driver_name = "stm32-hmac-sha3-256", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA3_256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_sha3_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = SHA3_384_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "sha3-384", .cra_driver_name = "stm32-sha3-384", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA3_384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_sha3_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.setkey = stm32_hash_setkey, .base.halg = { .digestsize = SHA3_384_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "hmac(sha3-384)", .cra_driver_name = "stm32-hmac-sha3-384", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA3_384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_sha3_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, 
.op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.halg = { .digestsize = SHA3_512_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "sha3-512", .cra_driver_name = "stm32-sha3-512", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA3_512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_sha3_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, }, { .base.init = stm32_hash_init, .base.update = stm32_hash_update, .base.final = stm32_hash_final, .base.finup = stm32_hash_finup, .base.digest = stm32_hash_digest, .base.export = stm32_hash_export, .base.import = stm32_hash_import, .base.setkey = stm32_hash_setkey, .base.halg = { .digestsize = SHA3_512_DIGEST_SIZE, .statesize = sizeof(struct stm32_hash_state), .base = { .cra_name = "hmac(sha3-512)", .cra_driver_name = "stm32-hmac-sha3-512", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA3_512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct stm32_hash_ctx), .cra_alignmask = 3, .cra_init = stm32_hash_cra_sha3_hmac_init, .cra_exit = stm32_hash_cra_exit, .cra_module = THIS_MODULE, } }, .op = { .do_one_request = stm32_hash_one_request, }, } }; static int stm32_hash_register_algs(struct stm32_hash_dev *hdev) { unsigned int i, j; int err; for (i = 0; i < hdev->pdata->algs_info_size; i++) { for (j = 0; j < hdev->pdata->algs_info[i].size; j++) { err = crypto_engine_register_ahash( &hdev->pdata->algs_info[i].algs_list[j]); if (err) goto err_algs; } } return 0; err_algs: dev_err(hdev->dev, "Algo %d : %d failed\n", i, j); for (; i--; ) { for (; j--;) crypto_engine_unregister_ahash( &hdev->pdata->algs_info[i].algs_list[j]); } return err; } static int stm32_hash_unregister_algs(struct stm32_hash_dev *hdev) { unsigned int i, j; for (i = 0; i < hdev->pdata->algs_info_size; i++) { for (j = 0; j < hdev->pdata->algs_info[i].size; j++) crypto_engine_unregister_ahash( &hdev->pdata->algs_info[i].algs_list[j]); } return 0; } static struct stm32_hash_algs_info stm32_hash_algs_info_ux500[] = { { .algs_list = algs_sha1, .size = ARRAY_SIZE(algs_sha1), }, { .algs_list = algs_sha256, .size = ARRAY_SIZE(algs_sha256), }, }; static const struct stm32_hash_pdata stm32_hash_pdata_ux500 = { .alg_shift = 7, .algs_info = stm32_hash_algs_info_ux500, .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_ux500), .broken_emptymsg = true, .ux500 = true, }; static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f4[] = { { .algs_list = algs_md5, .size = ARRAY_SIZE(algs_md5), }, { .algs_list = algs_sha1, .size = ARRAY_SIZE(algs_sha1), }, }; static const struct stm32_hash_pdata stm32_hash_pdata_stm32f4 = { .alg_shift = 7, .algs_info = stm32_hash_algs_info_stm32f4, .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f4), .has_sr = true, .has_mdmat = true, }; static struct stm32_hash_algs_info stm32_hash_algs_info_stm32f7[] = { { .algs_list = algs_md5, .size = ARRAY_SIZE(algs_md5), }, { .algs_list = algs_sha1, .size = ARRAY_SIZE(algs_sha1), }, { .algs_list = algs_sha224, .size = ARRAY_SIZE(algs_sha224), }, { .algs_list = algs_sha256, .size = ARRAY_SIZE(algs_sha256), }, }; static const struct 
stm32_hash_pdata stm32_hash_pdata_stm32f7 = { .alg_shift = 7, .algs_info = stm32_hash_algs_info_stm32f7, .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32f7), .has_sr = true, .has_mdmat = true, }; static struct stm32_hash_algs_info stm32_hash_algs_info_stm32mp13[] = { { .algs_list = algs_sha1, .size = ARRAY_SIZE(algs_sha1), }, { .algs_list = algs_sha224, .size = ARRAY_SIZE(algs_sha224), }, { .algs_list = algs_sha256, .size = ARRAY_SIZE(algs_sha256), }, { .algs_list = algs_sha384_sha512, .size = ARRAY_SIZE(algs_sha384_sha512), }, { .algs_list = algs_sha3, .size = ARRAY_SIZE(algs_sha3), }, }; static const struct stm32_hash_pdata stm32_hash_pdata_stm32mp13 = { .alg_shift = 17, .algs_info = stm32_hash_algs_info_stm32mp13, .algs_info_size = ARRAY_SIZE(stm32_hash_algs_info_stm32mp13), .has_sr = true, .has_mdmat = true, }; static const struct of_device_id stm32_hash_of_match[] = { { .compatible = "stericsson,ux500-hash", .data = &stm32_hash_pdata_ux500 }, { .compatible = "st,stm32f456-hash", .data = &stm32_hash_pdata_stm32f4 }, { .compatible = "st,stm32f756-hash", .data = &stm32_hash_pdata_stm32f7 }, { .compatible = "st,stm32mp13-hash", .data = &stm32_hash_pdata_stm32mp13 }, {}, }; MODULE_DEVICE_TABLE(of, stm32_hash_of_match); static int stm32_hash_get_of_match(struct stm32_hash_dev *hdev, struct device *dev) { hdev->pdata = of_device_get_match_data(dev); if (!hdev->pdata) { dev_err(dev, "no compatible OF match\n"); return -EINVAL; } return 0; } static int stm32_hash_probe(struct platform_device *pdev) { struct stm32_hash_dev *hdev; struct device *dev = &pdev->dev; struct resource *res; int ret, irq; hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL); if (!hdev) return -ENOMEM; hdev->io_base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(hdev->io_base)) return PTR_ERR(hdev->io_base); hdev->phys_base = res->start; ret = stm32_hash_get_of_match(hdev, dev); if (ret) return ret; irq = platform_get_irq_optional(pdev, 0); if (irq < 0 && irq != -ENXIO) return irq; if (irq > 0) { ret = devm_request_threaded_irq(dev, irq, stm32_hash_irq_handler, stm32_hash_irq_thread, IRQF_ONESHOT, dev_name(dev), hdev); if (ret) { dev_err(dev, "Cannot grab IRQ\n"); return ret; } } else { dev_info(dev, "No IRQ, use polling mode\n"); hdev->polled = true; } hdev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(hdev->clk)) return dev_err_probe(dev, PTR_ERR(hdev->clk), "failed to get clock for hash\n"); ret = clk_prepare_enable(hdev->clk); if (ret) { dev_err(dev, "failed to enable hash clock (%d)\n", ret); return ret; } pm_runtime_set_autosuspend_delay(dev, HASH_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(dev); pm_runtime_get_noresume(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); hdev->rst = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(hdev->rst)) { if (PTR_ERR(hdev->rst) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto err_reset; } } else { reset_control_assert(hdev->rst); udelay(2); reset_control_deassert(hdev->rst); } hdev->dev = dev; platform_set_drvdata(pdev, hdev); ret = stm32_hash_dma_init(hdev); switch (ret) { case 0: break; case -ENOENT: case -ENODEV: dev_info(dev, "DMA mode not available\n"); break; default: dev_err(dev, "DMA init error %d\n", ret); goto err_dma; } spin_lock(&stm32_hash.lock); list_add_tail(&hdev->list, &stm32_hash.dev_list); spin_unlock(&stm32_hash.lock); /* Initialize crypto engine */ hdev->engine = crypto_engine_alloc_init(dev, 1); if (!hdev->engine) { ret = -ENOMEM; goto err_engine; } ret = crypto_engine_start(hdev->engine); if (ret) goto 
err_engine_start; if (hdev->pdata->ux500) /* FIXME: implement DMA mode for Ux500 */ hdev->dma_mode = 0; else hdev->dma_mode = stm32_hash_read(hdev, HASH_HWCFGR) & HASH_HWCFG_DMA_MASK; /* Register algos */ ret = stm32_hash_register_algs(hdev); if (ret) goto err_algs; dev_info(dev, "Init HASH done HW ver %x DMA mode %u\n", stm32_hash_read(hdev, HASH_VER), hdev->dma_mode); pm_runtime_put_sync(dev); return 0; err_algs: err_engine_start: crypto_engine_exit(hdev->engine); err_engine: spin_lock(&stm32_hash.lock); list_del(&hdev->list); spin_unlock(&stm32_hash.lock); err_dma: if (hdev->dma_lch) dma_release_channel(hdev->dma_lch); err_reset: pm_runtime_disable(dev); pm_runtime_put_noidle(dev); clk_disable_unprepare(hdev->clk); return ret; } static void stm32_hash_remove(struct platform_device *pdev) { struct stm32_hash_dev *hdev = platform_get_drvdata(pdev); int ret; ret = pm_runtime_get_sync(hdev->dev); stm32_hash_unregister_algs(hdev); crypto_engine_exit(hdev->engine); spin_lock(&stm32_hash.lock); list_del(&hdev->list); spin_unlock(&stm32_hash.lock); if (hdev->dma_lch) dma_release_channel(hdev->dma_lch); pm_runtime_disable(hdev->dev); pm_runtime_put_noidle(hdev->dev); if (ret >= 0) clk_disable_unprepare(hdev->clk); } #ifdef CONFIG_PM static int stm32_hash_runtime_suspend(struct device *dev) { struct stm32_hash_dev *hdev = dev_get_drvdata(dev); clk_disable_unprepare(hdev->clk); return 0; } static int stm32_hash_runtime_resume(struct device *dev) { struct stm32_hash_dev *hdev = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(hdev->clk); if (ret) { dev_err(hdev->dev, "Failed to prepare_enable clock\n"); return ret; } return 0; } #endif static const struct dev_pm_ops stm32_hash_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(stm32_hash_runtime_suspend, stm32_hash_runtime_resume, NULL) }; static struct platform_driver stm32_hash_driver = { .probe = stm32_hash_probe, .remove_new = stm32_hash_remove, .driver = { .name = "stm32-hash", .pm = &stm32_hash_pm_ops, .of_match_table = stm32_hash_of_match, } }; module_platform_driver(stm32_hash_driver); MODULE_DESCRIPTION("STM32 SHA1/SHA2/SHA3 & MD5 (HMAC) hw accelerator driver"); MODULE_AUTHOR("Lionel Debieve <[email protected]>"); MODULE_LICENSE("GPL v2");
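The file above only registers ahash transforms ("md5", "sha1", "sha224", "sha256", "sha384", "sha512", the SHA-3 variants and their hmac(...) wrappers, all at priority 200) with the crypto engine; nothing in it is called directly by a consumer. A minimal kernel-side sketch of how a consumer would reach one of them through the generic ahash API follows. The function name, buffer handling and error paths here are illustrative assumptions, not part of the driver, and the resolved tfm may be a different "sha256" provider if one with a higher priority is available.

/* Hypothetical consumer sketch - not part of stm32-hash.c. */
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_sha256_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	/* Resolves to the highest-priority "sha256" provider, possibly "stm32-sha256". */
	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	/* data should be DMA-able memory (e.g. kmalloc'd), not a stack buffer. */
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* The driver completes the request asynchronously via the crypto engine. */
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}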
linux-master
drivers/crypto/stm32/stm32-hash.c
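Once registered, these hash algorithms are also reachable from user space through the AF_ALG socket interface when CONFIG_CRYPTO_USER_API_HASH is enabled. The user-space sketch below is illustrative only: error handling is omitted, the message is a placeholder, and the kernel picks whichever "sha256" provider has the highest priority, which may or may not be the stm32 driver.

/* Hypothetical AF_ALG user-space sketch - not part of the driver sources. */
#include <linux/if_alg.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256", /* "hmac(sha256)" also works, after ALG_SET_KEY */
	};
	unsigned char digest[32];
	const char msg[] = "abc";
	unsigned int i;
	int tfm, op;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
	/* For HMAC, set the key on 'tfm' with setsockopt(SOL_ALG, ALG_SET_KEY) before accept(). */
	op = accept(tfm, NULL, 0);

	send(op, msg, sizeof(msg) - 1, 0); /* pass MSG_MORE to stream several chunks */
	read(op, digest, sizeof(digest));  /* returns the finalized digest */

	for (i = 0; i < sizeof(digest); i++)
		printf("%02x", digest[i]);
	printf("\n");

	close(op);
	close(tfm);
	return 0;
}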
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) STMicroelectronics SA 2017 * Author: Fabien Dessenne <[email protected]> * Ux500 support taken from snippets in the old Ux500 cryp driver */ #include <crypto/aes.h> #include <crypto/engine.h> #include <crypto/internal/aead.h> #include <crypto/internal/des.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/iopoll.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include <linux/string.h> #define DRIVER_NAME "stm32-cryp" /* Bit [0] encrypt / decrypt */ #define FLG_ENCRYPT BIT(0) /* Bit [8..1] algo & operation mode */ #define FLG_AES BIT(1) #define FLG_DES BIT(2) #define FLG_TDES BIT(3) #define FLG_ECB BIT(4) #define FLG_CBC BIT(5) #define FLG_CTR BIT(6) #define FLG_GCM BIT(7) #define FLG_CCM BIT(8) /* Mode mask = bits [15..0] */ #define FLG_MODE_MASK GENMASK(15, 0) /* Bit [31..16] status */ /* Registers */ #define CRYP_CR 0x00000000 #define CRYP_SR 0x00000004 #define CRYP_DIN 0x00000008 #define CRYP_DOUT 0x0000000C #define CRYP_DMACR 0x00000010 #define CRYP_IMSCR 0x00000014 #define CRYP_RISR 0x00000018 #define CRYP_MISR 0x0000001C #define CRYP_K0LR 0x00000020 #define CRYP_K0RR 0x00000024 #define CRYP_K1LR 0x00000028 #define CRYP_K1RR 0x0000002C #define CRYP_K2LR 0x00000030 #define CRYP_K2RR 0x00000034 #define CRYP_K3LR 0x00000038 #define CRYP_K3RR 0x0000003C #define CRYP_IV0LR 0x00000040 #define CRYP_IV0RR 0x00000044 #define CRYP_IV1LR 0x00000048 #define CRYP_IV1RR 0x0000004C #define CRYP_CSGCMCCM0R 0x00000050 #define CRYP_CSGCM0R 0x00000070 #define UX500_CRYP_CR 0x00000000 #define UX500_CRYP_SR 0x00000004 #define UX500_CRYP_DIN 0x00000008 #define UX500_CRYP_DINSIZE 0x0000000C #define UX500_CRYP_DOUT 0x00000010 #define UX500_CRYP_DOUSIZE 0x00000014 #define UX500_CRYP_DMACR 0x00000018 #define UX500_CRYP_IMSC 0x0000001C #define UX500_CRYP_RIS 0x00000020 #define UX500_CRYP_MIS 0x00000024 #define UX500_CRYP_K1L 0x00000028 #define UX500_CRYP_K1R 0x0000002C #define UX500_CRYP_K2L 0x00000030 #define UX500_CRYP_K2R 0x00000034 #define UX500_CRYP_K3L 0x00000038 #define UX500_CRYP_K3R 0x0000003C #define UX500_CRYP_K4L 0x00000040 #define UX500_CRYP_K4R 0x00000044 #define UX500_CRYP_IV0L 0x00000048 #define UX500_CRYP_IV0R 0x0000004C #define UX500_CRYP_IV1L 0x00000050 #define UX500_CRYP_IV1R 0x00000054 /* Registers values */ #define CR_DEC_NOT_ENC 0x00000004 #define CR_TDES_ECB 0x00000000 #define CR_TDES_CBC 0x00000008 #define CR_DES_ECB 0x00000010 #define CR_DES_CBC 0x00000018 #define CR_AES_ECB 0x00000020 #define CR_AES_CBC 0x00000028 #define CR_AES_CTR 0x00000030 #define CR_AES_KP 0x00000038 /* Not on Ux500 */ #define CR_AES_XTS 0x00000038 /* Only on Ux500 */ #define CR_AES_GCM 0x00080000 #define CR_AES_CCM 0x00080008 #define CR_AES_UNKNOWN 0xFFFFFFFF #define CR_ALGO_MASK 0x00080038 #define CR_DATA32 0x00000000 #define CR_DATA16 0x00000040 #define CR_DATA8 0x00000080 #define CR_DATA1 0x000000C0 #define CR_KEY128 0x00000000 #define CR_KEY192 0x00000100 #define CR_KEY256 0x00000200 #define CR_KEYRDEN 0x00000400 /* Only on Ux500 */ #define CR_KSE 0x00000800 /* Only on Ux500 */ #define CR_FFLUSH 0x00004000 #define CR_CRYPEN 0x00008000 #define CR_PH_INIT 0x00000000 #define CR_PH_HEADER 0x00010000 #define CR_PH_PAYLOAD 0x00020000 #define CR_PH_FINAL 0x00030000 #define CR_PH_MASK 0x00030000 #define 
CR_NBPBL_SHIFT 20 #define SR_BUSY 0x00000010 #define SR_OFNE 0x00000004 #define IMSCR_IN BIT(0) #define IMSCR_OUT BIT(1) #define MISR_IN BIT(0) #define MISR_OUT BIT(1) /* Misc */ #define AES_BLOCK_32 (AES_BLOCK_SIZE / sizeof(u32)) #define GCM_CTR_INIT 2 #define CRYP_AUTOSUSPEND_DELAY 50 struct stm32_cryp_caps { bool aeads_support; bool linear_aes_key; bool kp_mode; bool iv_protection; bool swap_final; bool padding_wa; u32 cr; u32 sr; u32 din; u32 dout; u32 imsc; u32 mis; u32 k1l; u32 k1r; u32 k3r; u32 iv0l; u32 iv0r; u32 iv1l; u32 iv1r; }; struct stm32_cryp_ctx { struct stm32_cryp *cryp; int keylen; __be32 key[AES_KEYSIZE_256 / sizeof(u32)]; unsigned long flags; }; struct stm32_cryp_reqctx { unsigned long mode; }; struct stm32_cryp { struct list_head list; struct device *dev; void __iomem *regs; struct clk *clk; unsigned long flags; u32 irq_status; const struct stm32_cryp_caps *caps; struct stm32_cryp_ctx *ctx; struct crypto_engine *engine; struct skcipher_request *req; struct aead_request *areq; size_t authsize; size_t hw_blocksize; size_t payload_in; size_t header_in; size_t payload_out; struct scatterlist *out_sg; struct scatter_walk in_walk; struct scatter_walk out_walk; __be32 last_ctr[4]; u32 gcm_ctr; }; struct stm32_cryp_list { struct list_head dev_list; spinlock_t lock; /* protect dev_list */ }; static struct stm32_cryp_list cryp_list = { .dev_list = LIST_HEAD_INIT(cryp_list.dev_list), .lock = __SPIN_LOCK_UNLOCKED(cryp_list.lock), }; static inline bool is_aes(struct stm32_cryp *cryp) { return cryp->flags & FLG_AES; } static inline bool is_des(struct stm32_cryp *cryp) { return cryp->flags & FLG_DES; } static inline bool is_tdes(struct stm32_cryp *cryp) { return cryp->flags & FLG_TDES; } static inline bool is_ecb(struct stm32_cryp *cryp) { return cryp->flags & FLG_ECB; } static inline bool is_cbc(struct stm32_cryp *cryp) { return cryp->flags & FLG_CBC; } static inline bool is_ctr(struct stm32_cryp *cryp) { return cryp->flags & FLG_CTR; } static inline bool is_gcm(struct stm32_cryp *cryp) { return cryp->flags & FLG_GCM; } static inline bool is_ccm(struct stm32_cryp *cryp) { return cryp->flags & FLG_CCM; } static inline bool is_encrypt(struct stm32_cryp *cryp) { return cryp->flags & FLG_ENCRYPT; } static inline bool is_decrypt(struct stm32_cryp *cryp) { return !is_encrypt(cryp); } static inline u32 stm32_cryp_read(struct stm32_cryp *cryp, u32 ofst) { return readl_relaxed(cryp->regs + ofst); } static inline void stm32_cryp_write(struct stm32_cryp *cryp, u32 ofst, u32 val) { writel_relaxed(val, cryp->regs + ofst); } static inline int stm32_cryp_wait_busy(struct stm32_cryp *cryp) { u32 status; return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->sr, status, !(status & SR_BUSY), 10, 100000); } static inline void stm32_cryp_enable(struct stm32_cryp *cryp) { writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) | CR_CRYPEN, cryp->regs + cryp->caps->cr); } static inline int stm32_cryp_wait_enable(struct stm32_cryp *cryp) { u32 status; return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->cr, status, !(status & CR_CRYPEN), 10, 100000); } static inline int stm32_cryp_wait_output(struct stm32_cryp *cryp) { u32 status; return readl_relaxed_poll_timeout(cryp->regs + cryp->caps->sr, status, status & SR_OFNE, 10, 100000); } static inline void stm32_cryp_key_read_enable(struct stm32_cryp *cryp) { writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) | CR_KEYRDEN, cryp->regs + cryp->caps->cr); } static inline void stm32_cryp_key_read_disable(struct stm32_cryp *cryp) { 
writel_relaxed(readl_relaxed(cryp->regs + cryp->caps->cr) & ~CR_KEYRDEN, cryp->regs + cryp->caps->cr); } static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp); static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err); static struct stm32_cryp *stm32_cryp_find_dev(struct stm32_cryp_ctx *ctx) { struct stm32_cryp *tmp, *cryp = NULL; spin_lock_bh(&cryp_list.lock); if (!ctx->cryp) { list_for_each_entry(tmp, &cryp_list.dev_list, list) { cryp = tmp; break; } ctx->cryp = cryp; } else { cryp = ctx->cryp; } spin_unlock_bh(&cryp_list.lock); return cryp; } static void stm32_cryp_hw_write_iv(struct stm32_cryp *cryp, __be32 *iv) { if (!iv) return; stm32_cryp_write(cryp, cryp->caps->iv0l, be32_to_cpu(*iv++)); stm32_cryp_write(cryp, cryp->caps->iv0r, be32_to_cpu(*iv++)); if (is_aes(cryp)) { stm32_cryp_write(cryp, cryp->caps->iv1l, be32_to_cpu(*iv++)); stm32_cryp_write(cryp, cryp->caps->iv1r, be32_to_cpu(*iv++)); } } static void stm32_cryp_get_iv(struct stm32_cryp *cryp) { struct skcipher_request *req = cryp->req; __be32 *tmp = (void *)req->iv; if (!tmp) return; if (cryp->caps->iv_protection) stm32_cryp_key_read_enable(cryp); *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0l)); *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0r)); if (is_aes(cryp)) { *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1l)); *tmp++ = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1r)); } if (cryp->caps->iv_protection) stm32_cryp_key_read_disable(cryp); } /** * ux500_swap_bits_in_byte() - mirror the bits in a byte * @b: the byte to be mirrored * * The bits are swapped the following way: * Byte b include bits 0-7, nibble 1 (n1) include bits 0-3 and * nibble 2 (n2) bits 4-7. * * Nibble 1 (n1): * (The "old" (moved) bit is replaced with a zero) * 1. Move bit 6 and 7, 4 positions to the left. * 2. Move bit 3 and 5, 2 positions to the left. * 3. Move bit 1-4, 1 position to the left. * * Nibble 2 (n2): * 1. Move bit 0 and 1, 4 positions to the right. * 2. Move bit 2 and 4, 2 positions to the right. * 3. Move bit 3-6, 1 position to the right. * * Combine the two nibbles to a complete and swapped byte. */ static inline u8 ux500_swap_bits_in_byte(u8 b) { #define R_SHIFT_4_MASK 0xc0 /* Bits 6 and 7, right shift 4 */ #define R_SHIFT_2_MASK 0x28 /* (After right shift 4) Bits 3 and 5, right shift 2 */ #define R_SHIFT_1_MASK 0x1e /* (After right shift 2) Bits 1-4, right shift 1 */ #define L_SHIFT_4_MASK 0x03 /* Bits 0 and 1, left shift 4 */ #define L_SHIFT_2_MASK 0x14 /* (After left shift 4) Bits 2 and 4, left shift 2 */ #define L_SHIFT_1_MASK 0x78 /* (After left shift 1) Bits 3-6, left shift 1 */ u8 n1; u8 n2; /* Swap most significant nibble */ /* Right shift 4, bits 6 and 7 */ n1 = ((b & R_SHIFT_4_MASK) >> 4) | (b & ~(R_SHIFT_4_MASK >> 4)); /* Right shift 2, bits 3 and 5 */ n1 = ((n1 & R_SHIFT_2_MASK) >> 2) | (n1 & ~(R_SHIFT_2_MASK >> 2)); /* Right shift 1, bits 1-4 */ n1 = (n1 & R_SHIFT_1_MASK) >> 1; /* Swap least significant nibble */ /* Left shift 4, bits 0 and 1 */ n2 = ((b & L_SHIFT_4_MASK) << 4) | (b & ~(L_SHIFT_4_MASK << 4)); /* Left shift 2, bits 2 and 4 */ n2 = ((n2 & L_SHIFT_2_MASK) << 2) | (n2 & ~(L_SHIFT_2_MASK << 2)); /* Left shift 1, bits 3-6 */ n2 = (n2 & L_SHIFT_1_MASK) << 1; return n1 | n2; } /** * ux500_swizzle_key() - Shuffle around words and bits in the AES key * @in: key to swizzle * @out: swizzled key * @len: length of key, in bytes * * This "key swizzling procedure" is described in the examples in the * DB8500 design specification. 
There is no real description of why * the bits have been arranged like this in the hardware. */ static inline void ux500_swizzle_key(const u8 *in, u8 *out, u32 len) { int i = 0; int bpw = sizeof(u32); int j; int index = 0; j = len - bpw; while (j >= 0) { for (i = 0; i < bpw; i++) { index = len - j - bpw + i; out[j + i] = ux500_swap_bits_in_byte(in[index]); } j -= bpw; } } static void stm32_cryp_hw_write_key(struct stm32_cryp *c) { unsigned int i; int r_id; if (is_des(c)) { stm32_cryp_write(c, c->caps->k1l, be32_to_cpu(c->ctx->key[0])); stm32_cryp_write(c, c->caps->k1r, be32_to_cpu(c->ctx->key[1])); return; } /* * On the Ux500 the AES key is considered as a single bit sequence * of 128, 192 or 256 bits length. It is written linearly into the * registers from K1L and down, and need to be processed to become * a proper big-endian bit sequence. */ if (is_aes(c) && c->caps->linear_aes_key) { u32 tmpkey[8]; ux500_swizzle_key((u8 *)c->ctx->key, (u8 *)tmpkey, c->ctx->keylen); r_id = c->caps->k1l; for (i = 0; i < c->ctx->keylen / sizeof(u32); i++, r_id += 4) stm32_cryp_write(c, r_id, tmpkey[i]); return; } r_id = c->caps->k3r; for (i = c->ctx->keylen / sizeof(u32); i > 0; i--, r_id -= 4) stm32_cryp_write(c, r_id, be32_to_cpu(c->ctx->key[i - 1])); } static u32 stm32_cryp_get_hw_mode(struct stm32_cryp *cryp) { if (is_aes(cryp) && is_ecb(cryp)) return CR_AES_ECB; if (is_aes(cryp) && is_cbc(cryp)) return CR_AES_CBC; if (is_aes(cryp) && is_ctr(cryp)) return CR_AES_CTR; if (is_aes(cryp) && is_gcm(cryp)) return CR_AES_GCM; if (is_aes(cryp) && is_ccm(cryp)) return CR_AES_CCM; if (is_des(cryp) && is_ecb(cryp)) return CR_DES_ECB; if (is_des(cryp) && is_cbc(cryp)) return CR_DES_CBC; if (is_tdes(cryp) && is_ecb(cryp)) return CR_TDES_ECB; if (is_tdes(cryp) && is_cbc(cryp)) return CR_TDES_CBC; dev_err(cryp->dev, "Unknown mode\n"); return CR_AES_UNKNOWN; } static unsigned int stm32_cryp_get_input_text_len(struct stm32_cryp *cryp) { return is_encrypt(cryp) ? cryp->areq->cryptlen : cryp->areq->cryptlen - cryp->authsize; } static int stm32_cryp_gcm_init(struct stm32_cryp *cryp, u32 cfg) { int ret; __be32 iv[4]; /* Phase 1 : init */ memcpy(iv, cryp->areq->iv, 12); iv[3] = cpu_to_be32(GCM_CTR_INIT); cryp->gcm_ctr = GCM_CTR_INIT; stm32_cryp_hw_write_iv(cryp, iv); stm32_cryp_write(cryp, cryp->caps->cr, cfg | CR_PH_INIT | CR_CRYPEN); /* Wait for end of processing */ ret = stm32_cryp_wait_enable(cryp); if (ret) { dev_err(cryp->dev, "Timeout (gcm init)\n"); return ret; } /* Prepare next phase */ if (cryp->areq->assoclen) { cfg |= CR_PH_HEADER; stm32_cryp_write(cryp, cryp->caps->cr, cfg); } else if (stm32_cryp_get_input_text_len(cryp)) { cfg |= CR_PH_PAYLOAD; stm32_cryp_write(cryp, cryp->caps->cr, cfg); } return 0; } static void stm32_crypt_gcmccm_end_header(struct stm32_cryp *cryp) { u32 cfg; int err; /* Check if whole header written */ if (!cryp->header_in) { /* Wait for completion */ err = stm32_cryp_wait_busy(cryp); if (err) { dev_err(cryp->dev, "Timeout (gcm/ccm header)\n"); stm32_cryp_write(cryp, cryp->caps->imsc, 0); stm32_cryp_finish_req(cryp, err); return; } if (stm32_cryp_get_input_text_len(cryp)) { /* Phase 3 : payload */ cfg = stm32_cryp_read(cryp, cryp->caps->cr); cfg &= ~CR_CRYPEN; stm32_cryp_write(cryp, cryp->caps->cr, cfg); cfg &= ~CR_PH_MASK; cfg |= CR_PH_PAYLOAD | CR_CRYPEN; stm32_cryp_write(cryp, cryp->caps->cr, cfg); } else { /* * Phase 4 : tag. 
* Nothing to read, nothing to write, caller have to * end request */ } } } static void stm32_cryp_write_ccm_first_header(struct stm32_cryp *cryp) { size_t written; size_t len; u32 alen = cryp->areq->assoclen; u32 block[AES_BLOCK_32] = {0}; u8 *b8 = (u8 *)block; if (alen <= 65280) { /* Write first u32 of B1 */ b8[0] = (alen >> 8) & 0xFF; b8[1] = alen & 0xFF; len = 2; } else { /* Build the two first u32 of B1 */ b8[0] = 0xFF; b8[1] = 0xFE; b8[2] = (alen & 0xFF000000) >> 24; b8[3] = (alen & 0x00FF0000) >> 16; b8[4] = (alen & 0x0000FF00) >> 8; b8[5] = alen & 0x000000FF; len = 6; } written = min_t(size_t, AES_BLOCK_SIZE - len, alen); scatterwalk_copychunks((char *)block + len, &cryp->in_walk, written, 0); writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32); cryp->header_in -= written; stm32_crypt_gcmccm_end_header(cryp); } static int stm32_cryp_ccm_init(struct stm32_cryp *cryp, u32 cfg) { int ret; u32 iv_32[AES_BLOCK_32], b0_32[AES_BLOCK_32]; u8 *iv = (u8 *)iv_32, *b0 = (u8 *)b0_32; __be32 *bd; u32 *d; unsigned int i, textlen; /* Phase 1 : init. Firstly set the CTR value to 1 (not 0) */ memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE); memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1); iv[AES_BLOCK_SIZE - 1] = 1; stm32_cryp_hw_write_iv(cryp, (__be32 *)iv); /* Build B0 */ memcpy(b0, iv, AES_BLOCK_SIZE); b0[0] |= (8 * ((cryp->authsize - 2) / 2)); if (cryp->areq->assoclen) b0[0] |= 0x40; textlen = stm32_cryp_get_input_text_len(cryp); b0[AES_BLOCK_SIZE - 2] = textlen >> 8; b0[AES_BLOCK_SIZE - 1] = textlen & 0xFF; /* Enable HW */ stm32_cryp_write(cryp, cryp->caps->cr, cfg | CR_PH_INIT | CR_CRYPEN); /* Write B0 */ d = (u32 *)b0; bd = (__be32 *)b0; for (i = 0; i < AES_BLOCK_32; i++) { u32 xd = d[i]; if (!cryp->caps->padding_wa) xd = be32_to_cpu(bd[i]); stm32_cryp_write(cryp, cryp->caps->din, xd); } /* Wait for end of processing */ ret = stm32_cryp_wait_enable(cryp); if (ret) { dev_err(cryp->dev, "Timeout (ccm init)\n"); return ret; } /* Prepare next phase */ if (cryp->areq->assoclen) { cfg |= CR_PH_HEADER | CR_CRYPEN; stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* Write first (special) block (may move to next phase [payload]) */ stm32_cryp_write_ccm_first_header(cryp); } else if (stm32_cryp_get_input_text_len(cryp)) { cfg |= CR_PH_PAYLOAD; stm32_cryp_write(cryp, cryp->caps->cr, cfg); } return 0; } static int stm32_cryp_hw_init(struct stm32_cryp *cryp) { int ret; u32 cfg, hw_mode; pm_runtime_get_sync(cryp->dev); /* Disable interrupt */ stm32_cryp_write(cryp, cryp->caps->imsc, 0); /* Set configuration */ cfg = CR_DATA8 | CR_FFLUSH; switch (cryp->ctx->keylen) { case AES_KEYSIZE_128: cfg |= CR_KEY128; break; case AES_KEYSIZE_192: cfg |= CR_KEY192; break; default: case AES_KEYSIZE_256: cfg |= CR_KEY256; break; } hw_mode = stm32_cryp_get_hw_mode(cryp); if (hw_mode == CR_AES_UNKNOWN) return -EINVAL; /* AES ECB/CBC decrypt: run key preparation first */ if (is_decrypt(cryp) && ((hw_mode == CR_AES_ECB) || (hw_mode == CR_AES_CBC))) { /* Configure in key preparation mode */ if (cryp->caps->kp_mode) stm32_cryp_write(cryp, cryp->caps->cr, cfg | CR_AES_KP); else stm32_cryp_write(cryp, cryp->caps->cr, cfg | CR_AES_ECB | CR_KSE); /* Set key only after full configuration done */ stm32_cryp_hw_write_key(cryp); /* Start prepare key */ stm32_cryp_enable(cryp); /* Wait for end of processing */ ret = stm32_cryp_wait_busy(cryp); if (ret) { dev_err(cryp->dev, "Timeout (key preparation)\n"); return ret; } cfg |= hw_mode | CR_DEC_NOT_ENC; /* Apply updated config (Decrypt + algo) and flush */ stm32_cryp_write(cryp, 
cryp->caps->cr, cfg); } else { cfg |= hw_mode; if (is_decrypt(cryp)) cfg |= CR_DEC_NOT_ENC; /* Apply config and flush */ stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* Set key only after configuration done */ stm32_cryp_hw_write_key(cryp); } switch (hw_mode) { case CR_AES_GCM: case CR_AES_CCM: /* Phase 1 : init */ if (hw_mode == CR_AES_CCM) ret = stm32_cryp_ccm_init(cryp, cfg); else ret = stm32_cryp_gcm_init(cryp, cfg); if (ret) return ret; break; case CR_DES_CBC: case CR_TDES_CBC: case CR_AES_CBC: case CR_AES_CTR: stm32_cryp_hw_write_iv(cryp, (__be32 *)cryp->req->iv); break; default: break; } /* Enable now */ stm32_cryp_enable(cryp); return 0; } static void stm32_cryp_finish_req(struct stm32_cryp *cryp, int err) { if (!err && (is_gcm(cryp) || is_ccm(cryp))) /* Phase 4 : output tag */ err = stm32_cryp_read_auth_tag(cryp); if (!err && (!(is_gcm(cryp) || is_ccm(cryp) || is_ecb(cryp)))) stm32_cryp_get_iv(cryp); pm_runtime_mark_last_busy(cryp->dev); pm_runtime_put_autosuspend(cryp->dev); if (is_gcm(cryp) || is_ccm(cryp)) crypto_finalize_aead_request(cryp->engine, cryp->areq, err); else crypto_finalize_skcipher_request(cryp->engine, cryp->req, err); } static int stm32_cryp_cpu_start(struct stm32_cryp *cryp) { /* Enable interrupt and let the IRQ handler do everything */ stm32_cryp_write(cryp, cryp->caps->imsc, IMSCR_IN | IMSCR_OUT); return 0; } static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq); static int stm32_cryp_init_tfm(struct crypto_skcipher *tfm) { crypto_skcipher_set_reqsize(tfm, sizeof(struct stm32_cryp_reqctx)); return 0; } static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq); static int stm32_cryp_aes_aead_init(struct crypto_aead *tfm) { tfm->reqsize = sizeof(struct stm32_cryp_reqctx); return 0; } static int stm32_cryp_crypt(struct skcipher_request *req, unsigned long mode) { struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); struct stm32_cryp_reqctx *rctx = skcipher_request_ctx(req); struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx); if (!cryp) return -ENODEV; rctx->mode = mode; return crypto_transfer_skcipher_request_to_engine(cryp->engine, req); } static int stm32_cryp_aead_crypt(struct aead_request *req, unsigned long mode) { struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct stm32_cryp_reqctx *rctx = aead_request_ctx(req); struct stm32_cryp *cryp = stm32_cryp_find_dev(ctx); if (!cryp) return -ENODEV; rctx->mode = mode; return crypto_transfer_aead_request_to_engine(cryp->engine, req); } static int stm32_cryp_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx(tfm); memcpy(ctx->key, key, keylen); ctx->keylen = keylen; return 0; } static int stm32_cryp_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) return -EINVAL; else return stm32_cryp_setkey(tfm, key, keylen); } static int stm32_cryp_des_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return verify_skcipher_des_key(tfm, key) ?: stm32_cryp_setkey(tfm, key, keylen); } static int stm32_cryp_tdes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return verify_skcipher_des3_key(tfm, key) ?: stm32_cryp_setkey(tfm, key, keylen); } static int stm32_cryp_aes_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct stm32_cryp_ctx *ctx = crypto_aead_ctx(tfm); if (keylen 
!= AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) return -EINVAL; memcpy(ctx->key, key, keylen); ctx->keylen = keylen; return 0; } static int stm32_cryp_aes_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { case 4: case 8: case 12: case 13: case 14: case 15: case 16: break; default: return -EINVAL; } return 0; } static int stm32_cryp_aes_ccm_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { case 4: case 6: case 8: case 10: case 12: case 14: case 16: break; default: return -EINVAL; } return 0; } static int stm32_cryp_aes_ecb_encrypt(struct skcipher_request *req) { if (req->cryptlen % AES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_AES | FLG_ECB | FLG_ENCRYPT); } static int stm32_cryp_aes_ecb_decrypt(struct skcipher_request *req) { if (req->cryptlen % AES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_AES | FLG_ECB); } static int stm32_cryp_aes_cbc_encrypt(struct skcipher_request *req) { if (req->cryptlen % AES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_AES | FLG_CBC | FLG_ENCRYPT); } static int stm32_cryp_aes_cbc_decrypt(struct skcipher_request *req) { if (req->cryptlen % AES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_AES | FLG_CBC); } static int stm32_cryp_aes_ctr_encrypt(struct skcipher_request *req) { if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_AES | FLG_CTR | FLG_ENCRYPT); } static int stm32_cryp_aes_ctr_decrypt(struct skcipher_request *req) { if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_AES | FLG_CTR); } static int stm32_cryp_aes_gcm_encrypt(struct aead_request *req) { return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM | FLG_ENCRYPT); } static int stm32_cryp_aes_gcm_decrypt(struct aead_request *req) { return stm32_cryp_aead_crypt(req, FLG_AES | FLG_GCM); } static inline int crypto_ccm_check_iv(const u8 *iv) { /* 2 <= L <= 8, so 1 <= L' <= 7. 
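iv[0] holds L' = L - 1 (RFC 3610), so the nonce occupies the next 15 - L bytes of the 16-byte IV.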
*/ if (iv[0] < 1 || iv[0] > 7) return -EINVAL; return 0; } static int stm32_cryp_aes_ccm_encrypt(struct aead_request *req) { int err; err = crypto_ccm_check_iv(req->iv); if (err) return err; return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM | FLG_ENCRYPT); } static int stm32_cryp_aes_ccm_decrypt(struct aead_request *req) { int err; err = crypto_ccm_check_iv(req->iv); if (err) return err; return stm32_cryp_aead_crypt(req, FLG_AES | FLG_CCM); } static int stm32_cryp_des_ecb_encrypt(struct skcipher_request *req) { if (req->cryptlen % DES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_DES | FLG_ECB | FLG_ENCRYPT); } static int stm32_cryp_des_ecb_decrypt(struct skcipher_request *req) { if (req->cryptlen % DES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_DES | FLG_ECB); } static int stm32_cryp_des_cbc_encrypt(struct skcipher_request *req) { if (req->cryptlen % DES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_DES | FLG_CBC | FLG_ENCRYPT); } static int stm32_cryp_des_cbc_decrypt(struct skcipher_request *req) { if (req->cryptlen % DES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_DES | FLG_CBC); } static int stm32_cryp_tdes_ecb_encrypt(struct skcipher_request *req) { if (req->cryptlen % DES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB | FLG_ENCRYPT); } static int stm32_cryp_tdes_ecb_decrypt(struct skcipher_request *req) { if (req->cryptlen % DES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_TDES | FLG_ECB); } static int stm32_cryp_tdes_cbc_encrypt(struct skcipher_request *req) { if (req->cryptlen % DES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC | FLG_ENCRYPT); } static int stm32_cryp_tdes_cbc_decrypt(struct skcipher_request *req) { if (req->cryptlen % DES_BLOCK_SIZE) return -EINVAL; if (req->cryptlen == 0) return 0; return stm32_cryp_crypt(req, FLG_TDES | FLG_CBC); } static int stm32_cryp_prepare_req(struct skcipher_request *req, struct aead_request *areq) { struct stm32_cryp_ctx *ctx; struct stm32_cryp *cryp; struct stm32_cryp_reqctx *rctx; struct scatterlist *in_sg; int ret; if (!req && !areq) return -EINVAL; ctx = req ? crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)) : crypto_aead_ctx(crypto_aead_reqtfm(areq)); cryp = ctx->cryp; rctx = req ? skcipher_request_ctx(req) : aead_request_ctx(areq); rctx->mode &= FLG_MODE_MASK; ctx->cryp = cryp; cryp->flags = (cryp->flags & ~FLG_MODE_MASK) | rctx->mode; cryp->hw_blocksize = is_aes(cryp) ? 
AES_BLOCK_SIZE : DES_BLOCK_SIZE; cryp->ctx = ctx; if (req) { cryp->req = req; cryp->areq = NULL; cryp->header_in = 0; cryp->payload_in = req->cryptlen; cryp->payload_out = req->cryptlen; cryp->authsize = 0; } else { /* * Length of input and output data: * Encryption case: * INPUT = AssocData || PlainText * <- assoclen -> <- cryptlen -> * * OUTPUT = AssocData || CipherText || AuthTag * <- assoclen -> <-- cryptlen --> <- authsize -> * * Decryption case: * INPUT = AssocData || CipherTex || AuthTag * <- assoclen ---> <---------- cryptlen ----------> * * OUTPUT = AssocData || PlainText * <- assoclen -> <- cryptlen - authsize -> */ cryp->areq = areq; cryp->req = NULL; cryp->authsize = crypto_aead_authsize(crypto_aead_reqtfm(areq)); if (is_encrypt(cryp)) { cryp->payload_in = areq->cryptlen; cryp->header_in = areq->assoclen; cryp->payload_out = areq->cryptlen; } else { cryp->payload_in = areq->cryptlen - cryp->authsize; cryp->header_in = areq->assoclen; cryp->payload_out = cryp->payload_in; } } in_sg = req ? req->src : areq->src; scatterwalk_start(&cryp->in_walk, in_sg); cryp->out_sg = req ? req->dst : areq->dst; scatterwalk_start(&cryp->out_walk, cryp->out_sg); if (is_gcm(cryp) || is_ccm(cryp)) { /* In output, jump after assoc data */ scatterwalk_copychunks(NULL, &cryp->out_walk, cryp->areq->assoclen, 2); } if (is_ctr(cryp)) memset(cryp->last_ctr, 0, sizeof(cryp->last_ctr)); ret = stm32_cryp_hw_init(cryp); return ret; } static int stm32_cryp_cipher_one_req(struct crypto_engine *engine, void *areq) { struct skcipher_request *req = container_of(areq, struct skcipher_request, base); struct stm32_cryp_ctx *ctx = crypto_skcipher_ctx( crypto_skcipher_reqtfm(req)); struct stm32_cryp *cryp = ctx->cryp; if (!cryp) return -ENODEV; return stm32_cryp_prepare_req(req, NULL) ?: stm32_cryp_cpu_start(cryp); } static int stm32_cryp_aead_one_req(struct crypto_engine *engine, void *areq) { struct aead_request *req = container_of(areq, struct aead_request, base); struct stm32_cryp_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct stm32_cryp *cryp = ctx->cryp; int err; if (!cryp) return -ENODEV; err = stm32_cryp_prepare_req(NULL, req); if (err) return err; if (unlikely(!cryp->payload_in && !cryp->header_in)) { /* No input data to process: get tag and finish */ stm32_cryp_finish_req(cryp, 0); return 0; } return stm32_cryp_cpu_start(cryp); } static int stm32_cryp_read_auth_tag(struct stm32_cryp *cryp) { u32 cfg, size_bit; unsigned int i; int ret = 0; /* Update Config */ cfg = stm32_cryp_read(cryp, cryp->caps->cr); cfg &= ~CR_PH_MASK; cfg |= CR_PH_FINAL; cfg &= ~CR_DEC_NOT_ENC; cfg |= CR_CRYPEN; stm32_cryp_write(cryp, cryp->caps->cr, cfg); if (is_gcm(cryp)) { /* GCM: write aad and payload size (in bits) */ size_bit = cryp->areq->assoclen * 8; if (cryp->caps->swap_final) size_bit = (__force u32)cpu_to_be32(size_bit); stm32_cryp_write(cryp, cryp->caps->din, 0); stm32_cryp_write(cryp, cryp->caps->din, size_bit); size_bit = is_encrypt(cryp) ? 
cryp->areq->cryptlen : cryp->areq->cryptlen - cryp->authsize; size_bit *= 8; if (cryp->caps->swap_final) size_bit = (__force u32)cpu_to_be32(size_bit); stm32_cryp_write(cryp, cryp->caps->din, 0); stm32_cryp_write(cryp, cryp->caps->din, size_bit); } else { /* CCM: write CTR0 */ u32 iv32[AES_BLOCK_32]; u8 *iv = (u8 *)iv32; __be32 *biv = (__be32 *)iv32; memcpy(iv, cryp->areq->iv, AES_BLOCK_SIZE); memset(iv + AES_BLOCK_SIZE - 1 - iv[0], 0, iv[0] + 1); for (i = 0; i < AES_BLOCK_32; i++) { u32 xiv = iv32[i]; if (!cryp->caps->padding_wa) xiv = be32_to_cpu(biv[i]); stm32_cryp_write(cryp, cryp->caps->din, xiv); } } /* Wait for output data */ ret = stm32_cryp_wait_output(cryp); if (ret) { dev_err(cryp->dev, "Timeout (read tag)\n"); return ret; } if (is_encrypt(cryp)) { u32 out_tag[AES_BLOCK_32]; /* Get and write tag */ readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32); scatterwalk_copychunks(out_tag, &cryp->out_walk, cryp->authsize, 1); } else { /* Get and check tag */ u32 in_tag[AES_BLOCK_32], out_tag[AES_BLOCK_32]; scatterwalk_copychunks(in_tag, &cryp->in_walk, cryp->authsize, 0); readsl(cryp->regs + cryp->caps->dout, out_tag, AES_BLOCK_32); if (crypto_memneq(in_tag, out_tag, cryp->authsize)) ret = -EBADMSG; } /* Disable cryp */ cfg &= ~CR_CRYPEN; stm32_cryp_write(cryp, cryp->caps->cr, cfg); return ret; } static void stm32_cryp_check_ctr_counter(struct stm32_cryp *cryp) { u32 cr; if (unlikely(cryp->last_ctr[3] == cpu_to_be32(0xFFFFFFFF))) { /* * In this case, we need to increment manually the ctr counter, * as HW doesn't handle the U32 carry. */ crypto_inc((u8 *)cryp->last_ctr, sizeof(cryp->last_ctr)); cr = stm32_cryp_read(cryp, cryp->caps->cr); stm32_cryp_write(cryp, cryp->caps->cr, cr & ~CR_CRYPEN); stm32_cryp_hw_write_iv(cryp, cryp->last_ctr); stm32_cryp_write(cryp, cryp->caps->cr, cr); } /* The IV registers are BE */ cryp->last_ctr[0] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0l)); cryp->last_ctr[1] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv0r)); cryp->last_ctr[2] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1l)); cryp->last_ctr[3] = cpu_to_be32(stm32_cryp_read(cryp, cryp->caps->iv1r)); } static void stm32_cryp_irq_read_data(struct stm32_cryp *cryp) { u32 block[AES_BLOCK_32]; readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_out), 1); cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); } static void stm32_cryp_irq_write_block(struct stm32_cryp *cryp) { u32 block[AES_BLOCK_32] = {0}; scatterwalk_copychunks(block, &cryp->in_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_in), 0); writesl(cryp->regs + cryp->caps->din, block, cryp->hw_blocksize / sizeof(u32)); cryp->payload_in -= min_t(size_t, cryp->hw_blocksize, cryp->payload_in); } static void stm32_cryp_irq_write_gcm_padded_data(struct stm32_cryp *cryp) { int err; u32 cfg, block[AES_BLOCK_32] = {0}; unsigned int i; /* 'Special workaround' procedure described in the datasheet */ /* a) disable ip */ stm32_cryp_write(cryp, cryp->caps->imsc, 0); cfg = stm32_cryp_read(cryp, cryp->caps->cr); cfg &= ~CR_CRYPEN; stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* b) Update IV1R */ stm32_cryp_write(cryp, cryp->caps->iv1r, cryp->gcm_ctr - 2); /* c) change mode to CTR */ cfg &= ~CR_ALGO_MASK; cfg |= CR_AES_CTR; stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* a) enable IP */ cfg |= CR_CRYPEN; stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* b) pad and write the last block */ 
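/*
 * Hedged note, not taken from the reference manual text itself: the
 * partial last block is run through plain AES-CTR with IV1R set to
 * gcm_ctr - 2, which appears to recreate the counter value the GCM
 * payload phase would have used, so the keystream matches. The
 * ciphertext captured below is then replayed in the GCM final phase so
 * the tag computation sees the correctly zero-padded block.
 */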
stm32_cryp_irq_write_block(cryp); /* wait end of process */ err = stm32_cryp_wait_output(cryp); if (err) { dev_err(cryp->dev, "Timeout (write gcm last data)\n"); return stm32_cryp_finish_req(cryp, err); } /* c) get and store encrypted data */ /* * Same code as stm32_cryp_irq_read_data(), but we want to store * block value */ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_out), 1); cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); /* d) change mode back to AES GCM */ cfg &= ~CR_ALGO_MASK; cfg |= CR_AES_GCM; stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* e) change phase to Final */ cfg &= ~CR_PH_MASK; cfg |= CR_PH_FINAL; stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* f) write padded data */ writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32); /* g) Empty fifo out */ err = stm32_cryp_wait_output(cryp); if (err) { dev_err(cryp->dev, "Timeout (write gcm padded data)\n"); return stm32_cryp_finish_req(cryp, err); } for (i = 0; i < AES_BLOCK_32; i++) stm32_cryp_read(cryp, cryp->caps->dout); /* h) run the he normal Final phase */ stm32_cryp_finish_req(cryp, 0); } static void stm32_cryp_irq_set_npblb(struct stm32_cryp *cryp) { u32 cfg; /* disable ip, set NPBLB and reneable ip */ cfg = stm32_cryp_read(cryp, cryp->caps->cr); cfg &= ~CR_CRYPEN; stm32_cryp_write(cryp, cryp->caps->cr, cfg); cfg |= (cryp->hw_blocksize - cryp->payload_in) << CR_NBPBL_SHIFT; cfg |= CR_CRYPEN; stm32_cryp_write(cryp, cryp->caps->cr, cfg); } static void stm32_cryp_irq_write_ccm_padded_data(struct stm32_cryp *cryp) { int err = 0; u32 cfg, iv1tmp; u32 cstmp1[AES_BLOCK_32], cstmp2[AES_BLOCK_32]; u32 block[AES_BLOCK_32] = {0}; unsigned int i; /* 'Special workaround' procedure described in the datasheet */ /* a) disable ip */ stm32_cryp_write(cryp, cryp->caps->imsc, 0); cfg = stm32_cryp_read(cryp, cryp->caps->cr); cfg &= ~CR_CRYPEN; stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* b) get IV1 from CRYP_CSGCMCCM7 */ iv1tmp = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + 7 * 4); /* c) Load CRYP_CSGCMCCMxR */ for (i = 0; i < ARRAY_SIZE(cstmp1); i++) cstmp1[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4); /* d) Write IV1R */ stm32_cryp_write(cryp, cryp->caps->iv1r, iv1tmp); /* e) change mode to CTR */ cfg &= ~CR_ALGO_MASK; cfg |= CR_AES_CTR; stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* a) enable IP */ cfg |= CR_CRYPEN; stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* b) pad and write the last block */ stm32_cryp_irq_write_block(cryp); /* wait end of process */ err = stm32_cryp_wait_output(cryp); if (err) { dev_err(cryp->dev, "Timeout (write ccm padded data)\n"); return stm32_cryp_finish_req(cryp, err); } /* c) get and store decrypted data */ /* * Same code as stm32_cryp_irq_read_data(), but we want to store * block value */ readsl(cryp->regs + cryp->caps->dout, block, cryp->hw_blocksize / sizeof(u32)); scatterwalk_copychunks(block, &cryp->out_walk, min_t(size_t, cryp->hw_blocksize, cryp->payload_out), 1); cryp->payload_out -= min_t(size_t, cryp->hw_blocksize, cryp->payload_out); /* d) Load again CRYP_CSGCMCCMxR */ for (i = 0; i < ARRAY_SIZE(cstmp2); i++) cstmp2[i] = stm32_cryp_read(cryp, CRYP_CSGCMCCM0R + i * 4); /* e) change mode back to AES CCM */ cfg &= ~CR_ALGO_MASK; cfg |= CR_AES_CCM; stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* f) change phase to header */ cfg &= ~CR_PH_MASK; cfg |= CR_PH_HEADER; stm32_cryp_write(cryp, cryp->caps->cr, cfg); /* g) XOR and write padded 
data */ for (i = 0; i < ARRAY_SIZE(block); i++) { block[i] ^= cstmp1[i]; block[i] ^= cstmp2[i]; stm32_cryp_write(cryp, cryp->caps->din, block[i]); } /* h) wait for completion */ err = stm32_cryp_wait_busy(cryp); if (err) dev_err(cryp->dev, "Timeout (write ccm padded data)\n"); /* i) run the he normal Final phase */ stm32_cryp_finish_req(cryp, err); } static void stm32_cryp_irq_write_data(struct stm32_cryp *cryp) { if (unlikely(!cryp->payload_in)) { dev_warn(cryp->dev, "No more data to process\n"); return; } if (unlikely(cryp->payload_in < AES_BLOCK_SIZE && (stm32_cryp_get_hw_mode(cryp) == CR_AES_GCM) && is_encrypt(cryp))) { /* Padding for AES GCM encryption */ if (cryp->caps->padding_wa) { /* Special case 1 */ stm32_cryp_irq_write_gcm_padded_data(cryp); return; } /* Setting padding bytes (NBBLB) */ stm32_cryp_irq_set_npblb(cryp); } if (unlikely((cryp->payload_in < AES_BLOCK_SIZE) && (stm32_cryp_get_hw_mode(cryp) == CR_AES_CCM) && is_decrypt(cryp))) { /* Padding for AES CCM decryption */ if (cryp->caps->padding_wa) { /* Special case 2 */ stm32_cryp_irq_write_ccm_padded_data(cryp); return; } /* Setting padding bytes (NBBLB) */ stm32_cryp_irq_set_npblb(cryp); } if (is_aes(cryp) && is_ctr(cryp)) stm32_cryp_check_ctr_counter(cryp); stm32_cryp_irq_write_block(cryp); } static void stm32_cryp_irq_write_gcmccm_header(struct stm32_cryp *cryp) { u32 block[AES_BLOCK_32] = {0}; size_t written; written = min_t(size_t, AES_BLOCK_SIZE, cryp->header_in); scatterwalk_copychunks(block, &cryp->in_walk, written, 0); writesl(cryp->regs + cryp->caps->din, block, AES_BLOCK_32); cryp->header_in -= written; stm32_crypt_gcmccm_end_header(cryp); } static irqreturn_t stm32_cryp_irq_thread(int irq, void *arg) { struct stm32_cryp *cryp = arg; u32 ph; u32 it_mask = stm32_cryp_read(cryp, cryp->caps->imsc); if (cryp->irq_status & MISR_OUT) /* Output FIFO IRQ: read data */ stm32_cryp_irq_read_data(cryp); if (cryp->irq_status & MISR_IN) { if (is_gcm(cryp) || is_ccm(cryp)) { ph = stm32_cryp_read(cryp, cryp->caps->cr) & CR_PH_MASK; if (unlikely(ph == CR_PH_HEADER)) /* Write Header */ stm32_cryp_irq_write_gcmccm_header(cryp); else /* Input FIFO IRQ: write data */ stm32_cryp_irq_write_data(cryp); if (is_gcm(cryp)) cryp->gcm_ctr++; } else { /* Input FIFO IRQ: write data */ stm32_cryp_irq_write_data(cryp); } } /* Mask useless interrupts */ if (!cryp->payload_in && !cryp->header_in) it_mask &= ~IMSCR_IN; if (!cryp->payload_out) it_mask &= ~IMSCR_OUT; stm32_cryp_write(cryp, cryp->caps->imsc, it_mask); if (!cryp->payload_in && !cryp->header_in && !cryp->payload_out) stm32_cryp_finish_req(cryp, 0); return IRQ_HANDLED; } static irqreturn_t stm32_cryp_irq(int irq, void *arg) { struct stm32_cryp *cryp = arg; cryp->irq_status = stm32_cryp_read(cryp, cryp->caps->mis); return IRQ_WAKE_THREAD; } static struct skcipher_engine_alg crypto_algs[] = { { .base = { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "stm32-ecb-aes", .base.cra_priority = 200, .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = stm32_cryp_aes_setkey, .encrypt = stm32_cryp_aes_ecb_encrypt, .decrypt = stm32_cryp_aes_ecb_decrypt, }, .op = { .do_one_request = stm32_cryp_cipher_one_req, }, }, { .base = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "stm32-cbc-aes", .base.cra_priority = 200, .base.cra_flags = CRYPTO_ALG_ASYNC, 
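/*
 * CBC processes whole 16-byte AES blocks, hence the cra_blocksize below;
 * the ctr(aes) entry further down advertises a blocksize of 1 because CTR
 * behaves as a stream cipher.
 */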
.base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = stm32_cryp_aes_setkey, .encrypt = stm32_cryp_aes_cbc_encrypt, .decrypt = stm32_cryp_aes_cbc_decrypt, }, .op = { .do_one_request = stm32_cryp_cipher_one_req, }, }, { .base = { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "stm32-ctr-aes", .base.cra_priority = 200, .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = stm32_cryp_aes_setkey, .encrypt = stm32_cryp_aes_ctr_encrypt, .decrypt = stm32_cryp_aes_ctr_decrypt, }, .op = { .do_one_request = stm32_cryp_cipher_one_req, }, }, { .base = { .base.cra_name = "ecb(des)", .base.cra_driver_name = "stm32-ecb-des", .base.cra_priority = 200, .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, .min_keysize = DES_BLOCK_SIZE, .max_keysize = DES_BLOCK_SIZE, .setkey = stm32_cryp_des_setkey, .encrypt = stm32_cryp_des_ecb_encrypt, .decrypt = stm32_cryp_des_ecb_decrypt, }, .op = { .do_one_request = stm32_cryp_cipher_one_req, }, }, { .base = { .base.cra_name = "cbc(des)", .base.cra_driver_name = "stm32-cbc-des", .base.cra_priority = 200, .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, .min_keysize = DES_BLOCK_SIZE, .max_keysize = DES_BLOCK_SIZE, .ivsize = DES_BLOCK_SIZE, .setkey = stm32_cryp_des_setkey, .encrypt = stm32_cryp_des_cbc_encrypt, .decrypt = stm32_cryp_des_cbc_decrypt, }, .op = { .do_one_request = stm32_cryp_cipher_one_req, }, }, { .base = { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "stm32-ecb-des3", .base.cra_priority = 200, .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, .min_keysize = 3 * DES_BLOCK_SIZE, .max_keysize = 3 * DES_BLOCK_SIZE, .setkey = stm32_cryp_tdes_setkey, .encrypt = stm32_cryp_tdes_ecb_encrypt, .decrypt = stm32_cryp_tdes_ecb_decrypt, }, .op = { .do_one_request = stm32_cryp_cipher_one_req, }, }, { .base = { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "stm32-cbc-des3", .base.cra_priority = 200, .base.cra_flags = CRYPTO_ALG_ASYNC, .base.cra_blocksize = DES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct stm32_cryp_ctx), .base.cra_alignmask = 0, .base.cra_module = THIS_MODULE, .init = stm32_cryp_init_tfm, .min_keysize = 3 * DES_BLOCK_SIZE, .max_keysize = 3 * DES_BLOCK_SIZE, .ivsize = DES_BLOCK_SIZE, .setkey = stm32_cryp_tdes_setkey, .encrypt = stm32_cryp_tdes_cbc_encrypt, .decrypt = stm32_cryp_tdes_cbc_decrypt, }, .op = { .do_one_request = stm32_cryp_cipher_one_req, }, }, }; static struct aead_engine_alg aead_algs[] = { { .base.setkey = stm32_cryp_aes_aead_setkey, .base.setauthsize = stm32_cryp_aes_gcm_setauthsize, .base.encrypt = stm32_cryp_aes_gcm_encrypt, .base.decrypt = stm32_cryp_aes_gcm_decrypt, 
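/*
 * gcm(aes) uses the standard 96-bit nonce (ivsize = 12); the 32-bit
 * counter word is appended by the driver itself in stm32_cryp_gcm_init().
 */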
.base.init = stm32_cryp_aes_aead_init, .base.ivsize = 12, .base.maxauthsize = AES_BLOCK_SIZE, .base.base = { .cra_name = "gcm(aes)", .cra_driver_name = "stm32-gcm-aes", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct stm32_cryp_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .op = { .do_one_request = stm32_cryp_aead_one_req, }, }, { .base.setkey = stm32_cryp_aes_aead_setkey, .base.setauthsize = stm32_cryp_aes_ccm_setauthsize, .base.encrypt = stm32_cryp_aes_ccm_encrypt, .base.decrypt = stm32_cryp_aes_ccm_decrypt, .base.init = stm32_cryp_aes_aead_init, .base.ivsize = AES_BLOCK_SIZE, .base.maxauthsize = AES_BLOCK_SIZE, .base.base = { .cra_name = "ccm(aes)", .cra_driver_name = "stm32-ccm-aes", .cra_priority = 200, .cra_flags = CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct stm32_cryp_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .op = { .do_one_request = stm32_cryp_aead_one_req, }, }, }; static const struct stm32_cryp_caps ux500_data = { .aeads_support = false, .linear_aes_key = true, .kp_mode = false, .iv_protection = true, .swap_final = true, .padding_wa = true, .cr = UX500_CRYP_CR, .sr = UX500_CRYP_SR, .din = UX500_CRYP_DIN, .dout = UX500_CRYP_DOUT, .imsc = UX500_CRYP_IMSC, .mis = UX500_CRYP_MIS, .k1l = UX500_CRYP_K1L, .k1r = UX500_CRYP_K1R, .k3r = UX500_CRYP_K3R, .iv0l = UX500_CRYP_IV0L, .iv0r = UX500_CRYP_IV0R, .iv1l = UX500_CRYP_IV1L, .iv1r = UX500_CRYP_IV1R, }; static const struct stm32_cryp_caps f7_data = { .aeads_support = true, .linear_aes_key = false, .kp_mode = true, .iv_protection = false, .swap_final = true, .padding_wa = true, .cr = CRYP_CR, .sr = CRYP_SR, .din = CRYP_DIN, .dout = CRYP_DOUT, .imsc = CRYP_IMSCR, .mis = CRYP_MISR, .k1l = CRYP_K1LR, .k1r = CRYP_K1RR, .k3r = CRYP_K3RR, .iv0l = CRYP_IV0LR, .iv0r = CRYP_IV0RR, .iv1l = CRYP_IV1LR, .iv1r = CRYP_IV1RR, }; static const struct stm32_cryp_caps mp1_data = { .aeads_support = true, .linear_aes_key = false, .kp_mode = true, .iv_protection = false, .swap_final = false, .padding_wa = false, .cr = CRYP_CR, .sr = CRYP_SR, .din = CRYP_DIN, .dout = CRYP_DOUT, .imsc = CRYP_IMSCR, .mis = CRYP_MISR, .k1l = CRYP_K1LR, .k1r = CRYP_K1RR, .k3r = CRYP_K3RR, .iv0l = CRYP_IV0LR, .iv0r = CRYP_IV0RR, .iv1l = CRYP_IV1LR, .iv1r = CRYP_IV1RR, }; static const struct of_device_id stm32_dt_ids[] = { { .compatible = "stericsson,ux500-cryp", .data = &ux500_data}, { .compatible = "st,stm32f756-cryp", .data = &f7_data}, { .compatible = "st,stm32mp1-cryp", .data = &mp1_data}, {}, }; MODULE_DEVICE_TABLE(of, stm32_dt_ids); static int stm32_cryp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct stm32_cryp *cryp; struct reset_control *rst; int irq, ret; cryp = devm_kzalloc(dev, sizeof(*cryp), GFP_KERNEL); if (!cryp) return -ENOMEM; cryp->caps = of_device_get_match_data(dev); if (!cryp->caps) return -ENODEV; cryp->dev = dev; cryp->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(cryp->regs)) return PTR_ERR(cryp->regs); irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ret = devm_request_threaded_irq(dev, irq, stm32_cryp_irq, stm32_cryp_irq_thread, IRQF_ONESHOT, dev_name(dev), cryp); if (ret) { dev_err(dev, "Cannot grab IRQ\n"); return ret; } cryp->clk = devm_clk_get(dev, NULL); if (IS_ERR(cryp->clk)) { dev_err_probe(dev, PTR_ERR(cryp->clk), "Could not get clock\n"); return PTR_ERR(cryp->clk); } ret = clk_prepare_enable(cryp->clk); if (ret) { dev_err(cryp->dev, "Failed to enable clock\n"); return ret; } 
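/*
 * The clock is already running at this point, so runtime PM starts in the
 * active state; the pm_runtime_put_sync() at the end of probe then lets
 * the device autosuspend once registration is done.
 */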
pm_runtime_set_autosuspend_delay(dev, CRYP_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(dev); pm_runtime_get_noresume(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); rst = devm_reset_control_get(dev, NULL); if (IS_ERR(rst)) { ret = PTR_ERR(rst); if (ret == -EPROBE_DEFER) goto err_rst; } else { reset_control_assert(rst); udelay(2); reset_control_deassert(rst); } platform_set_drvdata(pdev, cryp); spin_lock(&cryp_list.lock); list_add(&cryp->list, &cryp_list.dev_list); spin_unlock(&cryp_list.lock); /* Initialize crypto engine */ cryp->engine = crypto_engine_alloc_init(dev, 1); if (!cryp->engine) { dev_err(dev, "Could not init crypto engine\n"); ret = -ENOMEM; goto err_engine1; } ret = crypto_engine_start(cryp->engine); if (ret) { dev_err(dev, "Could not start crypto engine\n"); goto err_engine2; } ret = crypto_engine_register_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs)); if (ret) { dev_err(dev, "Could not register algs\n"); goto err_algs; } if (cryp->caps->aeads_support) { ret = crypto_engine_register_aeads(aead_algs, ARRAY_SIZE(aead_algs)); if (ret) goto err_aead_algs; } dev_info(dev, "Initialized\n"); pm_runtime_put_sync(dev); return 0; err_aead_algs: crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs)); err_algs: err_engine2: crypto_engine_exit(cryp->engine); err_engine1: spin_lock(&cryp_list.lock); list_del(&cryp->list); spin_unlock(&cryp_list.lock); err_rst: pm_runtime_disable(dev); pm_runtime_put_noidle(dev); clk_disable_unprepare(cryp->clk); return ret; } static int stm32_cryp_remove(struct platform_device *pdev) { struct stm32_cryp *cryp = platform_get_drvdata(pdev); int ret; if (!cryp) return -ENODEV; ret = pm_runtime_resume_and_get(cryp->dev); if (ret < 0) return ret; if (cryp->caps->aeads_support) crypto_engine_unregister_aeads(aead_algs, ARRAY_SIZE(aead_algs)); crypto_engine_unregister_skciphers(crypto_algs, ARRAY_SIZE(crypto_algs)); crypto_engine_exit(cryp->engine); spin_lock(&cryp_list.lock); list_del(&cryp->list); spin_unlock(&cryp_list.lock); pm_runtime_disable(cryp->dev); pm_runtime_put_noidle(cryp->dev); clk_disable_unprepare(cryp->clk); return 0; } #ifdef CONFIG_PM static int stm32_cryp_runtime_suspend(struct device *dev) { struct stm32_cryp *cryp = dev_get_drvdata(dev); clk_disable_unprepare(cryp->clk); return 0; } static int stm32_cryp_runtime_resume(struct device *dev) { struct stm32_cryp *cryp = dev_get_drvdata(dev); int ret; ret = clk_prepare_enable(cryp->clk); if (ret) { dev_err(cryp->dev, "Failed to prepare_enable clock\n"); return ret; } return 0; } #endif static const struct dev_pm_ops stm32_cryp_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) SET_RUNTIME_PM_OPS(stm32_cryp_runtime_suspend, stm32_cryp_runtime_resume, NULL) }; static struct platform_driver stm32_cryp_driver = { .probe = stm32_cryp_probe, .remove = stm32_cryp_remove, .driver = { .name = DRIVER_NAME, .pm = &stm32_cryp_pm_ops, .of_match_table = stm32_dt_ids, }, }; module_platform_driver(stm32_cryp_driver); MODULE_AUTHOR("Fabien Dessenne <[email protected]>"); MODULE_DESCRIPTION("STMicrolectronics STM32 CRYP hardware driver"); MODULE_LICENSE("GPL");
linux-master
drivers/crypto/stm32/stm32-cryp.c
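The ux500 "key swizzling" used by the driver above is easier to see with a small standalone check: every byte is bit-mirrored and the 32-bit words are emitted in reverse order. The program below is an illustrative sketch, not part of the driver; it reimplements the nibble-mask trick next to a plain bit-reversal loop and compares them over all byte values, then swizzles a dummy 128-bit key. All helper names are invented for the example.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same nibble-mask trick as ux500_swap_bits_in_byte() in the driver. */
static uint8_t swap_bits_masked(uint8_t b)
{
	uint8_t n1, n2;

	n1 = ((b & 0xc0) >> 4) | (b & ~(0xc0 >> 4));
	n1 = ((n1 & 0x28) >> 2) | (n1 & ~(0x28 >> 2));
	n1 = (n1 & 0x1e) >> 1;

	n2 = ((b & 0x03) << 4) | (b & ~(0x03 << 4));
	n2 = ((n2 & 0x14) << 2) | (n2 & ~(0x14 << 2));
	n2 = (n2 & 0x78) << 1;

	return n1 | n2;
}

/* Straightforward reference: mirror bit i into bit 7 - i. */
static uint8_t swap_bits_loop(uint8_t b)
{
	uint8_t r = 0;

	for (int i = 0; i < 8; i++)
		if (b & (1u << i))
			r |= 1u << (7 - i);
	return r;
}

int main(void)
{
	uint8_t key[16], swz[16];

	/* The mask trick and the loop agree for every byte value. */
	for (int v = 0; v < 256; v++)
		assert(swap_bits_masked((uint8_t)v) == swap_bits_loop((uint8_t)v));

	/*
	 * Swizzle a dummy 128-bit key the way ux500_swizzle_key() does:
	 * words come out in reverse order, each byte bit-mirrored.
	 */
	for (int i = 0; i < 16; i++)
		key[i] = (uint8_t)i;
	for (int w = 0; w < 4; w++)
		for (int i = 0; i < 4; i++)
			swz[(3 - w) * 4 + i] = swap_bits_loop(key[w * 4 + i]);

	printf("swizzled key: ");
	for (int i = 0; i < 16; i++)
		printf("%02x", swz[i]);
	printf("\n");
	return 0;
}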
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) STMicroelectronics SA 2017 * Author: Fabien Dessenne <[email protected]> */ #include <linux/bitrev.h> #include <linux/clk.h> #include <linux/crc32.h> #include <linux/crc32poly.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <crypto/internal/hash.h> #include <asm/unaligned.h> #define DRIVER_NAME "stm32-crc32" #define CHKSUM_DIGEST_SIZE 4 #define CHKSUM_BLOCK_SIZE 1 /* Registers */ #define CRC_DR 0x00000000 #define CRC_CR 0x00000008 #define CRC_INIT 0x00000010 #define CRC_POL 0x00000014 /* Registers values */ #define CRC_CR_RESET BIT(0) #define CRC_CR_REV_IN_WORD (BIT(6) | BIT(5)) #define CRC_CR_REV_IN_BYTE BIT(5) #define CRC_CR_REV_OUT BIT(7) #define CRC32C_INIT_DEFAULT 0xFFFFFFFF #define CRC_AUTOSUSPEND_DELAY 50 static unsigned int burst_size; module_param(burst_size, uint, 0644); MODULE_PARM_DESC(burst_size, "Select burst byte size (0 unlimited)"); struct stm32_crc { struct list_head list; struct device *dev; void __iomem *regs; struct clk *clk; spinlock_t lock; }; struct stm32_crc_list { struct list_head dev_list; spinlock_t lock; /* protect dev_list */ }; static struct stm32_crc_list crc_list = { .dev_list = LIST_HEAD_INIT(crc_list.dev_list), .lock = __SPIN_LOCK_UNLOCKED(crc_list.lock), }; struct stm32_crc_ctx { u32 key; u32 poly; }; struct stm32_crc_desc_ctx { u32 partial; /* crc32c: partial in first 4 bytes of that struct */ }; static int stm32_crc32_cra_init(struct crypto_tfm *tfm) { struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); mctx->key = 0; mctx->poly = CRC32_POLY_LE; return 0; } static int stm32_crc32c_cra_init(struct crypto_tfm *tfm) { struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm); mctx->key = CRC32C_INIT_DEFAULT; mctx->poly = CRC32C_POLY_LE; return 0; } static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key, unsigned int keylen) { struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm); if (keylen != sizeof(u32)) return -EINVAL; mctx->key = get_unaligned_le32(key); return 0; } static struct stm32_crc *stm32_crc_get_next_crc(void) { struct stm32_crc *crc; spin_lock_bh(&crc_list.lock); crc = list_first_entry(&crc_list.dev_list, struct stm32_crc, list); if (crc) list_move_tail(&crc->list, &crc_list.dev_list); spin_unlock_bh(&crc_list.lock); return crc; } static int stm32_crc_init(struct shash_desc *desc) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; unsigned long flags; crc = stm32_crc_get_next_crc(); if (!crc) return -ENODEV; pm_runtime_get_sync(crc->dev); spin_lock_irqsave(&crc->lock, flags); /* Reset, set key, poly and configure in bit reverse mode */ writel_relaxed(bitrev32(mctx->key), crc->regs + CRC_INIT); writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, crc->regs + CRC_CR); /* Store partial result */ ctx->partial = readl_relaxed(crc->regs + CRC_DR); spin_unlock_irqrestore(&crc->lock, flags); pm_runtime_mark_last_busy(crc->dev); pm_runtime_put_autosuspend(crc->dev); return 0; } static int burst_update(struct shash_desc *desc, const u8 *d8, size_t length) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); struct stm32_crc *crc; crc = stm32_crc_get_next_crc(); if (!crc) return -ENODEV; pm_runtime_get_sync(crc->dev); if 
(!spin_trylock(&crc->lock)) { /* Hardware is busy, calculate crc32 by software */ if (mctx->poly == CRC32_POLY_LE) ctx->partial = crc32_le(ctx->partial, d8, length); else ctx->partial = __crc32c_le(ctx->partial, d8, length); goto pm_out; } /* * Restore previously calculated CRC for this context as init value * Restore polynomial configuration * Configure in register for word input data, * Configure out register in reversed bit mode data. */ writel_relaxed(bitrev32(ctx->partial), crc->regs + CRC_INIT); writel_relaxed(bitrev32(mctx->poly), crc->regs + CRC_POL); writel_relaxed(CRC_CR_RESET | CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, crc->regs + CRC_CR); if (d8 != PTR_ALIGN(d8, sizeof(u32))) { /* Configure for byte data */ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, crc->regs + CRC_CR); while (d8 != PTR_ALIGN(d8, sizeof(u32)) && length) { writeb_relaxed(*d8++, crc->regs + CRC_DR); length--; } /* Configure for word data */ writel_relaxed(CRC_CR_REV_IN_WORD | CRC_CR_REV_OUT, crc->regs + CRC_CR); } for (; length >= sizeof(u32); d8 += sizeof(u32), length -= sizeof(u32)) writel_relaxed(*((u32 *)d8), crc->regs + CRC_DR); if (length) { /* Configure for byte data */ writel_relaxed(CRC_CR_REV_IN_BYTE | CRC_CR_REV_OUT, crc->regs + CRC_CR); while (length--) writeb_relaxed(*d8++, crc->regs + CRC_DR); } /* Store partial result */ ctx->partial = readl_relaxed(crc->regs + CRC_DR); spin_unlock(&crc->lock); pm_out: pm_runtime_mark_last_busy(crc->dev); pm_runtime_put_autosuspend(crc->dev); return 0; } static int stm32_crc_update(struct shash_desc *desc, const u8 *d8, unsigned int length) { const unsigned int burst_sz = burst_size; unsigned int rem_sz; const u8 *cur; size_t size; int ret; if (!burst_sz) return burst_update(desc, d8, length); /* Digest first bytes not 32bit aligned at first pass in the loop */ size = min_t(size_t, length, burst_sz + (size_t)d8 - ALIGN_DOWN((size_t)d8, sizeof(u32))); for (rem_sz = length, cur = d8; rem_sz; rem_sz -= size, cur += size, size = min(rem_sz, burst_sz)) { ret = burst_update(desc, cur, size); if (ret) return ret; } return 0; } static int stm32_crc_final(struct shash_desc *desc, u8 *out) { struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc); struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm); /* Send computed CRC */ put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ? 
~ctx->partial : ctx->partial, out); return 0; } static int stm32_crc_finup(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { return stm32_crc_update(desc, data, length) ?: stm32_crc_final(desc, out); } static int stm32_crc_digest(struct shash_desc *desc, const u8 *data, unsigned int length, u8 *out) { return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out); } static unsigned int refcnt; static DEFINE_MUTEX(refcnt_lock); static struct shash_alg algs[] = { /* CRC-32 */ { .setkey = stm32_crc_setkey, .init = stm32_crc_init, .update = stm32_crc_update, .final = stm32_crc_final, .finup = stm32_crc_finup, .digest = stm32_crc_digest, .descsize = sizeof(struct stm32_crc_desc_ctx), .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32", .cra_driver_name = "stm32-crc32-crc32", .cra_priority = 200, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct stm32_crc_ctx), .cra_module = THIS_MODULE, .cra_init = stm32_crc32_cra_init, } }, /* CRC-32Castagnoli */ { .setkey = stm32_crc_setkey, .init = stm32_crc_init, .update = stm32_crc_update, .final = stm32_crc_final, .finup = stm32_crc_finup, .digest = stm32_crc_digest, .descsize = sizeof(struct stm32_crc_desc_ctx), .digestsize = CHKSUM_DIGEST_SIZE, .base = { .cra_name = "crc32c", .cra_driver_name = "stm32-crc32-crc32c", .cra_priority = 200, .cra_flags = CRYPTO_ALG_OPTIONAL_KEY, .cra_blocksize = CHKSUM_BLOCK_SIZE, .cra_alignmask = 3, .cra_ctxsize = sizeof(struct stm32_crc_ctx), .cra_module = THIS_MODULE, .cra_init = stm32_crc32c_cra_init, } } }; static int stm32_crc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct stm32_crc *crc; int ret; crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL); if (!crc) return -ENOMEM; crc->dev = dev; crc->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(crc->regs)) { dev_err(dev, "Cannot map CRC IO\n"); return PTR_ERR(crc->regs); } crc->clk = devm_clk_get(dev, NULL); if (IS_ERR(crc->clk)) { dev_err(dev, "Could not get clock\n"); return PTR_ERR(crc->clk); } ret = clk_prepare_enable(crc->clk); if (ret) { dev_err(crc->dev, "Failed to enable clock\n"); return ret; } pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(dev); pm_runtime_get_noresume(dev); pm_runtime_set_active(dev); pm_runtime_irq_safe(dev); pm_runtime_enable(dev); spin_lock_init(&crc->lock); platform_set_drvdata(pdev, crc); spin_lock(&crc_list.lock); list_add(&crc->list, &crc_list.dev_list); spin_unlock(&crc_list.lock); mutex_lock(&refcnt_lock); if (!refcnt) { ret = crypto_register_shashes(algs, ARRAY_SIZE(algs)); if (ret) { mutex_unlock(&refcnt_lock); dev_err(dev, "Failed to register\n"); clk_disable_unprepare(crc->clk); return ret; } } refcnt++; mutex_unlock(&refcnt_lock); dev_info(dev, "Initialized\n"); pm_runtime_put_sync(dev); return 0; } static int stm32_crc_remove(struct platform_device *pdev) { struct stm32_crc *crc = platform_get_drvdata(pdev); int ret = pm_runtime_get_sync(crc->dev); if (ret < 0) { pm_runtime_put_noidle(crc->dev); return ret; } spin_lock(&crc_list.lock); list_del(&crc->list); spin_unlock(&crc_list.lock); mutex_lock(&refcnt_lock); if (!--refcnt) crypto_unregister_shashes(algs, ARRAY_SIZE(algs)); mutex_unlock(&refcnt_lock); pm_runtime_disable(crc->dev); pm_runtime_put_noidle(crc->dev); clk_disable_unprepare(crc->clk); return 0; } static int __maybe_unused stm32_crc_suspend(struct device *dev) { struct stm32_crc *crc = dev_get_drvdata(dev); int ret; ret = 
pm_runtime_force_suspend(dev); if (ret) return ret; clk_unprepare(crc->clk); return 0; } static int __maybe_unused stm32_crc_resume(struct device *dev) { struct stm32_crc *crc = dev_get_drvdata(dev); int ret; ret = clk_prepare(crc->clk); if (ret) { dev_err(crc->dev, "Failed to prepare clock\n"); return ret; } return pm_runtime_force_resume(dev); } static int __maybe_unused stm32_crc_runtime_suspend(struct device *dev) { struct stm32_crc *crc = dev_get_drvdata(dev); clk_disable(crc->clk); return 0; } static int __maybe_unused stm32_crc_runtime_resume(struct device *dev) { struct stm32_crc *crc = dev_get_drvdata(dev); int ret; ret = clk_enable(crc->clk); if (ret) { dev_err(crc->dev, "Failed to enable clock\n"); return ret; } return 0; } static const struct dev_pm_ops stm32_crc_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(stm32_crc_suspend, stm32_crc_resume) SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend, stm32_crc_runtime_resume, NULL) }; static const struct of_device_id stm32_dt_ids[] = { { .compatible = "st,stm32f7-crc", }, {}, }; MODULE_DEVICE_TABLE(of, stm32_dt_ids); static struct platform_driver stm32_crc_driver = { .probe = stm32_crc_probe, .remove = stm32_crc_remove, .driver = { .name = DRIVER_NAME, .pm = &stm32_crc_pm_ops, .of_match_table = stm32_dt_ids, }, }; module_platform_driver(stm32_crc_driver); MODULE_AUTHOR("Fabien Dessenne <[email protected]>"); MODULE_DESCRIPTION("STMicrolectronics STM32 CRC32 hardware driver"); MODULE_LICENSE("GPL");
linux-master
drivers/crypto/stm32/stm32-crc32.c
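The stm32-crc32 driver above carries the running CRC in ctx->partial so that stm32_crc_update() can feed the peripheral in word-aligned bursts (and even fall back to software when the lock is contended) without changing the result. The standalone sketch below illustrates that property with a plain bitwise CRC-32 (reflected polynomial 0xEDB88320), not the hardware registers; the function and chunk sizes are invented for the example.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Bitwise reflected CRC-32, same convention as the kernel's crc32_le():
 * the caller supplies the seed and does any final inversion itself.
 */
static uint32_t crc32_le_bitwise(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

int main(void)
{
	static const char msg[] = "123456789";
	const uint8_t *data = (const uint8_t *)msg;
	size_t len = sizeof(msg) - 1;

	/* One-shot CRC over the whole buffer. */
	uint32_t one_shot = crc32_le_bitwise(0xFFFFFFFFu, data, len);

	/*
	 * Chunked CRC: carry the partial value between calls, the way the
	 * driver carries ctx->partial across burst_update() calls.
	 */
	uint32_t partial = 0xFFFFFFFFu;
	size_t chunks[] = { 1, 3, 5 };	/* arbitrary, unaligned split */
	size_t off = 0;

	for (size_t i = 0; i < sizeof(chunks) / sizeof(chunks[0]); i++) {
		partial = crc32_le_bitwise(partial, data + off, chunks[i]);
		off += chunks[i];
	}

	assert(off == len);
	assert(partial == one_shot);

	/* Standard CRC-32 check value for "123456789". */
	assert((partial ^ 0xFFFFFFFFu) == 0xCBF43926u);

	printf("crc32(\"123456789\") = 0x%08x\n", partial ^ 0xFFFFFFFFu);
	return 0;
}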
// SPDX-License-Identifier: GPL-2.0-or-later /* * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC * * Copyright (C) 2013-2015 Corentin LABBE <[email protected]> * * This file add support for AES cipher with 128,192,256 bits * keysize in CBC and ECB mode. * Add support also for DES and 3DES in CBC and ECB mode. * * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun4i-ss.h" static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_ss_ctx *ss = op->ss; unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); u32 mode = ctx->mode; /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ u32 rx_cnt = SS_RX_DEFAULT; u32 tx_cnt = 0; u32 spaces; u32 v; int err = 0; unsigned int i; unsigned int ileft = areq->cryptlen; unsigned int oleft = areq->cryptlen; unsigned int todo; unsigned long pi = 0, po = 0; /* progress for in and out */ bool miter_err; struct sg_mapping_iter mi, mo; unsigned int oi, oo; /* offset for in and out */ unsigned long flags; struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun4i_ss_alg_template *algt; if (!areq->cryptlen) return 0; if (!areq->src || !areq->dst) { dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n"); return -EINVAL; } if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) { scatterwalk_map_and_copy(ctx->backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0); } if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto); algt->stat_opti++; algt->stat_bytes += areq->cryptlen; } spin_lock_irqsave(&ss->slock, flags); for (i = 0; i < op->keylen / 4; i++) writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1); if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { v = *(u32 *)(areq->iv + i * 4); writesl(ss->base + SS_IV0 + i * 4, &v, 1); } } writel(mode, ss->base + SS_CTL); ileft = areq->cryptlen / 4; oleft = areq->cryptlen / 4; oi = 0; oo = 0; do { if (ileft) { sg_miter_start(&mi, areq->src, sg_nents(areq->src), SG_MITER_FROM_SG | SG_MITER_ATOMIC); if (pi) sg_miter_skip(&mi, pi); miter_err = sg_miter_next(&mi); if (!miter_err || !mi.addr) { dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); err = -EINVAL; goto release_ss; } todo = min(rx_cnt, ileft); todo = min_t(size_t, todo, (mi.length - oi) / 4); if (todo) { ileft -= todo; writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); oi += todo * 4; } if (oi == mi.length) { pi += mi.length; oi = 0; } sg_miter_stop(&mi); } spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); tx_cnt = SS_TXFIFO_SPACES(spaces); sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), SG_MITER_TO_SG | SG_MITER_ATOMIC); if (po) sg_miter_skip(&mo, po); miter_err = sg_miter_next(&mo); if (!miter_err || !mo.addr) { dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); err = -EINVAL; goto release_ss; } todo = min(tx_cnt, oleft); todo = min_t(size_t, todo, (mo.length - oo) / 4); if (todo) { oleft -= todo; readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); oo += todo * 4; } if (oo == mo.length) { oo = 0; po += mo.length; } sg_miter_stop(&mo); } while (oleft); if (areq->iv) { if (mode & SS_DECRYPTION) { memcpy(areq->iv, ctx->backup_iv, ivsize); memzero_explicit(ctx->backup_iv, ivsize); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize, 
ivsize, 0); } } release_ss: writel(0, ss->base + SS_CTL); spin_unlock_irqrestore(&ss->slock, flags); return err; } static int noinline_for_stack sun4i_ss_cipher_poll_fallback(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); int err; struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun4i_ss_alg_template *algt; if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto); algt->stat_fb++; } skcipher_request_set_tfm(&ctx->fallback_req, op->fallback_tfm); skcipher_request_set_callback(&ctx->fallback_req, areq->base.flags, areq->base.complete, areq->base.data); skcipher_request_set_crypt(&ctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (ctx->mode & SS_DECRYPTION) err = crypto_skcipher_decrypt(&ctx->fallback_req); else err = crypto_skcipher_encrypt(&ctx->fallback_req); return err; } /* Generic function that support SG with size not multiple of 4 */ static int sun4i_ss_cipher_poll(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_ss_ctx *ss = op->ss; int no_chunk = 1; struct scatterlist *in_sg = areq->src; struct scatterlist *out_sg = areq->dst; unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct sun4i_cipher_req_ctx *ctx = skcipher_request_ctx(areq); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun4i_ss_alg_template *algt; u32 mode = ctx->mode; /* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */ u32 rx_cnt = SS_RX_DEFAULT; u32 tx_cnt = 0; u32 v; u32 spaces; int err = 0; unsigned int i; unsigned int ileft = areq->cryptlen; unsigned int oleft = areq->cryptlen; unsigned int todo; struct sg_mapping_iter mi, mo; unsigned long pi = 0, po = 0; /* progress for in and out */ bool miter_err; unsigned int oi, oo; /* offset for in and out */ unsigned int ob = 0; /* offset in buf */ unsigned int obo = 0; /* offset in bufo*/ unsigned int obl = 0; /* length of data in bufo */ unsigned long flags; bool need_fallback = false; if (!areq->cryptlen) return 0; if (!areq->src || !areq->dst) { dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n"); return -EINVAL; } algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto); if (areq->cryptlen % algt->alg.crypto.base.cra_blocksize) need_fallback = true; /* * if we have only SGs with size multiple of 4, * we can use the SS optimized function */ while (in_sg && no_chunk == 1) { if ((in_sg->length | in_sg->offset) & 3u) no_chunk = 0; in_sg = sg_next(in_sg); } while (out_sg && no_chunk == 1) { if ((out_sg->length | out_sg->offset) & 3u) no_chunk = 0; out_sg = sg_next(out_sg); } if (no_chunk == 1 && !need_fallback) return sun4i_ss_opti_poll(areq); if (need_fallback) return sun4i_ss_cipher_poll_fallback(areq); if (areq->iv && ivsize > 0 && mode & SS_DECRYPTION) { scatterwalk_map_and_copy(ctx->backup_iv, areq->src, areq->cryptlen - ivsize, ivsize, 0); } if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { algt->stat_req++; algt->stat_bytes += areq->cryptlen; } spin_lock_irqsave(&ss->slock, flags); for (i = 0; i < op->keylen / 4; i++) writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1); if (areq->iv) { for (i = 0; i < 4 && i < ivsize / 4; i++) { v = *(u32 *)(areq->iv + i * 4); writesl(ss->base + SS_IV0 + i * 4, &v, 1); } } writel(mode, ss->base + SS_CTL); ileft = 
areq->cryptlen; oleft = areq->cryptlen; oi = 0; oo = 0; while (oleft) { if (ileft) { sg_miter_start(&mi, areq->src, sg_nents(areq->src), SG_MITER_FROM_SG | SG_MITER_ATOMIC); if (pi) sg_miter_skip(&mi, pi); miter_err = sg_miter_next(&mi); if (!miter_err || !mi.addr) { dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); err = -EINVAL; goto release_ss; } /* * todo is the number of consecutive 4byte word that we * can read from current SG */ todo = min(rx_cnt, ileft / 4); todo = min_t(size_t, todo, (mi.length - oi) / 4); if (todo && !ob) { writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo); ileft -= todo * 4; oi += todo * 4; } else { /* * not enough consecutive bytes, so we need to * linearize in buf. todo is in bytes * After that copy, if we have a multiple of 4 * we need to be able to write all buf in one * pass, so it is why we min() with rx_cnt */ todo = min(rx_cnt * 4 - ob, ileft); todo = min_t(size_t, todo, mi.length - oi); memcpy(ss->buf + ob, mi.addr + oi, todo); ileft -= todo; oi += todo; ob += todo; if (!(ob % 4)) { writesl(ss->base + SS_RXFIFO, ss->buf, ob / 4); ob = 0; } } if (oi == mi.length) { pi += mi.length; oi = 0; } sg_miter_stop(&mi); } spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); tx_cnt = SS_TXFIFO_SPACES(spaces); if (!tx_cnt) continue; sg_miter_start(&mo, areq->dst, sg_nents(areq->dst), SG_MITER_TO_SG | SG_MITER_ATOMIC); if (po) sg_miter_skip(&mo, po); miter_err = sg_miter_next(&mo); if (!miter_err || !mo.addr) { dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n"); err = -EINVAL; goto release_ss; } /* todo in 4bytes word */ todo = min(tx_cnt, oleft / 4); todo = min_t(size_t, todo, (mo.length - oo) / 4); if (todo) { readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo); oleft -= todo * 4; oo += todo * 4; if (oo == mo.length) { po += mo.length; oo = 0; } } else { /* * read obl bytes in bufo, we read at maximum for * emptying the device */ readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt); obl = tx_cnt * 4; obo = 0; do { /* * how many bytes we can copy ? 
* no more than remaining SG size * no more than remaining buffer * no need to test against oleft */ todo = min_t(size_t, mo.length - oo, obl - obo); memcpy(mo.addr + oo, ss->bufo + obo, todo); oleft -= todo; obo += todo; oo += todo; if (oo == mo.length) { po += mo.length; sg_miter_next(&mo); oo = 0; } } while (obo < obl); /* bufo must be fully used here */ } sg_miter_stop(&mo); } if (areq->iv) { if (mode & SS_DECRYPTION) { memcpy(areq->iv, ctx->backup_iv, ivsize); memzero_explicit(ctx->backup_iv, ivsize); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, areq->cryptlen - ivsize, ivsize, 0); } } release_ss: writel(0, ss->base + SS_CTL); spin_unlock_irqrestore(&ss->slock, flags); return err; } /* CBC AES */ int sun4i_ss_cbc_aes_encrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } int sun4i_ss_cbc_aes_decrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } /* ECB AES */ int sun4i_ss_ecb_aes_encrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } int sun4i_ss_ecb_aes_decrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } /* CBC DES */ int sun4i_ss_cbc_des_encrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } int sun4i_ss_cbc_des_decrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } /* ECB DES */ int sun4i_ss_ecb_des_encrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } int sun4i_ss_ecb_des_decrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | op->keymode; return 
sun4i_ss_cipher_poll(areq); } /* CBC 3DES */ int sun4i_ss_cbc_des3_encrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } int sun4i_ss_cbc_des3_decrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } /* ECB 3DES */ int sun4i_ss_ecb_des3_encrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } int sun4i_ss_ecb_des3_decrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION | op->keymode; return sun4i_ss_cipher_poll(areq); } int sun4i_ss_cipher_init(struct crypto_tfm *tfm) { struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); struct sun4i_ss_alg_template *algt; const char *name = crypto_tfm_alg_name(tfm); int err; memset(op, 0, sizeof(struct sun4i_tfm_ctx)); algt = container_of(tfm->__crt_alg, struct sun4i_ss_alg_template, alg.crypto.base); op->ss = algt->ss; op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), sizeof(struct sun4i_cipher_req_ctx) + crypto_skcipher_reqsize(op->fallback_tfm)); err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) goto error_pm; return 0; error_pm: crypto_free_skcipher(op->fallback_tfm); return err; } void sun4i_ss_cipher_exit(struct crypto_tfm *tfm) { struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); crypto_free_skcipher(op->fallback_tfm); pm_runtime_put(op->ss->dev); } /* check and set the AES key, prepare the mode to be used */ int sun4i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun4i_ss_ctx *ss = op->ss; switch (keylen) { case 128 / 8: op->keymode = SS_AES_128BITS; break; case 192 / 8: op->keymode = SS_AES_192BITS; break; case 256 / 8: op->keymode = SS_AES_256BITS; break; default: dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen); return -EINVAL; } op->keylen = keylen; memcpy(op->key, key, keylen); crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } /* check and set the DES key, prepare the mode to be used */ int sun4i_ss_des_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); int err; err = verify_skcipher_des_key(tfm, key); if (err) return 
err; op->keylen = keylen; memcpy(op->key, key, keylen); crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } /* check and set the 3DES key, prepare the mode to be used */ int sun4i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct sun4i_tfm_ctx *op = crypto_skcipher_ctx(tfm); int err; err = verify_skcipher_des3_key(tfm, key); if (err) return err; op->keylen = keylen; memcpy(op->key, key, keylen); crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); }
linux-master
drivers/crypto/allwinner/sun4i-ss/sun4i-ss-cipher.c
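sun4i_ss_cipher_poll() above only moves whole 32-bit words through the RX/TX FIFOs, so bytes from fragments that are not word-sized are first staged in ss->buf and flushed once a multiple of four bytes has accumulated. Below is a minimal userspace sketch of that staging rule; fifo_write_words() is a hypothetical stand-in for writesl() and nothing here is driver API.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical stand-in for writesl(ss->base + SS_RXFIFO, ...) */
static void fifo_write_words(const uint32_t *words, size_t n)
{
	for (size_t i = 0; i < n; i++)
		printf("FIFO <- 0x%08" PRIx32 "\n", words[i]);
}

int main(void)
{
	uint32_t buf[16];				/* word-aligned bounce buffer, like ss->buf */
	uint8_t *p = (uint8_t *)buf;
	const uint8_t frag1[] = { 1, 2, 3 };		/* 3 bytes: not a whole word yet */
	const uint8_t frag2[] = { 4, 5, 6, 7, 8 };	/* 5 more bytes: now 8, two words */
	size_t ob = 0;					/* bytes staged so far */

	memcpy(p + ob, frag1, sizeof(frag1));
	ob += sizeof(frag1);
	memcpy(p + ob, frag2, sizeof(frag2));
	ob += sizeof(frag2);

	if (!(ob % 4)) {				/* flush only whole 32-bit words */
		fifo_write_words(buf, ob / 4);
		ob = 0;
	}
	return 0;
}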
// SPDX-License-Identifier: GPL-2.0-or-later /* * sun4i-ss-hash.c - hardware cryptographic accelerator for Allwinner A20 SoC * * Copyright (C) 2013-2015 Corentin LABBE <[email protected]> * * This file add support for MD5 and SHA1. * * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun4i-ss.h" #include <asm/unaligned.h> #include <linux/scatterlist.h> /* This is a totally arbitrary value */ #define SS_TIMEOUT 100 int sun4i_hash_crainit(struct crypto_tfm *tfm) { struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); struct sun4i_ss_alg_template *algt; int err; memset(op, 0, sizeof(struct sun4i_tfm_ctx)); algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); op->ss = algt->ss; err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) return err; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct sun4i_req_ctx)); return 0; } void sun4i_hash_craexit(struct crypto_tfm *tfm) { struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm); pm_runtime_put(op->ss->dev); } /* sun4i_hash_init: initialize request context */ int sun4i_hash_init(struct ahash_request *areq) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); struct sun4i_ss_alg_template *algt; memset(op, 0, sizeof(struct sun4i_req_ctx)); algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); op->mode = algt->mode; return 0; } int sun4i_hash_export_md5(struct ahash_request *areq, void *out) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); struct md5_state *octx = out; int i; octx->byte_count = op->byte_count + op->len; memcpy(octx->block, op->buf, op->len); if (op->byte_count) { for (i = 0; i < 4; i++) octx->hash[i] = op->hash[i]; } else { octx->hash[0] = SHA1_H0; octx->hash[1] = SHA1_H1; octx->hash[2] = SHA1_H2; octx->hash[3] = SHA1_H3; } return 0; } int sun4i_hash_import_md5(struct ahash_request *areq, const void *in) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); const struct md5_state *ictx = in; int i; sun4i_hash_init(areq); op->byte_count = ictx->byte_count & ~0x3F; op->len = ictx->byte_count & 0x3F; memcpy(op->buf, ictx->block, op->len); for (i = 0; i < 4; i++) op->hash[i] = ictx->hash[i]; return 0; } int sun4i_hash_export_sha1(struct ahash_request *areq, void *out) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); struct sha1_state *octx = out; int i; octx->count = op->byte_count + op->len; memcpy(octx->buffer, op->buf, op->len); if (op->byte_count) { for (i = 0; i < 5; i++) octx->state[i] = op->hash[i]; } else { octx->state[0] = SHA1_H0; octx->state[1] = SHA1_H1; octx->state[2] = SHA1_H2; octx->state[3] = SHA1_H3; octx->state[4] = SHA1_H4; } return 0; } int sun4i_hash_import_sha1(struct ahash_request *areq, const void *in) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); const struct sha1_state *ictx = in; int i; sun4i_hash_init(areq); op->byte_count = ictx->count & ~0x3F; op->len = ictx->count & 0x3F; memcpy(op->buf, ictx->buffer, op->len); for (i = 0; i < 5; i++) op->hash[i] = ictx->state[i]; return 0; } #define SS_HASH_UPDATE 1 #define SS_HASH_FINAL 2 /* * sun4i_hash_update: update hash engine * * Could be used for both SHA1 and MD5 * Write data by step of 32bits and put then in the SS. * * Since we cannot leave partial data and hash state in the engine, * we need to get the hash state at the end of this function. 
* We can get the hash state every 64 bytes * * So the first work is to get the number of bytes to write to SS modulo 64 * The extra bytes will go to a temporary buffer op->buf storing op->len bytes * * So at the begin of update() * if op->len + areq->nbytes < 64 * => all data will be written to wait buffer (op->buf) and end=0 * if not, write all data from op->buf to the device and position end to * complete to 64bytes * * example 1: * update1 60o => op->len=60 * update2 60o => need one more word to have 64 bytes * end=4 * so write all data from op->buf and one word of SGs * write remaining data in op->buf * final state op->len=56 */ static int sun4i_hash(struct ahash_request *areq) { /* * i is the total bytes read from SGs, to be compared to areq->nbytes * i is important because we cannot rely on SG length since the sum of * SG->length could be greater than areq->nbytes * * end is the position when we need to stop writing to the device, * to be compared to i * * in_i: advancement in the current SG */ unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo; unsigned int in_i = 0; u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0; struct sun4i_req_ctx *op = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); struct sun4i_ss_ctx *ss = tfmctx->ss; struct sun4i_ss_alg_template *algt; struct scatterlist *in_sg = areq->src; struct sg_mapping_iter mi; int in_r, err = 0; size_t copied = 0; u32 wb = 0; dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x", __func__, crypto_tfm_alg_name(areq->base.tfm), op->byte_count, areq->nbytes, op->mode, op->len, op->hash[0]); if (unlikely(!areq->nbytes) && !(op->flags & SS_HASH_FINAL)) return 0; /* protect against overflow */ if (unlikely(areq->nbytes > UINT_MAX - op->len)) { dev_err(ss->dev, "Cannot process too large request\n"); return -EINVAL; } if (op->len + areq->nbytes < 64 && !(op->flags & SS_HASH_FINAL)) { /* linearize data to op->buf */ copied = sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), op->buf + op->len, areq->nbytes, 0); op->len += copied; return 0; } spin_lock_bh(&ss->slock); /* * if some data have been processed before, * we need to restore the partial hash state */ if (op->byte_count) { ivmode = SS_IV_ARBITRARY; for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) writel(op->hash[i], ss->base + SS_IV0 + i * 4); } /* Enable the device */ writel(op->mode | SS_ENABLED | ivmode, ss->base + SS_CTL); if (!(op->flags & SS_HASH_UPDATE)) goto hash_final; /* start of handling data */ if (!(op->flags & SS_HASH_FINAL)) { end = ((areq->nbytes + op->len) / 64) * 64 - op->len; if (end > areq->nbytes || areq->nbytes - end > 63) { dev_err(ss->dev, "ERROR: Bound error %u %u\n", end, areq->nbytes); err = -EINVAL; goto release_ss; } } else { /* Since we have the flag final, we can go up to modulo 4 */ if (areq->nbytes < 4) end = 0; else end = ((areq->nbytes + op->len) / 4) * 4 - op->len; } /* TODO if SGlen % 4 and !op->len then DMA */ i = 1; while (in_sg && i == 1) { if (in_sg->length % 4) i = 0; in_sg = sg_next(in_sg); } if (i == 1 && !op->len && areq->nbytes) dev_dbg(ss->dev, "We can DMA\n"); i = 0; sg_miter_start(&mi, areq->src, sg_nents(areq->src), SG_MITER_FROM_SG | SG_MITER_ATOMIC); sg_miter_next(&mi); in_i = 0; do { /* * we need to linearize in two case: * - the buffer is already used * - the SG does not have enough byte remaining ( < 4) */ if (op->len || (mi.length - 
in_i) < 4) { /* * if we have entered here we have two reason to stop * - the buffer is full * - reach the end */ while (op->len < 64 && i < end) { /* how many bytes we can read from current SG */ in_r = min(end - i, 64 - op->len); in_r = min_t(size_t, mi.length - in_i, in_r); memcpy(op->buf + op->len, mi.addr + in_i, in_r); op->len += in_r; i += in_r; in_i += in_r; if (in_i == mi.length) { sg_miter_next(&mi); in_i = 0; } } if (op->len > 3 && !(op->len % 4)) { /* write buf to the device */ writesl(ss->base + SS_RXFIFO, op->buf, op->len / 4); op->byte_count += op->len; op->len = 0; } } if (mi.length - in_i > 3 && i < end) { /* how many bytes we can read from current SG */ in_r = min_t(size_t, mi.length - in_i, areq->nbytes - i); in_r = min_t(size_t, ((mi.length - in_i) / 4) * 4, in_r); /* how many bytes we can write in the device*/ todo = min3((u32)(end - i) / 4, rx_cnt, (u32)in_r / 4); writesl(ss->base + SS_RXFIFO, mi.addr + in_i, todo); op->byte_count += todo * 4; i += todo * 4; in_i += todo * 4; rx_cnt -= todo; if (!rx_cnt) { spaces = readl(ss->base + SS_FCSR); rx_cnt = SS_RXFIFO_SPACES(spaces); } if (in_i == mi.length) { sg_miter_next(&mi); in_i = 0; } } } while (i < end); /* * Now we have written to the device all that we can, * store the remaining bytes in op->buf */ if ((areq->nbytes - i) < 64) { while (i < areq->nbytes && in_i < mi.length && op->len < 64) { /* how many bytes we can read from current SG */ in_r = min(areq->nbytes - i, 64 - op->len); in_r = min_t(size_t, mi.length - in_i, in_r); memcpy(op->buf + op->len, mi.addr + in_i, in_r); op->len += in_r; i += in_r; in_i += in_r; if (in_i == mi.length) { sg_miter_next(&mi); in_i = 0; } } } sg_miter_stop(&mi); /* * End of data process * Now if we have the flag final go to finalize part * If not, store the partial hash */ if (op->flags & SS_HASH_FINAL) goto hash_final; writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); i = 0; do { v = readl(ss->base + SS_CTL); i++; } while (i < SS_TIMEOUT && (v & SS_DATA_END)); if (unlikely(i >= SS_TIMEOUT)) { dev_err_ratelimited(ss->dev, "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", i, SS_TIMEOUT, v, areq->nbytes); err = -EIO; goto release_ss; } /* * The datasheet isn't very clear about when to retrieve the digest. The * bit SS_DATA_END is cleared when the engine has processed the data and * when the digest is computed *but* it doesn't mean the digest is * available in the digest registers. Hence the delay to be sure we can * read it. */ ndelay(1); for (i = 0; i < crypto_ahash_digestsize(tfm) / 4; i++) op->hash[i] = readl(ss->base + SS_MD0 + i * 4); goto release_ss; /* * hash_final: finalize hashing operation * * If we have some remaining bytes, we write them. * Then ask the SS for finalizing the hashing operation * * I do not check RX FIFO size in this function since the size is 32 * after each enabling and this function neither write more than 32 words. * If we come from the update part, we cannot have more than * 3 remaining bytes to write and SS is fast enough to not care about it. 
*/ hash_final: if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { algt = container_of(alg, struct sun4i_ss_alg_template, alg.hash); algt->stat_req++; } /* write the remaining words of the wait buffer */ if (op->len) { nwait = op->len / 4; if (nwait) { writesl(ss->base + SS_RXFIFO, op->buf, nwait); op->byte_count += 4 * nwait; } nbw = op->len - 4 * nwait; if (nbw) { wb = le32_to_cpup((__le32 *)(op->buf + nwait * 4)); wb &= GENMASK((nbw * 8) - 1, 0); op->byte_count += nbw; } } /* write the remaining bytes of the nbw buffer */ wb |= ((1 << 7) << (nbw * 8)); ((__le32 *)bf)[j++] = cpu_to_le32(wb); /* * number of space to pad to obtain 64o minus 8(size) minus 4 (final 1) * I take the operations from other MD5/SHA1 implementations */ /* last block size */ fill = 64 - (op->byte_count % 64); min_fill = 2 * sizeof(u32) + (nbw ? 0 : sizeof(u32)); /* if we can't fill all data, jump to the next 64 block */ if (fill < min_fill) fill += 64; j += (fill - min_fill) / sizeof(u32); /* write the length of data */ if (op->mode == SS_OP_SHA1) { __be64 *bits = (__be64 *)&bf[j]; *bits = cpu_to_be64(op->byte_count << 3); j += 2; } else { __le64 *bits = (__le64 *)&bf[j]; *bits = cpu_to_le64(op->byte_count << 3); j += 2; } writesl(ss->base + SS_RXFIFO, bf, j); /* Tell the SS to stop the hashing */ writel(op->mode | SS_ENABLED | SS_DATA_END, ss->base + SS_CTL); /* * Wait for SS to finish the hash. * The timeout could happen only in case of bad overclocking * or driver bug. */ i = 0; do { v = readl(ss->base + SS_CTL); i++; } while (i < SS_TIMEOUT && (v & SS_DATA_END)); if (unlikely(i >= SS_TIMEOUT)) { dev_err_ratelimited(ss->dev, "ERROR: hash end timeout %d>%d ctl=%x len=%u\n", i, SS_TIMEOUT, v, areq->nbytes); err = -EIO; goto release_ss; } /* * The datasheet isn't very clear about when to retrieve the digest. The * bit SS_DATA_END is cleared when the engine has processed the data and * when the digest is computed *but* it doesn't mean the digest is * available in the digest registers. Hence the delay to be sure we can * read it. */ ndelay(1); /* Get the hash from the device */ if (op->mode == SS_OP_SHA1) { for (i = 0; i < 5; i++) { v = readl(ss->base + SS_MD0 + i * 4); if (ss->variant->sha1_in_be) put_unaligned_le32(v, areq->result + i * 4); else put_unaligned_be32(v, areq->result + i * 4); } } else { for (i = 0; i < 4; i++) { v = readl(ss->base + SS_MD0 + i * 4); put_unaligned_le32(v, areq->result + i * 4); } } release_ss: writel(0, ss->base + SS_CTL); spin_unlock_bh(&ss->slock); return err; } int sun4i_hash_final(struct ahash_request *areq) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); op->flags = SS_HASH_FINAL; return sun4i_hash(areq); } int sun4i_hash_update(struct ahash_request *areq) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); op->flags = SS_HASH_UPDATE; return sun4i_hash(areq); } /* sun4i_hash_finup: finalize hashing operation after an update */ int sun4i_hash_finup(struct ahash_request *areq) { struct sun4i_req_ctx *op = ahash_request_ctx(areq); op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; return sun4i_hash(areq); } /* combo of init/update/final functions */ int sun4i_hash_digest(struct ahash_request *areq) { int err; struct sun4i_req_ctx *op = ahash_request_ctx(areq); err = sun4i_hash_init(areq); if (err) return err; op->flags = SS_HASH_UPDATE | SS_HASH_FINAL; return sun4i_hash(areq); }
linux-master
drivers/crypto/allwinner/sun4i-ss/sun4i-ss-hash.c
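The long comment above sun4i_hash() describes how update() splits incoming data between the engine and the 64-byte wait buffer. The standalone snippet below just replays that arithmetic for the comment's "example 1" (two 60-byte updates), so the end=4 / op->len=56 outcome can be checked without hardware.

#include <stdio.h>

int main(void)
{
	unsigned int len = 60;		/* bytes already waiting in op->buf (first update of 60) */
	unsigned int nbytes = 60;	/* bytes brought by the second update */
	unsigned int end, remain;

	if (len + nbytes < 64) {
		end = 0;				/* everything stays in the wait buffer */
		remain = len + nbytes;
	} else {
		end = ((nbytes + len) / 64) * 64 - len;	/* bytes of this request sent to the device */
		remain = nbytes - end;			/* new op->len afterwards */
	}
	printf("end=%u new op->len=%u\n", end, remain);	/* prints end=4 new op->len=56 */
	return 0;
}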
// SPDX-License-Identifier: GPL-2.0-or-later /* * sun4i-ss-core.c - hardware cryptographic accelerator for Allwinner A20 SoC * * Copyright (C) 2013-2015 Corentin LABBE <[email protected]> * * Core file which registers crypto algorithms supported by the SS. * * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include <linux/clk.h> #include <linux/crypto.h> #include <linux/debugfs.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <crypto/scatterwalk.h> #include <linux/scatterlist.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/reset.h> #include "sun4i-ss.h" static const struct ss_variant ss_a10_variant = { .sha1_in_be = false, }; static const struct ss_variant ss_a33_variant = { .sha1_in_be = true, }; static struct sun4i_ss_alg_template ss_algs[] = { { .type = CRYPTO_ALG_TYPE_AHASH, .mode = SS_OP_MD5, .alg.hash = { .init = sun4i_hash_init, .update = sun4i_hash_update, .final = sun4i_hash_final, .finup = sun4i_hash_finup, .digest = sun4i_hash_digest, .export = sun4i_hash_export_md5, .import = sun4i_hash_import_md5, .halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name = "md5-sun4i-ss", .cra_priority = 300, .cra_alignmask = 3, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun4i_req_ctx), .cra_module = THIS_MODULE, .cra_init = sun4i_hash_crainit, .cra_exit = sun4i_hash_craexit, } } } }, { .type = CRYPTO_ALG_TYPE_AHASH, .mode = SS_OP_SHA1, .alg.hash = { .init = sun4i_hash_init, .update = sun4i_hash_update, .final = sun4i_hash_final, .finup = sun4i_hash_finup, .digest = sun4i_hash_digest, .export = sun4i_hash_export_sha1, .import = sun4i_hash_import_sha1, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name = "sha1-sun4i-ss", .cra_priority = 300, .cra_alignmask = 3, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun4i_req_ctx), .cra_module = THIS_MODULE, .cra_init = sun4i_hash_crainit, .cra_exit = sun4i_hash_craexit, } } } }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { .setkey = sun4i_ss_aes_setkey, .encrypt = sun4i_ss_cbc_aes_encrypt, .decrypt = sun4i_ss_cbc_aes_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .base = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-sun4i-ss", .cra_priority = 300, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 3, .cra_init = sun4i_ss_cipher_init, .cra_exit = sun4i_ss_cipher_exit, } } }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { .setkey = sun4i_ss_aes_setkey, .encrypt = sun4i_ss_ecb_aes_encrypt, .decrypt = sun4i_ss_ecb_aes_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .base = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-sun4i-ss", .cra_priority = 300, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun4i_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 3, .cra_init = sun4i_ss_cipher_init, .cra_exit = sun4i_ss_cipher_exit, } } }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { .setkey = sun4i_ss_des_setkey, .encrypt = sun4i_ss_cbc_des_encrypt, .decrypt = sun4i_ss_cbc_des_decrypt, .min_keysize = DES_KEY_SIZE, 
.max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, .base = { .cra_name = "cbc(des)", .cra_driver_name = "cbc-des-sun4i-ss", .cra_priority = 300, .cra_blocksize = DES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun4i_req_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 3, .cra_init = sun4i_ss_cipher_init, .cra_exit = sun4i_ss_cipher_exit, } } }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { .setkey = sun4i_ss_des_setkey, .encrypt = sun4i_ss_ecb_des_encrypt, .decrypt = sun4i_ss_ecb_des_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .base = { .cra_name = "ecb(des)", .cra_driver_name = "ecb-des-sun4i-ss", .cra_priority = 300, .cra_blocksize = DES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun4i_req_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 3, .cra_init = sun4i_ss_cipher_init, .cra_exit = sun4i_ss_cipher_exit, } } }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { .setkey = sun4i_ss_des3_setkey, .encrypt = sun4i_ss_cbc_des3_encrypt, .decrypt = sun4i_ss_cbc_des3_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-des3-sun4i-ss", .cra_priority = 300, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun4i_req_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 3, .cra_init = sun4i_ss_cipher_init, .cra_exit = sun4i_ss_cipher_exit, } } }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.crypto = { .setkey = sun4i_ss_des3_setkey, .encrypt = sun4i_ss_ecb_des3_encrypt, .decrypt = sun4i_ss_ecb_des3_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "ecb-des3-sun4i-ss", .cra_priority = 300, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun4i_req_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 3, .cra_init = sun4i_ss_cipher_init, .cra_exit = sun4i_ss_cipher_exit, } } }, #ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG { .type = CRYPTO_ALG_TYPE_RNG, .alg.rng = { .base = { .cra_name = "stdrng", .cra_driver_name = "sun4i_ss_rng", .cra_priority = 300, .cra_ctxsize = 0, .cra_module = THIS_MODULE, }, .generate = sun4i_ss_prng_generate, .seed = sun4i_ss_prng_seed, .seedsize = SS_SEED_LEN / BITS_PER_BYTE, } }, #endif }; static int sun4i_ss_debugfs_show(struct seq_file *seq, void *v) { unsigned int i; for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { if (!ss_algs[i].ss) continue; switch (ss_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: seq_printf(seq, "%s %s reqs=%lu opti=%lu fallback=%lu tsize=%lu\n", ss_algs[i].alg.crypto.base.cra_driver_name, ss_algs[i].alg.crypto.base.cra_name, ss_algs[i].stat_req, ss_algs[i].stat_opti, ss_algs[i].stat_fb, ss_algs[i].stat_bytes); break; case CRYPTO_ALG_TYPE_RNG: seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n", ss_algs[i].alg.rng.base.cra_driver_name, ss_algs[i].alg.rng.base.cra_name, ss_algs[i].stat_req, ss_algs[i].stat_bytes); break; case CRYPTO_ALG_TYPE_AHASH: seq_printf(seq, "%s %s reqs=%lu\n", ss_algs[i].alg.hash.halg.base.cra_driver_name, ss_algs[i].alg.hash.halg.base.cra_name, ss_algs[i].stat_req); break; } } return 0; } DEFINE_SHOW_ATTRIBUTE(sun4i_ss_debugfs); /* * Power management strategy: The device is suspended unless a TFM exists for * one of 
the algorithms proposed by this driver. */ static int sun4i_ss_pm_suspend(struct device *dev) { struct sun4i_ss_ctx *ss = dev_get_drvdata(dev); reset_control_assert(ss->reset); clk_disable_unprepare(ss->ssclk); clk_disable_unprepare(ss->busclk); return 0; } static int sun4i_ss_pm_resume(struct device *dev) { struct sun4i_ss_ctx *ss = dev_get_drvdata(dev); int err; err = clk_prepare_enable(ss->busclk); if (err) { dev_err(ss->dev, "Cannot prepare_enable busclk\n"); goto err_enable; } err = clk_prepare_enable(ss->ssclk); if (err) { dev_err(ss->dev, "Cannot prepare_enable ssclk\n"); goto err_enable; } err = reset_control_deassert(ss->reset); if (err) { dev_err(ss->dev, "Cannot deassert reset control\n"); goto err_enable; } return err; err_enable: sun4i_ss_pm_suspend(dev); return err; } static const struct dev_pm_ops sun4i_ss_pm_ops = { SET_RUNTIME_PM_OPS(sun4i_ss_pm_suspend, sun4i_ss_pm_resume, NULL) }; /* * When power management is enabled, this function enables the PM and set the * device as suspended * When power management is disabled, this function just enables the device */ static int sun4i_ss_pm_init(struct sun4i_ss_ctx *ss) { int err; pm_runtime_use_autosuspend(ss->dev); pm_runtime_set_autosuspend_delay(ss->dev, 2000); err = pm_runtime_set_suspended(ss->dev); if (err) return err; pm_runtime_enable(ss->dev); return err; } static void sun4i_ss_pm_exit(struct sun4i_ss_ctx *ss) { pm_runtime_disable(ss->dev); } static int sun4i_ss_probe(struct platform_device *pdev) { u32 v; int err, i; unsigned long cr; const unsigned long cr_ahb = 24 * 1000 * 1000; const unsigned long cr_mod = 150 * 1000 * 1000; struct sun4i_ss_ctx *ss; if (!pdev->dev.of_node) return -ENODEV; ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL); if (!ss) return -ENOMEM; ss->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ss->base)) { dev_err(&pdev->dev, "Cannot request MMIO\n"); return PTR_ERR(ss->base); } ss->variant = of_device_get_match_data(&pdev->dev); if (!ss->variant) { dev_err(&pdev->dev, "Missing Security System variant\n"); return -EINVAL; } ss->ssclk = devm_clk_get(&pdev->dev, "mod"); if (IS_ERR(ss->ssclk)) { err = PTR_ERR(ss->ssclk); dev_err(&pdev->dev, "Cannot get SS clock err=%d\n", err); return err; } dev_dbg(&pdev->dev, "clock ss acquired\n"); ss->busclk = devm_clk_get(&pdev->dev, "ahb"); if (IS_ERR(ss->busclk)) { err = PTR_ERR(ss->busclk); dev_err(&pdev->dev, "Cannot get AHB SS clock err=%d\n", err); return err; } dev_dbg(&pdev->dev, "clock ahb_ss acquired\n"); ss->reset = devm_reset_control_get_optional(&pdev->dev, "ahb"); if (IS_ERR(ss->reset)) return PTR_ERR(ss->reset); if (!ss->reset) dev_info(&pdev->dev, "no reset control found\n"); /* * Check that clock have the correct rates given in the datasheet * Try to set the clock to the maximum allowed */ err = clk_set_rate(ss->ssclk, cr_mod); if (err) { dev_err(&pdev->dev, "Cannot set clock rate to ssclk\n"); return err; } /* * The only impact on clocks below requirement are bad performance, * so do not print "errors" * warn on Overclocked clocks */ cr = clk_get_rate(ss->busclk); if (cr >= cr_ahb) dev_dbg(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n", cr, cr / 1000000, cr_ahb); else dev_warn(&pdev->dev, "Clock bus %lu (%lu MHz) (must be >= %lu)\n", cr, cr / 1000000, cr_ahb); cr = clk_get_rate(ss->ssclk); if (cr <= cr_mod) if (cr < cr_mod) dev_warn(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n", cr, cr / 1000000, cr_mod); else dev_dbg(&pdev->dev, "Clock ss %lu (%lu MHz) (must be <= %lu)\n", cr, cr / 1000000, cr_mod); else 
dev_warn(&pdev->dev, "Clock ss is at %lu (%lu MHz) (must be <= %lu)\n", cr, cr / 1000000, cr_mod); ss->dev = &pdev->dev; platform_set_drvdata(pdev, ss); spin_lock_init(&ss->slock); err = sun4i_ss_pm_init(ss); if (err) return err; /* * Datasheet named it "Die Bonding ID" * I expect to be a sort of Security System Revision number. * Since the A80 seems to have an other version of SS * this info could be useful */ err = pm_runtime_resume_and_get(ss->dev); if (err < 0) goto error_pm; writel(SS_ENABLED, ss->base + SS_CTL); v = readl(ss->base + SS_CTL); v >>= 16; v &= 0x07; dev_info(&pdev->dev, "Die ID %d\n", v); writel(0, ss->base + SS_CTL); pm_runtime_put_sync(ss->dev); for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { ss_algs[i].ss = ss; switch (ss_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: err = crypto_register_skcipher(&ss_algs[i].alg.crypto); if (err) { dev_err(ss->dev, "Fail to register %s\n", ss_algs[i].alg.crypto.base.cra_name); goto error_alg; } break; case CRYPTO_ALG_TYPE_AHASH: err = crypto_register_ahash(&ss_algs[i].alg.hash); if (err) { dev_err(ss->dev, "Fail to register %s\n", ss_algs[i].alg.hash.halg.base.cra_name); goto error_alg; } break; case CRYPTO_ALG_TYPE_RNG: err = crypto_register_rng(&ss_algs[i].alg.rng); if (err) { dev_err(ss->dev, "Fail to register %s\n", ss_algs[i].alg.rng.base.cra_name); } break; } } /* Ignore error of debugfs */ ss->dbgfs_dir = debugfs_create_dir("sun4i-ss", NULL); ss->dbgfs_stats = debugfs_create_file("stats", 0444, ss->dbgfs_dir, ss, &sun4i_ss_debugfs_fops); return 0; error_alg: i--; for (; i >= 0; i--) { switch (ss_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: crypto_unregister_skcipher(&ss_algs[i].alg.crypto); break; case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&ss_algs[i].alg.hash); break; case CRYPTO_ALG_TYPE_RNG: crypto_unregister_rng(&ss_algs[i].alg.rng); break; } } error_pm: sun4i_ss_pm_exit(ss); return err; } static int sun4i_ss_remove(struct platform_device *pdev) { int i; struct sun4i_ss_ctx *ss = platform_get_drvdata(pdev); for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { switch (ss_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: crypto_unregister_skcipher(&ss_algs[i].alg.crypto); break; case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&ss_algs[i].alg.hash); break; case CRYPTO_ALG_TYPE_RNG: crypto_unregister_rng(&ss_algs[i].alg.rng); break; } } sun4i_ss_pm_exit(ss); return 0; } static const struct of_device_id a20ss_crypto_of_match_table[] = { { .compatible = "allwinner,sun4i-a10-crypto", .data = &ss_a10_variant }, { .compatible = "allwinner,sun8i-a33-crypto", .data = &ss_a33_variant }, {} }; MODULE_DEVICE_TABLE(of, a20ss_crypto_of_match_table); static struct platform_driver sun4i_ss_driver = { .probe = sun4i_ss_probe, .remove = sun4i_ss_remove, .driver = { .name = "sun4i-ss", .pm = &sun4i_ss_pm_ops, .of_match_table = a20ss_crypto_of_match_table, }, }; module_platform_driver(sun4i_ss_driver); MODULE_ALIAS("platform:sun4i-ss"); MODULE_DESCRIPTION("Allwinner Security System cryptographic accelerator"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corentin LABBE <[email protected]>");
linux-master
drivers/crypto/allwinner/sun4i-ss/sun4i-ss-core.c
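sun4i_ss_probe() reads the "Die Bonding ID" by enabling the engine and extracting bits [18:16] of SS_CTL. A tiny sketch of that bitfield extraction, using a made-up register value rather than a real readl():

#include <stdint.h>
#include <stdio.h>

static unsigned int ss_die_id(uint32_t ss_ctl)
{
	return (ss_ctl >> 16) & 0x07;	/* same shift/mask as the probe code */
}

int main(void)
{
	uint32_t ss_ctl = 0x00010001;	/* hypothetical SS_CTL readback with ID = 1 */

	printf("Die ID %u\n", ss_die_id(ss_ctl));
	return 0;
}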
// SPDX-License-Identifier: GPL-2.0-or-later #include "sun4i-ss.h" int sun4i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { struct sun4i_ss_alg_template *algt; struct rng_alg *alg = crypto_rng_alg(tfm); algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); memcpy(algt->ss->seed, seed, slen); return 0; } int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { struct sun4i_ss_alg_template *algt; struct rng_alg *alg = crypto_rng_alg(tfm); int i, err; u32 v; u32 *data = (u32 *)dst; const u32 mode = SS_OP_PRNG | SS_PRNG_CONTINUE | SS_ENABLED; size_t len; struct sun4i_ss_ctx *ss; unsigned int todo = (dlen / 4) * 4; algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng); ss = algt->ss; err = pm_runtime_resume_and_get(ss->dev); if (err < 0) return err; if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN4I_SS_DEBUG)) { algt->stat_req++; algt->stat_bytes += todo; } spin_lock_bh(&ss->slock); writel(mode, ss->base + SS_CTL); while (todo > 0) { /* write the seed */ for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) writel(ss->seed[i], ss->base + SS_KEY0 + i * 4); /* Read the random data */ len = min_t(size_t, SS_DATA_LEN / BITS_PER_BYTE, todo); readsl(ss->base + SS_TXFIFO, data, len / 4); data += len / 4; todo -= len; /* Update the seed */ for (i = 0; i < SS_SEED_LEN / BITS_PER_LONG; i++) { v = readl(ss->base + SS_KEY0 + i * 4); ss->seed[i] = v; } } writel(0, ss->base + SS_CTL); spin_unlock_bh(&ss->slock); pm_runtime_put(ss->dev); return 0; }
linux-master
drivers/crypto/allwinner/sun4i-ss/sun4i-ss-prng.c
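sun4i_ss_prng_generate() trims the requested length to whole words and reads the engine in fixed-size bursts, rewriting the seed around each burst. The sketch below models only that chunking loop; SS_DATA_LEN_BYTES and read_prng_burst() are assumptions standing in for the real seed writes and FIFO reads.

#include <stddef.h>
#include <stdio.h>

#define SS_DATA_LEN_BYTES 20	/* assumed burst size in bytes */

/* stands in for writing the seed, readsl() from SS_TXFIFO and
 * reading the updated seed back from the key registers */
static void read_prng_burst(unsigned char *dst, size_t len)
{
	for (size_t i = 0; i < len; i++)
		dst[i] = (unsigned char)i;
}

int main(void)
{
	unsigned char out[64];
	unsigned int dlen = 50;
	unsigned int todo = (dlen / 4) * 4;	/* whole words only, like the driver */
	unsigned char *p = out;

	while (todo > 0) {
		size_t len = todo < SS_DATA_LEN_BYTES ? todo : SS_DATA_LEN_BYTES;

		read_prng_burst(p, len);
		p += len;
		todo -= len;
	}
	printf("generated %u bytes\n", (unsigned int)(p - out));	/* 48 for dlen=50 */
	return 0;
}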
// SPDX-License-Identifier: GPL-2.0 /* * sun8i-ce-hash.c - hardware cryptographic offloader for * Allwinner H3/A64/H5/H2+/H6/R40 SoC * * Copyright (C) 2015-2020 Corentin Labbe <[email protected]> * * This file add support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512. * * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ #include <crypto/internal/hash.h> #include <crypto/md5.h> #include <crypto/sha1.h> #include <crypto/sha2.h> #include <linux/bottom_half.h> #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/pm_runtime.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/string.h> #include "sun8i-ce.h" int sun8i_ce_hash_init_tfm(struct crypto_ahash *tfm) { struct sun8i_ce_hash_tfm_ctx *op = crypto_ahash_ctx(tfm); struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ce_alg_template *algt; int err; algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); op->ce = algt->ce; /* FALLBACK */ op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(algt->ce->dev, "Fallback driver could no be loaded\n"); return PTR_ERR(op->fallback_tfm); } crypto_ahash_set_statesize(tfm, crypto_ahash_statesize(op->fallback_tfm)); crypto_ahash_set_reqsize(tfm, sizeof(struct sun8i_ce_hash_reqctx) + crypto_ahash_reqsize(op->fallback_tfm)); memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm), CRYPTO_MAX_ALG_NAME); err = pm_runtime_get_sync(op->ce->dev); if (err < 0) goto error_pm; return 0; error_pm: pm_runtime_put_noidle(op->ce->dev); crypto_free_ahash(op->fallback_tfm); return err; } void sun8i_ce_hash_exit_tfm(struct crypto_ahash *tfm) { struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); crypto_free_ahash(tfmctx->fallback_tfm); pm_runtime_put_sync_suspend(tfmctx->ce->dev); } int sun8i_ce_hash_init(struct ahash_request *areq) { struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); memset(rctx, 0, sizeof(struct sun8i_ce_hash_reqctx)); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_ahash_init(&rctx->fallback_req); } int sun8i_ce_hash_export(struct ahash_request *areq, void *out) { struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_ahash_export(&rctx->fallback_req, out); } int sun8i_ce_hash_import(struct ahash_request *areq, const void *in) { struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_ahash_import(&rctx->fallback_req, in); } int sun8i_ce_hash_final(struct ahash_request *areq) { struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = 
areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.result = areq->result; if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { struct sun8i_ce_alg_template *algt __maybe_unused; struct ahash_alg *alg = crypto_ahash_alg(tfm); algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt->stat_fb++; #endif } return crypto_ahash_final(&rctx->fallback_req); } int sun8i_ce_hash_update(struct ahash_request *areq) { struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.nbytes = areq->nbytes; rctx->fallback_req.src = areq->src; return crypto_ahash_update(&rctx->fallback_req); } int sun8i_ce_hash_finup(struct ahash_request *areq) { struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.nbytes = areq->nbytes; rctx->fallback_req.src = areq->src; rctx->fallback_req.result = areq->result; if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { struct sun8i_ce_alg_template *algt __maybe_unused; struct ahash_alg *alg = crypto_ahash_alg(tfm); algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt->stat_fb++; #endif } return crypto_ahash_finup(&rctx->fallback_req); } static int sun8i_ce_hash_digest_fb(struct ahash_request *areq) { struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ce_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.nbytes = areq->nbytes; rctx->fallback_req.src = areq->src; rctx->fallback_req.result = areq->result; if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { struct sun8i_ce_alg_template *algt __maybe_unused; struct ahash_alg *alg = crypto_ahash_alg(tfm); algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt->stat_fb++; #endif } return crypto_ahash_digest(&rctx->fallback_req); } static bool sun8i_ce_hash_need_fallback(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); struct sun8i_ce_alg_template *algt; struct scatterlist *sg; algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); if (areq->nbytes == 0) { algt->stat_fb_len0++; return true; } /* we need to reserve one SG for padding one */ if (sg_nents_for_len(areq->src, areq->nbytes) > MAX_SG - 1) { algt->stat_fb_maxsg++; return true; } sg = areq->src; while (sg) { if (sg->length % 4) { algt->stat_fb_srclen++; return true; } if (!IS_ALIGNED(sg->offset, sizeof(u32))) { algt->stat_fb_srcali++; return true; } sg = sg_next(sg); } return false; } int sun8i_ce_hash_digest(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); struct sun8i_ce_hash_reqctx *rctx = 
ahash_request_ctx(areq); struct sun8i_ce_alg_template *algt; struct sun8i_ce_dev *ce; struct crypto_engine *engine; struct scatterlist *sg; int nr_sgs, e, i; if (sun8i_ce_hash_need_fallback(areq)) return sun8i_ce_hash_digest_fb(areq); nr_sgs = sg_nents_for_len(areq->src, areq->nbytes); if (nr_sgs > MAX_SG - 1) return sun8i_ce_hash_digest_fb(areq); for_each_sg(areq->src, sg, nr_sgs, i) { if (sg->length % 4 || !IS_ALIGNED(sg->offset, sizeof(u32))) return sun8i_ce_hash_digest_fb(areq); } algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); ce = algt->ce; e = sun8i_ce_get_engine_number(ce); rctx->flow = e; engine = ce->chanlist[e].engine; return crypto_transfer_hash_request_to_engine(engine, areq); } static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs) { u64 fill, min_fill, j, k; __be64 *bebits; __le64 *lebits; j = padi; buf[j++] = cpu_to_le32(0x80); if (bs == 64) { fill = 64 - (byte_count % 64); min_fill = 2 * sizeof(u32) + sizeof(u32); } else { fill = 128 - (byte_count % 128); min_fill = 4 * sizeof(u32) + sizeof(u32); } if (fill < min_fill) fill += bs; k = j; j += (fill - min_fill) / sizeof(u32); if (j * 4 > bufsize) { pr_err("%s OVERFLOW %llu\n", __func__, j); return 0; } for (; k < j; k++) buf[k] = 0; if (le) { /* MD5 */ lebits = (__le64 *)&buf[j]; *lebits = cpu_to_le64(byte_count << 3); j += 2; } else { if (bs == 64) { /* sha1 sha224 sha256 */ bebits = (__be64 *)&buf[j]; *bebits = cpu_to_be64(byte_count << 3); j += 2; } else { /* sha384 sha512*/ bebits = (__be64 *)&buf[j]; *bebits = cpu_to_be64(byte_count >> 61); j += 2; bebits = (__be64 *)&buf[j]; *bebits = cpu_to_be64(byte_count << 3); j += 2; } } if (j * 4 > bufsize) { pr_err("%s OVERFLOW %llu\n", __func__, j); return 0; } return j; } int sun8i_ce_hash_run(struct crypto_engine *engine, void *breq) { struct ahash_request *areq = container_of(breq, struct ahash_request, base); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct ahash_alg *alg = __crypto_ahash_alg(tfm->base.__crt_alg); struct sun8i_ce_hash_reqctx *rctx = ahash_request_ctx(areq); struct sun8i_ce_alg_template *algt; struct sun8i_ce_dev *ce; struct sun8i_ce_flow *chan; struct ce_task *cet; struct scatterlist *sg; int nr_sgs, flow, err; unsigned int len; u32 common; u64 byte_count; __le32 *bf; void *buf = NULL; int j, i, todo; void *result = NULL; u64 bs; int digestsize; dma_addr_t addr_res, addr_pad; int ns = sg_nents_for_len(areq->src, areq->nbytes); algt = container_of(alg, struct sun8i_ce_alg_template, alg.hash.base); ce = algt->ce; bs = algt->alg.hash.base.halg.base.cra_blocksize; digestsize = algt->alg.hash.base.halg.digestsize; if (digestsize == SHA224_DIGEST_SIZE) digestsize = SHA256_DIGEST_SIZE; if (digestsize == SHA384_DIGEST_SIZE) digestsize = SHA512_DIGEST_SIZE; /* the padding could be up to two block. 
*/ buf = kzalloc(bs * 2, GFP_KERNEL | GFP_DMA); if (!buf) { err = -ENOMEM; goto theend; } bf = (__le32 *)buf; result = kzalloc(digestsize, GFP_KERNEL | GFP_DMA); if (!result) { err = -ENOMEM; goto theend; } flow = rctx->flow; chan = &ce->chanlist[flow]; #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt->stat_req++; #endif dev_dbg(ce->dev, "%s %s len=%d\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->nbytes); cet = chan->tl; memset(cet, 0, sizeof(struct ce_task)); cet->t_id = cpu_to_le32(flow); common = ce->variant->alg_hash[algt->ce_algo_id]; common |= CE_COMM_INT; cet->t_common_ctl = cpu_to_le32(common); cet->t_sym_ctl = 0; cet->t_asym_ctl = 0; nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend; } len = areq->nbytes; for_each_sg(areq->src, sg, nr_sgs, i) { cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg)); todo = min(len, sg_dma_len(sg)); cet->t_src[i].len = cpu_to_le32(todo / 4); len -= todo; } if (len > 0) { dev_err(ce->dev, "remaining len %d\n", len); err = -EINVAL; goto theend; } addr_res = dma_map_single(ce->dev, result, digestsize, DMA_FROM_DEVICE); cet->t_dst[0].addr = cpu_to_le32(addr_res); cet->t_dst[0].len = cpu_to_le32(digestsize / 4); if (dma_mapping_error(ce->dev, addr_res)) { dev_err(ce->dev, "DMA map dest\n"); err = -EINVAL; goto theend; } byte_count = areq->nbytes; j = 0; switch (algt->ce_algo_id) { case CE_ID_HASH_MD5: j = hash_pad(bf, 2 * bs, j, byte_count, true, bs); break; case CE_ID_HASH_SHA1: case CE_ID_HASH_SHA224: case CE_ID_HASH_SHA256: j = hash_pad(bf, 2 * bs, j, byte_count, false, bs); break; case CE_ID_HASH_SHA384: case CE_ID_HASH_SHA512: j = hash_pad(bf, 2 * bs, j, byte_count, false, bs); break; } if (!j) { err = -EINVAL; goto theend; } addr_pad = dma_map_single(ce->dev, buf, j * 4, DMA_TO_DEVICE); cet->t_src[i].addr = cpu_to_le32(addr_pad); cet->t_src[i].len = cpu_to_le32(j); if (dma_mapping_error(ce->dev, addr_pad)) { dev_err(ce->dev, "DMA error on padding SG\n"); err = -EINVAL; goto theend; } if (ce->variant->hash_t_dlen_in_bits) cet->t_dlen = cpu_to_le32((areq->nbytes + j * 4) * 8); else cet->t_dlen = cpu_to_le32(areq->nbytes / 4 + j); chan->timeout = areq->nbytes; err = sun8i_ce_run_task(ce, flow, crypto_ahash_alg_name(tfm)); dma_unmap_single(ce->dev, addr_pad, j * 4, DMA_TO_DEVICE); dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); dma_unmap_single(ce->dev, addr_res, digestsize, DMA_FROM_DEVICE); memcpy(areq->result, result, algt->alg.hash.base.halg.digestsize); theend: kfree(buf); kfree(result); local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); local_bh_enable(); return 0; }
linux-master
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c
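hash_pad() in the file above computes how many 32-bit words of padding (0x80 marker, zero fill, 64-bit bit-length) must follow the message so the total stays block aligned. The snippet below repeats that length arithmetic for the 64-byte block case as a standalone check; it is not the driver function itself.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* byte_count is assumed to be a multiple of 4, as the driver's fallback
 * checks guarantee before hash_pad() is ever reached */
static uint64_t pad_words_64(uint64_t byte_count)
{
	uint64_t j = 1;					/* the 0x80 marker word */
	uint64_t fill = 64 - (byte_count % 64);
	uint64_t min_fill = 2 * sizeof(uint32_t) + sizeof(uint32_t);

	if (fill < min_fill)
		fill += 64;				/* not enough room, spill into the next block */
	j += (fill - min_fill) / sizeof(uint32_t);
	return j + 2;					/* plus two words of bit length */
}

int main(void)
{
	const uint64_t bc[] = { 0, 52, 56, 64, 120 };

	for (unsigned int i = 0; i < sizeof(bc) / sizeof(bc[0]); i++)
		printf("byte_count=%" PRIu64 " -> padded total %" PRIu64 " bytes\n",
		       bc[i], bc[i] + pad_words_64(bc[i]) * 4);	/* always a multiple of 64 */
	return 0;
}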
// SPDX-License-Identifier: GPL-2.0 /* * sun8i-ce-cipher.c - hardware cryptographic offloader for * Allwinner H3/A64/H5/H2+/H6/R40 SoC * * Copyright (C) 2016-2019 Corentin LABBE <[email protected]> * * This file add support for AES cipher with 128,192,256 bits keysize in * CBC and ECB mode. * * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include <linux/bottom_half.h> #include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <crypto/scatterwalk.h> #include <crypto/internal/des.h> #include <crypto/internal/skcipher.h> #include "sun8i-ce.h" static int sun8i_ce_cipher_need_fallback(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct scatterlist *sg; struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ce_alg_template *algt; unsigned int todo, len; algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base); if (sg_nents_for_len(areq->src, areq->cryptlen) > MAX_SG || sg_nents_for_len(areq->dst, areq->cryptlen) > MAX_SG) { algt->stat_fb_maxsg++; return true; } if (areq->cryptlen < crypto_skcipher_ivsize(tfm)) { algt->stat_fb_leniv++; return true; } if (areq->cryptlen == 0) { algt->stat_fb_len0++; return true; } if (areq->cryptlen % 16) { algt->stat_fb_mod16++; return true; } len = areq->cryptlen; sg = areq->src; while (sg) { if (!IS_ALIGNED(sg->offset, sizeof(u32))) { algt->stat_fb_srcali++; return true; } todo = min(len, sg->length); if (todo % 4) { algt->stat_fb_srclen++; return true; } len -= todo; sg = sg_next(sg); } len = areq->cryptlen; sg = areq->dst; while (sg) { if (!IS_ALIGNED(sg->offset, sizeof(u32))) { algt->stat_fb_dstali++; return true; } todo = min(len, sg->length); if (todo % 4) { algt->stat_fb_dstlen++; return true; } len -= todo; sg = sg_next(sg); } return false; } static int sun8i_ce_cipher_fallback(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); int err; if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ce_alg_template *algt __maybe_unused; algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt->stat_fb++; #endif } skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, areq->base.complete, areq->base.data); skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (rctx->op_dir & CE_DECRYPTION) err = crypto_skcipher_decrypt(&rctx->fallback_req); else err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } static int sun8i_ce_cipher_prepare(struct crypto_engine *engine, void *async_req) { struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_ce_dev *ce = op->ce; struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ce_alg_template *algt; struct sun8i_ce_flow *chan; struct ce_task *cet; struct scatterlist *sg; unsigned int todo, len, offset, ivsize; u32 common, sym; int flow, i; int nr_sgs = 0; int nr_sgd = 0; int err = 0; int ns = 
sg_nents_for_len(areq->src, areq->cryptlen); int nd = sg_nents_for_len(areq->dst, areq->cryptlen); algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base); dev_dbg(ce->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->cryptlen, rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm), op->keylen); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt->stat_req++; #endif flow = rctx->flow; chan = &ce->chanlist[flow]; cet = chan->tl; memset(cet, 0, sizeof(struct ce_task)); cet->t_id = cpu_to_le32(flow); common = ce->variant->alg_cipher[algt->ce_algo_id]; common |= rctx->op_dir | CE_COMM_INT; cet->t_common_ctl = cpu_to_le32(common); /* CTS and recent CE (H6) need length in bytes, in word otherwise */ if (ce->variant->cipher_t_dlen_in_bytes) cet->t_dlen = cpu_to_le32(areq->cryptlen); else cet->t_dlen = cpu_to_le32(areq->cryptlen / 4); sym = ce->variant->op_mode[algt->ce_blockmode]; len = op->keylen; switch (len) { case 128 / 8: sym |= CE_AES_128BITS; break; case 192 / 8: sym |= CE_AES_192BITS; break; case 256 / 8: sym |= CE_AES_256BITS; break; } cet->t_sym_ctl = cpu_to_le32(sym); cet->t_asym_ctl = 0; rctx->addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE); if (dma_mapping_error(ce->dev, rctx->addr_key)) { dev_err(ce->dev, "Cannot DMA MAP KEY\n"); err = -EFAULT; goto theend; } cet->t_key = cpu_to_le32(rctx->addr_key); ivsize = crypto_skcipher_ivsize(tfm); if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { rctx->ivlen = ivsize; if (rctx->op_dir & CE_DECRYPTION) { offset = areq->cryptlen - ivsize; scatterwalk_map_and_copy(chan->backup_iv, areq->src, offset, ivsize, 0); } memcpy(chan->bounce_iv, areq->iv, ivsize); rctx->addr_iv = dma_map_single(ce->dev, chan->bounce_iv, rctx->ivlen, DMA_TO_DEVICE); if (dma_mapping_error(ce->dev, rctx->addr_iv)) { dev_err(ce->dev, "Cannot DMA MAP IV\n"); err = -ENOMEM; goto theend_iv; } cet->t_iv = cpu_to_le32(rctx->addr_iv); } if (areq->src == areq->dst) { nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL); if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; } nr_sgd = nr_sgs; } else { nr_sgs = dma_map_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; } nr_sgd = dma_map_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); if (nr_sgd <= 0 || nr_sgd > MAX_SG) { dev_err(ce->dev, "Invalid sg number %d\n", nr_sgd); err = -EINVAL; goto theend_sgs; } } len = areq->cryptlen; for_each_sg(areq->src, sg, nr_sgs, i) { cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg)); todo = min(len, sg_dma_len(sg)); cet->t_src[i].len = cpu_to_le32(todo / 4); dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__, areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo); len -= todo; } if (len > 0) { dev_err(ce->dev, "remaining len %d\n", len); err = -EINVAL; goto theend_sgs; } len = areq->cryptlen; for_each_sg(areq->dst, sg, nr_sgd, i) { cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg)); todo = min(len, sg_dma_len(sg)); cet->t_dst[i].len = cpu_to_le32(todo / 4); dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__, areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo); len -= todo; } if (len > 0) { dev_err(ce->dev, "remaining len %d\n", len); err = -EINVAL; goto theend_sgs; } chan->timeout = areq->cryptlen; rctx->nr_sgs = nr_sgs; rctx->nr_sgd = nr_sgd; return 0; theend_sgs: if (areq->src == areq->dst) { 
dma_unmap_sg(ce->dev, areq->src, ns, DMA_BIDIRECTIONAL); } else { if (nr_sgs > 0) dma_unmap_sg(ce->dev, areq->src, ns, DMA_TO_DEVICE); dma_unmap_sg(ce->dev, areq->dst, nd, DMA_FROM_DEVICE); } theend_iv: if (areq->iv && ivsize > 0) { if (rctx->addr_iv) dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE); offset = areq->cryptlen - ivsize; if (rctx->op_dir & CE_DECRYPTION) { memcpy(areq->iv, chan->backup_iv, ivsize); memzero_explicit(chan->backup_iv, ivsize); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, offset, ivsize, 0); } memzero_explicit(chan->bounce_iv, ivsize); } dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE); theend: return err; } static void sun8i_ce_cipher_run(struct crypto_engine *engine, void *areq) { struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(breq); struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_ce_dev *ce = op->ce; struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(breq); int flow, err; flow = rctx->flow; err = sun8i_ce_run_task(ce, flow, crypto_tfm_alg_name(breq->base.tfm)); local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); local_bh_enable(); } static void sun8i_ce_cipher_unprepare(struct crypto_engine *engine, void *async_req) { struct skcipher_request *areq = container_of(async_req, struct skcipher_request, base); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_ce_dev *ce = op->ce; struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); struct sun8i_ce_flow *chan; struct ce_task *cet; unsigned int ivsize, offset; int nr_sgs = rctx->nr_sgs; int nr_sgd = rctx->nr_sgd; int flow; flow = rctx->flow; chan = &ce->chanlist[flow]; cet = chan->tl; ivsize = crypto_skcipher_ivsize(tfm); if (areq->src == areq->dst) { dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_BIDIRECTIONAL); } else { if (nr_sgs > 0) dma_unmap_sg(ce->dev, areq->src, nr_sgs, DMA_TO_DEVICE); dma_unmap_sg(ce->dev, areq->dst, nr_sgd, DMA_FROM_DEVICE); } if (areq->iv && ivsize > 0) { if (cet->t_iv) dma_unmap_single(ce->dev, rctx->addr_iv, rctx->ivlen, DMA_TO_DEVICE); offset = areq->cryptlen - ivsize; if (rctx->op_dir & CE_DECRYPTION) { memcpy(areq->iv, chan->backup_iv, ivsize); memzero_explicit(chan->backup_iv, ivsize); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, offset, ivsize, 0); } memzero_explicit(chan->bounce_iv, ivsize); } dma_unmap_single(ce->dev, rctx->addr_key, op->keylen, DMA_TO_DEVICE); } int sun8i_ce_cipher_do_one(struct crypto_engine *engine, void *areq) { int err = sun8i_ce_cipher_prepare(engine, areq); if (err) return err; sun8i_ce_cipher_run(engine, areq); sun8i_ce_cipher_unprepare(engine, areq); return 0; } int sun8i_ce_skdecrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); struct crypto_engine *engine; int e; rctx->op_dir = CE_DECRYPTION; if (sun8i_ce_cipher_need_fallback(areq)) return sun8i_ce_cipher_fallback(areq); e = sun8i_ce_get_engine_number(op->ce); rctx->flow = e; engine = op->ce->chanlist[e].engine; return crypto_transfer_skcipher_request_to_engine(engine, areq); } int sun8i_ce_skencrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun8i_cipher_tfm_ctx *op = 
crypto_skcipher_ctx(tfm); struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); struct crypto_engine *engine; int e; rctx->op_dir = CE_ENCRYPTION; if (sun8i_ce_cipher_need_fallback(areq)) return sun8i_ce_cipher_fallback(areq); e = sun8i_ce_get_engine_number(op->ce); rctx->flow = e; engine = op->ce->chanlist[e].engine; return crypto_transfer_skcipher_request_to_engine(engine, areq); } int sun8i_ce_cipher_init(struct crypto_tfm *tfm) { struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); struct sun8i_ce_alg_template *algt; const char *name = crypto_tfm_alg_name(tfm); struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(sktfm); int err; memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx)); algt = container_of(alg, struct sun8i_ce_alg_template, alg.skcipher.base); op->ce = algt->ce; op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->ce->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } sktfm->reqsize = sizeof(struct sun8i_cipher_req_ctx) + crypto_skcipher_reqsize(op->fallback_tfm); memcpy(algt->fbname, crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), CRYPTO_MAX_ALG_NAME); err = pm_runtime_get_sync(op->ce->dev); if (err < 0) goto error_pm; return 0; error_pm: pm_runtime_put_noidle(op->ce->dev); crypto_free_skcipher(op->fallback_tfm); return err; } void sun8i_ce_cipher_exit(struct crypto_tfm *tfm) { struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); kfree_sensitive(op->key); crypto_free_skcipher(op->fallback_tfm); pm_runtime_put_sync_suspend(op->ce->dev); } int sun8i_ce_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_ce_dev *ce = op->ce; switch (keylen) { case 128 / 8: break; case 192 / 8: break; case 256 / 8: break; default: dev_dbg(ce->dev, "ERROR: Invalid keylen %u\n", keylen); return -EINVAL; } kfree_sensitive(op->key); op->keylen = keylen; op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); if (!op->key) return -ENOMEM; crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } int sun8i_ce_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); int err; err = verify_skcipher_des3_key(tfm, key); if (err) return err; kfree_sensitive(op->key); op->keylen = keylen; op->key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA); if (!op->key) return -ENOMEM; crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); }
linux-master
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-cipher.c
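A minimal userspace sketch of the CBC IV bookkeeping visible in the cipher completion/unprepare paths above: for decryption the driver restores an IV it saved before the operation (in-place processing may overwrite the source), while for encryption it copies the last ciphertext block out of the destination. BLOCK_SIZE, backup_iv() and next_iv() are illustrative names for this sketch, not driver symbols.

/* Model of the "next IV is the last ciphertext block" rule. */
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 16

/* Save the would-be next IV before an in-place CBC decryption. */
static void backup_iv(const unsigned char *ciphertext, size_t len,
		      unsigned char *backup)
{
	memcpy(backup, ciphertext + len - BLOCK_SIZE, BLOCK_SIZE);
}

/* Compute the IV to hand to the next request in the chain. */
static void next_iv(int decrypt, const unsigned char *dst, size_t len,
		    const unsigned char *backup, unsigned char *iv)
{
	if (decrypt)
		memcpy(iv, backup, BLOCK_SIZE);	/* restored saved copy */
	else
		memcpy(iv, dst + len - BLOCK_SIZE, BLOCK_SIZE);	/* last block */
}

int main(void)
{
	unsigned char ct[32] = { 0 }, backup[BLOCK_SIZE], iv[BLOCK_SIZE];

	ct[31] = 0xaa;			/* pretend last ciphertext byte */
	backup_iv(ct, sizeof(ct), backup);
	next_iv(1, ct, sizeof(ct), backup, iv);
	printf("next IV byte 15 after decrypt: 0x%02x\n", iv[15]);
	return 0;
}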
// SPDX-License-Identifier: GPL-2.0 /* * sun8i-ce-trng.c - hardware cryptographic offloader for * Allwinner H3/A64/H5/H2+/H6/R40 SoC * * Copyright (C) 2015-2020 Corentin Labbe <[email protected]> * * This file handle the TRNG * * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun8i-ce.h" #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/hw_random.h> /* * Note that according to the algorithm ID, 2 versions of the TRNG exists, * The first present in H3/H5/R40/A64 and the second present in H6. * This file adds support for both, but only the second is working * reliabily according to rngtest. **/ static int sun8i_ce_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct sun8i_ce_dev *ce; dma_addr_t dma_dst; int err = 0; int flow = 3; unsigned int todo; struct sun8i_ce_flow *chan; struct ce_task *cet; u32 common; void *d; ce = container_of(rng, struct sun8i_ce_dev, trng); /* round the data length to a multiple of 32*/ todo = max + 32; todo -= todo % 32; d = kzalloc(todo, GFP_KERNEL | GFP_DMA); if (!d) return -ENOMEM; #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG ce->hwrng_stat_req++; ce->hwrng_stat_bytes += todo; #endif dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE); if (dma_mapping_error(ce->dev, dma_dst)) { dev_err(ce->dev, "Cannot DMA MAP DST\n"); err = -EFAULT; goto err_dst; } err = pm_runtime_resume_and_get(ce->dev); if (err < 0) goto err_pm; mutex_lock(&ce->rnglock); chan = &ce->chanlist[flow]; cet = &chan->tl[0]; memset(cet, 0, sizeof(struct ce_task)); cet->t_id = cpu_to_le32(flow); common = ce->variant->trng | CE_COMM_INT; cet->t_common_ctl = cpu_to_le32(common); /* recent CE (H6) need length in bytes, in word otherwise */ if (ce->variant->trng_t_dlen_in_bytes) cet->t_dlen = cpu_to_le32(todo); else cet->t_dlen = cpu_to_le32(todo / 4); cet->t_sym_ctl = 0; cet->t_asym_ctl = 0; cet->t_dst[0].addr = cpu_to_le32(dma_dst); cet->t_dst[0].len = cpu_to_le32(todo / 4); ce->chanlist[flow].timeout = todo; err = sun8i_ce_run_task(ce, 3, "TRNG"); mutex_unlock(&ce->rnglock); pm_runtime_put(ce->dev); err_pm: dma_unmap_single(ce->dev, dma_dst, todo, DMA_FROM_DEVICE); if (!err) { memcpy(data, d, max); err = max; } err_dst: kfree_sensitive(d); return err; } int sun8i_ce_hwrng_register(struct sun8i_ce_dev *ce) { int ret; if (ce->variant->trng == CE_ID_NOTSUPP) { dev_info(ce->dev, "TRNG not supported\n"); return 0; } ce->trng.name = "sun8i Crypto Engine TRNG"; ce->trng.read = sun8i_ce_trng_read; ret = hwrng_register(&ce->trng); if (ret) dev_err(ce->dev, "Fail to register the TRNG\n"); return ret; } void sun8i_ce_hwrng_unregister(struct sun8i_ce_dev *ce) { if (ce->variant->trng == CE_ID_NOTSUPP) return; hwrng_unregister(&ce->trng); }
linux-master
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c
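A short sketch of the length handling in the TRNG read above: the driver rounds the requested length up to a multiple of 32 before the DMA transfer, fills a buffer of that size, then copies only the bytes the caller asked for. round_up32() mirrors the "todo = max + 32; todo -= todo % 32;" computation; the buffer names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static size_t round_up32(size_t max)
{
	size_t todo = max + 32;

	todo -= todo % 32;	/* multiple of 32, always >= max */
	return todo;
}

int main(void)
{
	size_t want = 50;
	size_t todo = round_up32(want);
	unsigned char *buf = calloc(1, todo);	/* stands in for the DMA buffer */
	unsigned char out[50];

	if (!buf)
		return 1;
	/* ... hardware would fill buf[0..todo-1] here ... */
	memcpy(out, buf, want);		/* caller only receives 'want' bytes */
	printf("requested %zu, generated %zu\n", want, todo);
	free(buf);
	return 0;
}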
// SPDX-License-Identifier: GPL-2.0 /* * sun8i-ce-prng.c - hardware cryptographic offloader for * Allwinner H3/A64/H5/H2+/H6/R40 SoC * * Copyright (C) 2015-2020 Corentin Labbe <[email protected]> * * This file handle the PRNG * * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun8i-ce.h" #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <crypto/internal/rng.h> int sun8i_ce_prng_init(struct crypto_tfm *tfm) { struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm); memset(ctx, 0, sizeof(struct sun8i_ce_rng_tfm_ctx)); return 0; } void sun8i_ce_prng_exit(struct crypto_tfm *tfm) { struct sun8i_ce_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm); kfree_sensitive(ctx->seed); ctx->seed = NULL; ctx->slen = 0; } int sun8i_ce_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm); if (ctx->seed && ctx->slen != slen) { kfree_sensitive(ctx->seed); ctx->slen = 0; ctx->seed = NULL; } if (!ctx->seed) ctx->seed = kmalloc(slen, GFP_KERNEL | GFP_DMA); if (!ctx->seed) return -ENOMEM; memcpy(ctx->seed, seed, slen); ctx->slen = slen; return 0; } int sun8i_ce_prng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { struct sun8i_ce_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm); struct rng_alg *alg = crypto_rng_alg(tfm); struct sun8i_ce_alg_template *algt; struct sun8i_ce_dev *ce; dma_addr_t dma_iv, dma_dst; int err = 0; int flow = 3; unsigned int todo; struct sun8i_ce_flow *chan; struct ce_task *cet; u32 common, sym; void *d; algt = container_of(alg, struct sun8i_ce_alg_template, alg.rng); ce = algt->ce; if (ctx->slen == 0) { dev_err(ce->dev, "not seeded\n"); return -EINVAL; } /* we want dlen + seedsize rounded up to a multiple of PRNG_DATA_SIZE */ todo = dlen + ctx->slen + PRNG_DATA_SIZE * 2; todo -= todo % PRNG_DATA_SIZE; d = kzalloc(todo, GFP_KERNEL | GFP_DMA); if (!d) { err = -ENOMEM; goto err_mem; } dev_dbg(ce->dev, "%s PRNG slen=%u dlen=%u todo=%u multi=%u\n", __func__, slen, dlen, todo, todo / PRNG_DATA_SIZE); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG algt->stat_req++; algt->stat_bytes += todo; #endif dma_iv = dma_map_single(ce->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE); if (dma_mapping_error(ce->dev, dma_iv)) { dev_err(ce->dev, "Cannot DMA MAP IV\n"); err = -EFAULT; goto err_iv; } dma_dst = dma_map_single(ce->dev, d, todo, DMA_FROM_DEVICE); if (dma_mapping_error(ce->dev, dma_dst)) { dev_err(ce->dev, "Cannot DMA MAP DST\n"); err = -EFAULT; goto err_dst; } err = pm_runtime_resume_and_get(ce->dev); if (err < 0) goto err_pm; mutex_lock(&ce->rnglock); chan = &ce->chanlist[flow]; cet = &chan->tl[0]; memset(cet, 0, sizeof(struct ce_task)); cet->t_id = cpu_to_le32(flow); common = ce->variant->prng | CE_COMM_INT; cet->t_common_ctl = cpu_to_le32(common); /* recent CE (H6) need length in bytes, in word otherwise */ if (ce->variant->prng_t_dlen_in_bytes) cet->t_dlen = cpu_to_le32(todo); else cet->t_dlen = cpu_to_le32(todo / 4); sym = PRNG_LD; cet->t_sym_ctl = cpu_to_le32(sym); cet->t_asym_ctl = 0; cet->t_key = cpu_to_le32(dma_iv); cet->t_iv = cpu_to_le32(dma_iv); cet->t_dst[0].addr = cpu_to_le32(dma_dst); cet->t_dst[0].len = cpu_to_le32(todo / 4); ce->chanlist[flow].timeout = 2000; err = sun8i_ce_run_task(ce, 3, "PRNG"); mutex_unlock(&ce->rnglock); pm_runtime_put(ce->dev); err_pm: dma_unmap_single(ce->dev, dma_dst, todo, DMA_FROM_DEVICE); err_dst: dma_unmap_single(ce->dev, dma_iv, ctx->slen, DMA_TO_DEVICE); if (!err) { memcpy(dst, d, dlen); memcpy(ctx->seed, d 
+ dlen, ctx->slen);
	}
err_iv:
	kfree_sensitive(d);
err_mem:
	return err;
}
linux-master
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c
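A sketch of the reseed-from-output pattern used by the PRNG above: the engine is asked for enough material to cover both the caller's request and a fresh seed, the first dlen bytes go to the caller and the following slen bytes replace the seed. The PRNG_DATA_SIZE value and fill_random() are placeholders for this standalone model, not the driver's definitions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PRNG_DATA_SIZE 20	/* assumed block granularity for this sketch */

static void fill_random(unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)	/* stand-in for the hardware */
		buf[i] = (unsigned char)rand();
}

static int prng_generate(unsigned char *seed, size_t slen,
			 unsigned char *dst, size_t dlen)
{
	/* mirror the driver: dlen + slen plus slack, rounded to the block size */
	size_t todo = dlen + slen + PRNG_DATA_SIZE * 2;
	unsigned char *d;

	todo -= todo % PRNG_DATA_SIZE;
	d = malloc(todo);
	if (!d)
		return -1;
	fill_random(d, todo);
	memcpy(dst, d, dlen);		/* caller's data */
	memcpy(seed, d + dlen, slen);	/* refresh the seed from the tail */
	free(d);
	return 0;
}

int main(void)
{
	unsigned char seed[32] = { 1 }, out[64];

	if (prng_generate(seed, sizeof(seed), out, sizeof(out)))
		return 1;
	printf("generated %zu bytes, seed refreshed\n", sizeof(out));
	return 0;
}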
// SPDX-License-Identifier: GPL-2.0 /* * sun8i-ce-core.c - hardware cryptographic offloader for * Allwinner H3/A64/H5/H2+/H6/R40 SoC * * Copyright (C) 2015-2019 Corentin Labbe <[email protected]> * * Core file which registers crypto algorithms supported by the CryptoEngine. * * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include <crypto/engine.h> #include <crypto/internal/hash.h> #include <crypto/internal/rng.h> #include <crypto/internal/skcipher.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include "sun8i-ce.h" /* * mod clock is lower on H3 than other SoC due to some DMA timeout occurring * with high value. * If you want to tune mod clock, loading driver and passing selftest is * insufficient, you need to test with some LUKS test (mount and write to it) */ static const struct ce_variant ce_h3_variant = { .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, }, .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, CE_ALG_SHA384, CE_ALG_SHA512 }, .op_mode = { CE_OP_ECB, CE_OP_CBC }, .ce_clks = { { "bus", 0, 200000000 }, { "mod", 50000000, 0 }, }, .esr = ESR_H3, .prng = CE_ALG_PRNG, .trng = CE_ID_NOTSUPP, }; static const struct ce_variant ce_h5_variant = { .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, }, .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, CE_ID_NOTSUPP, CE_ID_NOTSUPP }, .op_mode = { CE_OP_ECB, CE_OP_CBC }, .ce_clks = { { "bus", 0, 200000000 }, { "mod", 300000000, 0 }, }, .esr = ESR_H5, .prng = CE_ALG_PRNG, .trng = CE_ID_NOTSUPP, }; static const struct ce_variant ce_h6_variant = { .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, }, .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, CE_ALG_SHA384, CE_ALG_SHA512 }, .op_mode = { CE_OP_ECB, CE_OP_CBC }, .cipher_t_dlen_in_bytes = true, .hash_t_dlen_in_bits = true, .prng_t_dlen_in_bytes = true, .trng_t_dlen_in_bytes = true, .ce_clks = { { "bus", 0, 200000000 }, { "mod", 300000000, 0 }, { "ram", 0, 400000000 }, }, .esr = ESR_H6, .prng = CE_ALG_PRNG_V2, .trng = CE_ALG_TRNG_V2, }; static const struct ce_variant ce_a64_variant = { .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, }, .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, CE_ID_NOTSUPP, CE_ID_NOTSUPP }, .op_mode = { CE_OP_ECB, CE_OP_CBC }, .ce_clks = { { "bus", 0, 200000000 }, { "mod", 300000000, 0 }, }, .esr = ESR_A64, .prng = CE_ALG_PRNG, .trng = CE_ID_NOTSUPP, }; static const struct ce_variant ce_d1_variant = { .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, }, .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, CE_ALG_SHA384, CE_ALG_SHA512 }, .op_mode = { CE_OP_ECB, CE_OP_CBC }, .ce_clks = { { "bus", 0, 200000000 }, { "mod", 300000000, 0 }, { "ram", 0, 400000000 }, { "trng", 0, 0 }, }, .esr = ESR_D1, .prng = CE_ALG_PRNG, .trng = CE_ALG_TRNG, }; static const struct ce_variant ce_r40_variant = { .alg_cipher = { CE_ALG_AES, CE_ALG_DES, CE_ALG_3DES, }, .alg_hash = { CE_ALG_MD5, CE_ALG_SHA1, CE_ALG_SHA224, CE_ALG_SHA256, CE_ID_NOTSUPP, CE_ID_NOTSUPP }, .op_mode = { CE_OP_ECB, CE_OP_CBC }, .ce_clks = { { "bus", 0, 200000000 }, { "mod", 300000000, 0 }, }, .esr = ESR_R40, .prng = CE_ALG_PRNG, .trng = CE_ID_NOTSUPP, }; /* * 
sun8i_ce_get_engine_number() get the next channel slot * This is a simple round-robin way of getting the next channel * The flow 3 is reserve for xRNG operations */ int sun8i_ce_get_engine_number(struct sun8i_ce_dev *ce) { return atomic_inc_return(&ce->flow) % (MAXFLOW - 1); } int sun8i_ce_run_task(struct sun8i_ce_dev *ce, int flow, const char *name) { u32 v; int err = 0; struct ce_task *cet = ce->chanlist[flow].tl; #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG ce->chanlist[flow].stat_req++; #endif mutex_lock(&ce->mlock); v = readl(ce->base + CE_ICR); v |= 1 << flow; writel(v, ce->base + CE_ICR); reinit_completion(&ce->chanlist[flow].complete); writel(ce->chanlist[flow].t_phy, ce->base + CE_TDQ); ce->chanlist[flow].status = 0; /* Be sure all data is written before enabling the task */ wmb(); /* Only H6 needs to write a part of t_common_ctl along with "1", but since it is ignored * on older SoCs, we have no reason to complicate things. */ v = 1 | ((le32_to_cpu(ce->chanlist[flow].tl->t_common_ctl) & 0x7F) << 8); writel(v, ce->base + CE_TLR); mutex_unlock(&ce->mlock); wait_for_completion_interruptible_timeout(&ce->chanlist[flow].complete, msecs_to_jiffies(ce->chanlist[flow].timeout)); if (ce->chanlist[flow].status == 0) { dev_err(ce->dev, "DMA timeout for %s (tm=%d) on flow %d\n", name, ce->chanlist[flow].timeout, flow); err = -EFAULT; } /* No need to lock for this read, the channel is locked so * nothing could modify the error value for this channel */ v = readl(ce->base + CE_ESR); switch (ce->variant->esr) { case ESR_H3: /* Sadly, the error bit is not per flow */ if (v) { dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow); err = -EFAULT; print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4, cet, sizeof(struct ce_task), false); } if (v & CE_ERR_ALGO_NOTSUP) dev_err(ce->dev, "CE ERROR: algorithm not supported\n"); if (v & CE_ERR_DATALEN) dev_err(ce->dev, "CE ERROR: data length error\n"); if (v & CE_ERR_KEYSRAM) dev_err(ce->dev, "CE ERROR: keysram access error for AES\n"); break; case ESR_A64: case ESR_D1: case ESR_H5: case ESR_R40: v >>= (flow * 4); v &= 0xF; if (v) { dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow); err = -EFAULT; print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4, cet, sizeof(struct ce_task), false); } if (v & CE_ERR_ALGO_NOTSUP) dev_err(ce->dev, "CE ERROR: algorithm not supported\n"); if (v & CE_ERR_DATALEN) dev_err(ce->dev, "CE ERROR: data length error\n"); if (v & CE_ERR_KEYSRAM) dev_err(ce->dev, "CE ERROR: keysram access error for AES\n"); break; case ESR_H6: v >>= (flow * 8); v &= 0xFF; if (v) { dev_err(ce->dev, "CE ERROR: %x for flow %x\n", v, flow); err = -EFAULT; print_hex_dump(KERN_INFO, "TASK: ", DUMP_PREFIX_NONE, 16, 4, cet, sizeof(struct ce_task), false); } if (v & CE_ERR_ALGO_NOTSUP) dev_err(ce->dev, "CE ERROR: algorithm not supported\n"); if (v & CE_ERR_DATALEN) dev_err(ce->dev, "CE ERROR: data length error\n"); if (v & CE_ERR_KEYSRAM) dev_err(ce->dev, "CE ERROR: keysram access error for AES\n"); if (v & CE_ERR_ADDR_INVALID) dev_err(ce->dev, "CE ERROR: address invalid\n"); if (v & CE_ERR_KEYLADDER) dev_err(ce->dev, "CE ERROR: key ladder configuration error\n"); break; } return err; } static irqreturn_t ce_irq_handler(int irq, void *data) { struct sun8i_ce_dev *ce = (struct sun8i_ce_dev *)data; int flow = 0; u32 p; p = readl(ce->base + CE_ISR); for (flow = 0; flow < MAXFLOW; flow++) { if (p & (BIT(flow))) { writel(BIT(flow), ce->base + CE_ISR); ce->chanlist[flow].status = 1; complete(&ce->chanlist[flow].complete); } } return IRQ_HANDLED; } 
static struct sun8i_ce_alg_template ce_algs[] = { { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ce_algo_id = CE_ID_CIPHER_AES, .ce_blockmode = CE_ID_OP_CBC, .alg.skcipher.base = { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-sun8i-ce", .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, .cra_init = sun8i_ce_cipher_init, .cra_exit = sun8i_ce_cipher_exit, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = sun8i_ce_aes_setkey, .encrypt = sun8i_ce_skencrypt, .decrypt = sun8i_ce_skdecrypt, }, .alg.skcipher.op = { .do_one_request = sun8i_ce_cipher_do_one, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ce_algo_id = CE_ID_CIPHER_AES, .ce_blockmode = CE_ID_OP_ECB, .alg.skcipher.base = { .base = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-sun8i-ce", .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, .cra_init = sun8i_ce_cipher_init, .cra_exit = sun8i_ce_cipher_exit, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = sun8i_ce_aes_setkey, .encrypt = sun8i_ce_skencrypt, .decrypt = sun8i_ce_skdecrypt, }, .alg.skcipher.op = { .do_one_request = sun8i_ce_cipher_do_one, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ce_algo_id = CE_ID_CIPHER_DES3, .ce_blockmode = CE_ID_OP_CBC, .alg.skcipher.base = { .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-des3-sun8i-ce", .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, .cra_init = sun8i_ce_cipher_init, .cra_exit = sun8i_ce_cipher_exit, }, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = sun8i_ce_des3_setkey, .encrypt = sun8i_ce_skencrypt, .decrypt = sun8i_ce_skdecrypt, }, .alg.skcipher.op = { .do_one_request = sun8i_ce_cipher_do_one, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ce_algo_id = CE_ID_CIPHER_DES3, .ce_blockmode = CE_ID_OP_ECB, .alg.skcipher.base = { .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "ecb-des3-sun8i-ce", .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, .cra_init = sun8i_ce_cipher_init, .cra_exit = sun8i_ce_cipher_exit, }, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = sun8i_ce_des3_setkey, .encrypt = sun8i_ce_skencrypt, .decrypt = sun8i_ce_skdecrypt, }, .alg.skcipher.op = { .do_one_request = sun8i_ce_cipher_do_one, }, }, #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_HASH { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_MD5, .alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, .finup = sun8i_ce_hash_finup, .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, .init_tfm = sun8i_ce_hash_init_tfm, .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = 
sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name = "md5-sun8i-ce", .cra_priority = 300, .cra_alignmask = 3, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = sun8i_ce_hash_run, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_SHA1, .alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, .finup = sun8i_ce_hash_finup, .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, .init_tfm = sun8i_ce_hash_init_tfm, .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name = "sha1-sun8i-ce", .cra_priority = 300, .cra_alignmask = 3, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = sun8i_ce_hash_run, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_SHA224, .alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, .finup = sun8i_ce_hash_finup, .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, .init_tfm = sun8i_ce_hash_init_tfm, .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha224", .cra_driver_name = "sha224-sun8i-ce", .cra_priority = 300, .cra_alignmask = 3, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = sun8i_ce_hash_run, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_SHA256, .alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, .finup = sun8i_ce_hash_finup, .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, .init_tfm = sun8i_ce_hash_init_tfm, .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-sun8i-ce", .cra_priority = 300, .cra_alignmask = 3, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = sun8i_ce_hash_run, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_SHA384, .alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, .finup = sun8i_ce_hash_finup, .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, .init_tfm = sun8i_ce_hash_init_tfm, .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), .base = { .cra_name = "sha384", .cra_driver_name = "sha384-sun8i-ce", .cra_priority = 300, .cra_alignmask = 3, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, 
.cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = sun8i_ce_hash_run, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ce_algo_id = CE_ID_HASH_SHA512, .alg.hash.base = { .init = sun8i_ce_hash_init, .update = sun8i_ce_hash_update, .final = sun8i_ce_hash_final, .finup = sun8i_ce_hash_finup, .digest = sun8i_ce_hash_digest, .export = sun8i_ce_hash_export, .import = sun8i_ce_hash_import, .init_tfm = sun8i_ce_hash_init_tfm, .exit_tfm = sun8i_ce_hash_exit_tfm, .halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct sha512_state), .base = { .cra_name = "sha512", .cra_driver_name = "sha512-sun8i-ce", .cra_priority = 300, .cra_alignmask = 3, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ce_hash_tfm_ctx), .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = sun8i_ce_hash_run, }, }, #endif #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_PRNG { .type = CRYPTO_ALG_TYPE_RNG, .alg.rng = { .base = { .cra_name = "stdrng", .cra_driver_name = "sun8i-ce-prng", .cra_priority = 300, .cra_ctxsize = sizeof(struct sun8i_ce_rng_tfm_ctx), .cra_module = THIS_MODULE, .cra_init = sun8i_ce_prng_init, .cra_exit = sun8i_ce_prng_exit, }, .generate = sun8i_ce_prng_generate, .seed = sun8i_ce_prng_seed, .seedsize = PRNG_SEED_SIZE, } }, #endif }; static int sun8i_ce_debugfs_show(struct seq_file *seq, void *v) { struct sun8i_ce_dev *ce __maybe_unused = seq->private; unsigned int i; for (i = 0; i < MAXFLOW; i++) seq_printf(seq, "Channel %d: nreq %lu\n", i, #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG ce->chanlist[i].stat_req); #else 0ul); #endif for (i = 0; i < ARRAY_SIZE(ce_algs); i++) { if (!ce_algs[i].ce) continue; switch (ce_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", ce_algs[i].alg.skcipher.base.base.cra_driver_name, ce_algs[i].alg.skcipher.base.base.cra_name, ce_algs[i].stat_req, ce_algs[i].stat_fb); seq_printf(seq, "\tLast fallback is: %s\n", ce_algs[i].fbname); seq_printf(seq, "\tFallback due to 0 length: %lu\n", ce_algs[i].stat_fb_len0); seq_printf(seq, "\tFallback due to length !mod16: %lu\n", ce_algs[i].stat_fb_mod16); seq_printf(seq, "\tFallback due to length < IV: %lu\n", ce_algs[i].stat_fb_leniv); seq_printf(seq, "\tFallback due to source alignment: %lu\n", ce_algs[i].stat_fb_srcali); seq_printf(seq, "\tFallback due to dest alignment: %lu\n", ce_algs[i].stat_fb_dstali); seq_printf(seq, "\tFallback due to source length: %lu\n", ce_algs[i].stat_fb_srclen); seq_printf(seq, "\tFallback due to dest length: %lu\n", ce_algs[i].stat_fb_dstlen); seq_printf(seq, "\tFallback due to SG numbers: %lu\n", ce_algs[i].stat_fb_maxsg); break; case CRYPTO_ALG_TYPE_AHASH: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", ce_algs[i].alg.hash.base.halg.base.cra_driver_name, ce_algs[i].alg.hash.base.halg.base.cra_name, ce_algs[i].stat_req, ce_algs[i].stat_fb); seq_printf(seq, "\tLast fallback is: %s\n", ce_algs[i].fbname); seq_printf(seq, "\tFallback due to 0 length: %lu\n", ce_algs[i].stat_fb_len0); seq_printf(seq, "\tFallback due to length: %lu\n", ce_algs[i].stat_fb_srclen); seq_printf(seq, "\tFallback due to alignment: %lu\n", ce_algs[i].stat_fb_srcali); seq_printf(seq, "\tFallback due to SG numbers: %lu\n", ce_algs[i].stat_fb_maxsg); break; case CRYPTO_ALG_TYPE_RNG: seq_printf(seq, "%s %s reqs=%lu bytes=%lu\n", ce_algs[i].alg.rng.base.cra_driver_name, 
ce_algs[i].alg.rng.base.cra_name, ce_algs[i].stat_req, ce_algs[i].stat_bytes); break; } } #if defined(CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG) && \ defined(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG) seq_printf(seq, "HWRNG %lu %lu\n", ce->hwrng_stat_req, ce->hwrng_stat_bytes); #endif return 0; } DEFINE_SHOW_ATTRIBUTE(sun8i_ce_debugfs); static void sun8i_ce_free_chanlist(struct sun8i_ce_dev *ce, int i) { while (i >= 0) { crypto_engine_exit(ce->chanlist[i].engine); if (ce->chanlist[i].tl) dma_free_coherent(ce->dev, sizeof(struct ce_task), ce->chanlist[i].tl, ce->chanlist[i].t_phy); i--; } } /* * Allocate the channel list structure */ static int sun8i_ce_allocate_chanlist(struct sun8i_ce_dev *ce) { int i, err; ce->chanlist = devm_kcalloc(ce->dev, MAXFLOW, sizeof(struct sun8i_ce_flow), GFP_KERNEL); if (!ce->chanlist) return -ENOMEM; for (i = 0; i < MAXFLOW; i++) { init_completion(&ce->chanlist[i].complete); ce->chanlist[i].engine = crypto_engine_alloc_init(ce->dev, true); if (!ce->chanlist[i].engine) { dev_err(ce->dev, "Cannot allocate engine\n"); i--; err = -ENOMEM; goto error_engine; } err = crypto_engine_start(ce->chanlist[i].engine); if (err) { dev_err(ce->dev, "Cannot start engine\n"); goto error_engine; } ce->chanlist[i].tl = dma_alloc_coherent(ce->dev, sizeof(struct ce_task), &ce->chanlist[i].t_phy, GFP_KERNEL); if (!ce->chanlist[i].tl) { dev_err(ce->dev, "Cannot get DMA memory for task %d\n", i); err = -ENOMEM; goto error_engine; } ce->chanlist[i].bounce_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); if (!ce->chanlist[i].bounce_iv) { err = -ENOMEM; goto error_engine; } ce->chanlist[i].backup_iv = devm_kmalloc(ce->dev, AES_BLOCK_SIZE, GFP_KERNEL); if (!ce->chanlist[i].backup_iv) { err = -ENOMEM; goto error_engine; } } return 0; error_engine: sun8i_ce_free_chanlist(ce, i); return err; } /* * Power management strategy: The device is suspended unless a TFM exists for * one of the algorithms proposed by this driver. 
*/ static int sun8i_ce_pm_suspend(struct device *dev) { struct sun8i_ce_dev *ce = dev_get_drvdata(dev); int i; reset_control_assert(ce->reset); for (i = 0; i < CE_MAX_CLOCKS; i++) clk_disable_unprepare(ce->ceclks[i]); return 0; } static int sun8i_ce_pm_resume(struct device *dev) { struct sun8i_ce_dev *ce = dev_get_drvdata(dev); int err, i; for (i = 0; i < CE_MAX_CLOCKS; i++) { if (!ce->variant->ce_clks[i].name) continue; err = clk_prepare_enable(ce->ceclks[i]); if (err) { dev_err(ce->dev, "Cannot prepare_enable %s\n", ce->variant->ce_clks[i].name); goto error; } } err = reset_control_deassert(ce->reset); if (err) { dev_err(ce->dev, "Cannot deassert reset control\n"); goto error; } return 0; error: sun8i_ce_pm_suspend(dev); return err; } static const struct dev_pm_ops sun8i_ce_pm_ops = { SET_RUNTIME_PM_OPS(sun8i_ce_pm_suspend, sun8i_ce_pm_resume, NULL) }; static int sun8i_ce_pm_init(struct sun8i_ce_dev *ce) { int err; pm_runtime_use_autosuspend(ce->dev); pm_runtime_set_autosuspend_delay(ce->dev, 2000); err = pm_runtime_set_suspended(ce->dev); if (err) return err; pm_runtime_enable(ce->dev); return err; } static void sun8i_ce_pm_exit(struct sun8i_ce_dev *ce) { pm_runtime_disable(ce->dev); } static int sun8i_ce_get_clks(struct sun8i_ce_dev *ce) { unsigned long cr; int err, i; for (i = 0; i < CE_MAX_CLOCKS; i++) { if (!ce->variant->ce_clks[i].name) continue; ce->ceclks[i] = devm_clk_get(ce->dev, ce->variant->ce_clks[i].name); if (IS_ERR(ce->ceclks[i])) { err = PTR_ERR(ce->ceclks[i]); dev_err(ce->dev, "Cannot get %s CE clock err=%d\n", ce->variant->ce_clks[i].name, err); return err; } cr = clk_get_rate(ce->ceclks[i]); if (!cr) return -EINVAL; if (ce->variant->ce_clks[i].freq > 0 && cr != ce->variant->ce_clks[i].freq) { dev_info(ce->dev, "Set %s clock to %lu (%lu Mhz) from %lu (%lu Mhz)\n", ce->variant->ce_clks[i].name, ce->variant->ce_clks[i].freq, ce->variant->ce_clks[i].freq / 1000000, cr, cr / 1000000); err = clk_set_rate(ce->ceclks[i], ce->variant->ce_clks[i].freq); if (err) dev_err(ce->dev, "Fail to set %s clk speed to %lu hz\n", ce->variant->ce_clks[i].name, ce->variant->ce_clks[i].freq); } if (ce->variant->ce_clks[i].max_freq > 0 && cr > ce->variant->ce_clks[i].max_freq) dev_warn(ce->dev, "Frequency for %s (%lu hz) is higher than datasheet's recommendation (%lu hz)", ce->variant->ce_clks[i].name, cr, ce->variant->ce_clks[i].max_freq); } return 0; } static int sun8i_ce_register_algs(struct sun8i_ce_dev *ce) { int ce_method, err, id; unsigned int i; for (i = 0; i < ARRAY_SIZE(ce_algs); i++) { ce_algs[i].ce = ce; switch (ce_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: id = ce_algs[i].ce_algo_id; ce_method = ce->variant->alg_cipher[id]; if (ce_method == CE_ID_NOTSUPP) { dev_dbg(ce->dev, "DEBUG: Algo of %s not supported\n", ce_algs[i].alg.skcipher.base.base.cra_name); ce_algs[i].ce = NULL; break; } id = ce_algs[i].ce_blockmode; ce_method = ce->variant->op_mode[id]; if (ce_method == CE_ID_NOTSUPP) { dev_dbg(ce->dev, "DEBUG: Blockmode of %s not supported\n", ce_algs[i].alg.skcipher.base.base.cra_name); ce_algs[i].ce = NULL; break; } dev_info(ce->dev, "Register %s\n", ce_algs[i].alg.skcipher.base.base.cra_name); err = crypto_engine_register_skcipher(&ce_algs[i].alg.skcipher); if (err) { dev_err(ce->dev, "ERROR: Fail to register %s\n", ce_algs[i].alg.skcipher.base.base.cra_name); ce_algs[i].ce = NULL; return err; } break; case CRYPTO_ALG_TYPE_AHASH: id = ce_algs[i].ce_algo_id; ce_method = ce->variant->alg_hash[id]; if (ce_method == CE_ID_NOTSUPP) { dev_info(ce->dev, "DEBUG: Algo of %s not 
supported\n", ce_algs[i].alg.hash.base.halg.base.cra_name); ce_algs[i].ce = NULL; break; } dev_info(ce->dev, "Register %s\n", ce_algs[i].alg.hash.base.halg.base.cra_name); err = crypto_engine_register_ahash(&ce_algs[i].alg.hash); if (err) { dev_err(ce->dev, "ERROR: Fail to register %s\n", ce_algs[i].alg.hash.base.halg.base.cra_name); ce_algs[i].ce = NULL; return err; } break; case CRYPTO_ALG_TYPE_RNG: if (ce->variant->prng == CE_ID_NOTSUPP) { dev_info(ce->dev, "DEBUG: Algo of %s not supported\n", ce_algs[i].alg.rng.base.cra_name); ce_algs[i].ce = NULL; break; } dev_info(ce->dev, "Register %s\n", ce_algs[i].alg.rng.base.cra_name); err = crypto_register_rng(&ce_algs[i].alg.rng); if (err) { dev_err(ce->dev, "Fail to register %s\n", ce_algs[i].alg.rng.base.cra_name); ce_algs[i].ce = NULL; } break; default: ce_algs[i].ce = NULL; dev_err(ce->dev, "ERROR: tried to register an unknown algo\n"); } } return 0; } static void sun8i_ce_unregister_algs(struct sun8i_ce_dev *ce) { unsigned int i; for (i = 0; i < ARRAY_SIZE(ce_algs); i++) { if (!ce_algs[i].ce) continue; switch (ce_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: dev_info(ce->dev, "Unregister %d %s\n", i, ce_algs[i].alg.skcipher.base.base.cra_name); crypto_engine_unregister_skcipher(&ce_algs[i].alg.skcipher); break; case CRYPTO_ALG_TYPE_AHASH: dev_info(ce->dev, "Unregister %d %s\n", i, ce_algs[i].alg.hash.base.halg.base.cra_name); crypto_engine_unregister_ahash(&ce_algs[i].alg.hash); break; case CRYPTO_ALG_TYPE_RNG: dev_info(ce->dev, "Unregister %d %s\n", i, ce_algs[i].alg.rng.base.cra_name); crypto_unregister_rng(&ce_algs[i].alg.rng); break; } } } static int sun8i_ce_probe(struct platform_device *pdev) { struct sun8i_ce_dev *ce; int err, irq; u32 v; ce = devm_kzalloc(&pdev->dev, sizeof(*ce), GFP_KERNEL); if (!ce) return -ENOMEM; ce->dev = &pdev->dev; platform_set_drvdata(pdev, ce); ce->variant = of_device_get_match_data(&pdev->dev); if (!ce->variant) { dev_err(&pdev->dev, "Missing Crypto Engine variant\n"); return -EINVAL; } ce->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ce->base)) return PTR_ERR(ce->base); err = sun8i_ce_get_clks(ce); if (err) return err; /* Get Non Secure IRQ */ irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ce->reset = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(ce->reset)) return dev_err_probe(&pdev->dev, PTR_ERR(ce->reset), "No reset control found\n"); mutex_init(&ce->mlock); mutex_init(&ce->rnglock); err = sun8i_ce_allocate_chanlist(ce); if (err) return err; err = sun8i_ce_pm_init(ce); if (err) goto error_pm; err = devm_request_irq(&pdev->dev, irq, ce_irq_handler, 0, "sun8i-ce-ns", ce); if (err) { dev_err(ce->dev, "Cannot request CryptoEngine Non-secure IRQ (err=%d)\n", err); goto error_irq; } err = sun8i_ce_register_algs(ce); if (err) goto error_alg; err = pm_runtime_resume_and_get(ce->dev); if (err < 0) goto error_alg; #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG sun8i_ce_hwrng_register(ce); #endif v = readl(ce->base + CE_CTR); v >>= CE_DIE_ID_SHIFT; v &= CE_DIE_ID_MASK; dev_info(&pdev->dev, "CryptoEngine Die ID %x\n", v); pm_runtime_put_sync(ce->dev); if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG)) { struct dentry *dbgfs_dir __maybe_unused; struct dentry *dbgfs_stats __maybe_unused; /* Ignore error of debugfs */ dbgfs_dir = debugfs_create_dir("sun8i-ce", NULL); dbgfs_stats = debugfs_create_file("stats", 0444, dbgfs_dir, ce, &sun8i_ce_debugfs_fops); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG ce->dbgfs_dir = dbgfs_dir; ce->dbgfs_stats = dbgfs_stats; #endif } return 0; error_alg: 
sun8i_ce_unregister_algs(ce); error_irq: sun8i_ce_pm_exit(ce); error_pm: sun8i_ce_free_chanlist(ce, MAXFLOW - 1); return err; } static int sun8i_ce_remove(struct platform_device *pdev) { struct sun8i_ce_dev *ce = platform_get_drvdata(pdev); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_TRNG sun8i_ce_hwrng_unregister(ce); #endif sun8i_ce_unregister_algs(ce); #ifdef CONFIG_CRYPTO_DEV_SUN8I_CE_DEBUG debugfs_remove_recursive(ce->dbgfs_dir); #endif sun8i_ce_free_chanlist(ce, MAXFLOW - 1); sun8i_ce_pm_exit(ce); return 0; } static const struct of_device_id sun8i_ce_crypto_of_match_table[] = { { .compatible = "allwinner,sun8i-h3-crypto", .data = &ce_h3_variant }, { .compatible = "allwinner,sun8i-r40-crypto", .data = &ce_r40_variant }, { .compatible = "allwinner,sun20i-d1-crypto", .data = &ce_d1_variant }, { .compatible = "allwinner,sun50i-a64-crypto", .data = &ce_a64_variant }, { .compatible = "allwinner,sun50i-h5-crypto", .data = &ce_h5_variant }, { .compatible = "allwinner,sun50i-h6-crypto", .data = &ce_h6_variant }, {} }; MODULE_DEVICE_TABLE(of, sun8i_ce_crypto_of_match_table); static struct platform_driver sun8i_ce_driver = { .probe = sun8i_ce_probe, .remove = sun8i_ce_remove, .driver = { .name = "sun8i-ce", .pm = &sun8i_ce_pm_ops, .of_match_table = sun8i_ce_crypto_of_match_table, }, }; module_platform_driver(sun8i_ce_driver); MODULE_DESCRIPTION("Allwinner Crypto Engine cryptographic offloader"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corentin Labbe <[email protected]>");
linux-master
drivers/crypto/allwinner/sun8i-ce/sun8i-ce-core.c
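A sketch of the flow scheduling described in the core file above: cipher and hash work is spread round-robin across MAXFLOW - 1 channels, keeping the last flow free for the PRNG/TRNG, which drive the hardware outside the crypto-engine queues. MAXFLOW is taken as 4 here and the kernel atomic_t is modelled with a C11 atomic; both are assumptions of this sketch.

#include <stdatomic.h>
#include <stdio.h>

#define MAXFLOW 4	/* last flow (3) reserved for the RNGs */

static atomic_uint flow_counter;

static unsigned int get_engine_number(void)
{
	/* post-increment + 1 mirrors atomic_inc_return() semantics */
	return (atomic_fetch_add(&flow_counter, 1) + 1) % (MAXFLOW - 1);
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("request %d -> flow %u\n", i, get_engine_number());
	printf("flow %d is never returned; it is kept for xRNG use\n",
	       MAXFLOW - 1);
	return 0;
}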
// SPDX-License-Identifier: GPL-2.0 /* * sun8i-ss-cipher.c - hardware cryptographic offloader for * Allwinner A80/A83T SoC * * Copyright (C) 2016-2019 Corentin LABBE <[email protected]> * * This file add support for AES cipher with 128,192,256 bits keysize in * CBC and ECB mode. * * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include <linux/bottom_half.h> #include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <crypto/scatterwalk.h> #include <crypto/internal/skcipher.h> #include "sun8i-ss.h" static bool sun8i_ss_need_fallback(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ss_alg_template *algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base); struct scatterlist *in_sg = areq->src; struct scatterlist *out_sg = areq->dst; struct scatterlist *sg; unsigned int todo, len; if (areq->cryptlen == 0 || areq->cryptlen % 16) { algt->stat_fb_len++; return true; } if (sg_nents_for_len(areq->src, areq->cryptlen) > 8 || sg_nents_for_len(areq->dst, areq->cryptlen) > 8) { algt->stat_fb_sgnum++; return true; } len = areq->cryptlen; sg = areq->src; while (sg) { todo = min(len, sg->length); if ((todo % 16) != 0) { algt->stat_fb_sglen++; return true; } if (!IS_ALIGNED(sg->offset, 16)) { algt->stat_fb_align++; return true; } len -= todo; sg = sg_next(sg); } len = areq->cryptlen; sg = areq->dst; while (sg) { todo = min(len, sg->length); if ((todo % 16) != 0) { algt->stat_fb_sglen++; return true; } if (!IS_ALIGNED(sg->offset, 16)) { algt->stat_fb_align++; return true; } len -= todo; sg = sg_next(sg); } /* SS need same numbers of SG (with same length) for source and destination */ in_sg = areq->src; out_sg = areq->dst; while (in_sg && out_sg) { if (in_sg->length != out_sg->length) return true; in_sg = sg_next(in_sg); out_sg = sg_next(out_sg); } if (in_sg || out_sg) return true; return false; } static int sun8i_ss_cipher_fallback(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); int err; if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ss_alg_template *algt __maybe_unused; algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base); #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG algt->stat_fb++; #endif } skcipher_request_set_tfm(&rctx->fallback_req, op->fallback_tfm); skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, areq->base.complete, areq->base.data); skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (rctx->op_dir & SS_DECRYPTION) err = crypto_skcipher_decrypt(&rctx->fallback_req); else err = crypto_skcipher_encrypt(&rctx->fallback_req); return err; } static int sun8i_ss_setup_ivs(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_ss_dev *ss = op->ss; struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); struct scatterlist *sg = areq->src; unsigned int todo, offset; unsigned int len = areq->cryptlen; unsigned int ivsize = crypto_skcipher_ivsize(tfm); struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; int i = 0; dma_addr_t a; int err; rctx->ivlen = ivsize; 
if (rctx->op_dir & SS_DECRYPTION) { offset = areq->cryptlen - ivsize; scatterwalk_map_and_copy(sf->biv, areq->src, offset, ivsize, 0); } /* we need to copy all IVs from source in case DMA is bi-directionnal */ while (sg && len) { if (sg_dma_len(sg) == 0) { sg = sg_next(sg); continue; } if (i == 0) memcpy(sf->iv[0], areq->iv, ivsize); a = dma_map_single(ss->dev, sf->iv[i], ivsize, DMA_TO_DEVICE); if (dma_mapping_error(ss->dev, a)) { memzero_explicit(sf->iv[i], ivsize); dev_err(ss->dev, "Cannot DMA MAP IV\n"); err = -EFAULT; goto dma_iv_error; } rctx->p_iv[i] = a; /* we need to setup all others IVs only in the decrypt way */ if (rctx->op_dir == SS_ENCRYPTION) return 0; todo = min(len, sg_dma_len(sg)); len -= todo; i++; if (i < MAX_SG) { offset = sg->length - ivsize; scatterwalk_map_and_copy(sf->iv[i], sg, offset, ivsize, 0); } rctx->niv = i; sg = sg_next(sg); } return 0; dma_iv_error: i--; while (i >= 0) { dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); memzero_explicit(sf->iv[i], ivsize); i--; } return err; } static int sun8i_ss_cipher(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_ss_dev *ss = op->ss; struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct sun8i_ss_alg_template *algt; struct sun8i_ss_flow *sf = &ss->flows[rctx->flow]; struct scatterlist *sg; unsigned int todo, len, offset, ivsize; int nr_sgs = 0; int nr_sgd = 0; int err = 0; int nsgs = sg_nents_for_len(areq->src, areq->cryptlen); int nsgd = sg_nents_for_len(areq->dst, areq->cryptlen); int i; algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base); dev_dbg(ss->dev, "%s %s %u %x IV(%p %u) key=%u\n", __func__, crypto_tfm_alg_name(areq->base.tfm), areq->cryptlen, rctx->op_dir, areq->iv, crypto_skcipher_ivsize(tfm), op->keylen); #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG algt->stat_req++; #endif rctx->op_mode = ss->variant->op_mode[algt->ss_blockmode]; rctx->method = ss->variant->alg_cipher[algt->ss_algo_id]; rctx->keylen = op->keylen; rctx->p_key = dma_map_single(ss->dev, op->key, op->keylen, DMA_TO_DEVICE); if (dma_mapping_error(ss->dev, rctx->p_key)) { dev_err(ss->dev, "Cannot DMA MAP KEY\n"); err = -EFAULT; goto theend; } ivsize = crypto_skcipher_ivsize(tfm); if (areq->iv && crypto_skcipher_ivsize(tfm) > 0) { err = sun8i_ss_setup_ivs(areq); if (err) goto theend_key; } if (areq->src == areq->dst) { nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL); if (nr_sgs <= 0 || nr_sgs > 8) { dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; } nr_sgd = nr_sgs; } else { nr_sgs = dma_map_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE); if (nr_sgs <= 0 || nr_sgs > 8) { dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend_iv; } nr_sgd = dma_map_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE); if (nr_sgd <= 0 || nr_sgd > 8) { dev_err(ss->dev, "Invalid sg number %d\n", nr_sgd); err = -EINVAL; goto theend_sgs; } } len = areq->cryptlen; i = 0; sg = areq->src; while (i < nr_sgs && sg && len) { if (sg_dma_len(sg) == 0) goto sgs_next; rctx->t_src[i].addr = sg_dma_address(sg); todo = min(len, sg_dma_len(sg)); rctx->t_src[i].len = todo / 4; dev_dbg(ss->dev, "%s total=%u SGS(%d %u off=%d) todo=%u\n", __func__, areq->cryptlen, i, rctx->t_src[i].len, sg->offset, todo); len -= todo; i++; sgs_next: sg = sg_next(sg); } if (len > 0) { dev_err(ss->dev, "remaining len %d\n", 
len); err = -EINVAL; goto theend_sgs; } len = areq->cryptlen; i = 0; sg = areq->dst; while (i < nr_sgd && sg && len) { if (sg_dma_len(sg) == 0) goto sgd_next; rctx->t_dst[i].addr = sg_dma_address(sg); todo = min(len, sg_dma_len(sg)); rctx->t_dst[i].len = todo / 4; dev_dbg(ss->dev, "%s total=%u SGD(%d %u off=%d) todo=%u\n", __func__, areq->cryptlen, i, rctx->t_dst[i].len, sg->offset, todo); len -= todo; i++; sgd_next: sg = sg_next(sg); } if (len > 0) { dev_err(ss->dev, "remaining len %d\n", len); err = -EINVAL; goto theend_sgs; } err = sun8i_ss_run_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm)); theend_sgs: if (areq->src == areq->dst) { dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_BIDIRECTIONAL); } else { dma_unmap_sg(ss->dev, areq->src, nsgs, DMA_TO_DEVICE); dma_unmap_sg(ss->dev, areq->dst, nsgd, DMA_FROM_DEVICE); } theend_iv: if (areq->iv && ivsize > 0) { for (i = 0; i < rctx->niv; i++) { dma_unmap_single(ss->dev, rctx->p_iv[i], ivsize, DMA_TO_DEVICE); memzero_explicit(sf->iv[i], ivsize); } offset = areq->cryptlen - ivsize; if (rctx->op_dir & SS_DECRYPTION) { memcpy(areq->iv, sf->biv, ivsize); memzero_explicit(sf->biv, ivsize); } else { scatterwalk_map_and_copy(areq->iv, areq->dst, offset, ivsize, 0); } } theend_key: dma_unmap_single(ss->dev, rctx->p_key, op->keylen, DMA_TO_DEVICE); theend: return err; } int sun8i_ss_handle_cipher_request(struct crypto_engine *engine, void *areq) { int err; struct skcipher_request *breq = container_of(areq, struct skcipher_request, base); err = sun8i_ss_cipher(breq); local_bh_disable(); crypto_finalize_skcipher_request(engine, breq, err); local_bh_enable(); return 0; } int sun8i_ss_skdecrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); struct crypto_engine *engine; int e; memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx)); rctx->op_dir = SS_DECRYPTION; if (sun8i_ss_need_fallback(areq)) return sun8i_ss_cipher_fallback(areq); e = sun8i_ss_get_engine_number(op->ss); engine = op->ss->flows[e].engine; rctx->flow = e; return crypto_transfer_skcipher_request_to_engine(engine, areq); } int sun8i_ss_skencrypt(struct skcipher_request *areq) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_cipher_req_ctx *rctx = skcipher_request_ctx(areq); struct crypto_engine *engine; int e; memset(rctx, 0, sizeof(struct sun8i_cipher_req_ctx)); rctx->op_dir = SS_ENCRYPTION; if (sun8i_ss_need_fallback(areq)) return sun8i_ss_cipher_fallback(areq); e = sun8i_ss_get_engine_number(op->ss); engine = op->ss->flows[e].engine; rctx->flow = e; return crypto_transfer_skcipher_request_to_engine(engine, areq); } int sun8i_ss_cipher_init(struct crypto_tfm *tfm) { struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); struct sun8i_ss_alg_template *algt; const char *name = crypto_tfm_alg_name(tfm); struct crypto_skcipher *sktfm = __crypto_skcipher_cast(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(sktfm); int err; memset(op, 0, sizeof(struct sun8i_cipher_tfm_ctx)); algt = container_of(alg, struct sun8i_ss_alg_template, alg.skcipher.base); op->ss = algt->ss; op->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(op->ss->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(op->fallback_tfm)); return PTR_ERR(op->fallback_tfm); } sktfm->reqsize = sizeof(struct 
sun8i_cipher_req_ctx) + crypto_skcipher_reqsize(op->fallback_tfm); memcpy(algt->fbname, crypto_tfm_alg_driver_name(crypto_skcipher_tfm(op->fallback_tfm)), CRYPTO_MAX_ALG_NAME); err = pm_runtime_resume_and_get(op->ss->dev); if (err < 0) { dev_err(op->ss->dev, "pm error %d\n", err); goto error_pm; } return 0; error_pm: crypto_free_skcipher(op->fallback_tfm); return err; } void sun8i_ss_cipher_exit(struct crypto_tfm *tfm) { struct sun8i_cipher_tfm_ctx *op = crypto_tfm_ctx(tfm); kfree_sensitive(op->key); crypto_free_skcipher(op->fallback_tfm); pm_runtime_put_sync(op->ss->dev); } int sun8i_ss_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_ss_dev *ss = op->ss; switch (keylen) { case 128 / 8: break; case 192 / 8: break; case 256 / 8: break; default: dev_dbg(ss->dev, "ERROR: Invalid keylen %u\n", keylen); return -EINVAL; } kfree_sensitive(op->key); op->keylen = keylen; op->key = kmemdup(key, keylen, GFP_KERNEL); if (!op->key) return -ENOMEM; crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); } int sun8i_ss_des3_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { struct sun8i_cipher_tfm_ctx *op = crypto_skcipher_ctx(tfm); struct sun8i_ss_dev *ss = op->ss; if (unlikely(keylen != 3 * DES_KEY_SIZE)) { dev_dbg(ss->dev, "Invalid keylen %u\n", keylen); return -EINVAL; } kfree_sensitive(op->key); op->keylen = keylen; op->key = kmemdup(key, keylen, GFP_KERNEL); if (!op->key) return -ENOMEM; crypto_skcipher_clear_flags(op->fallback_tfm, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(op->fallback_tfm, tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK); return crypto_skcipher_setkey(op->fallback_tfm, key, keylen); }
linux-master
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-cipher.c
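A simplified userspace model of the fallback decision in sun8i_ss_need_fallback() above: the SS engine only handles requests whose total length is a non-zero multiple of 16, whose scatterlists have at most 8 entries, whose every segment is 16-byte aligned and sized, and whose source and destination have the same segment layout. struct seg and need_fallback() are stand-ins for the kernel scatterlist walk, which additionally caps each segment by the remaining request length.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_SG 8

struct seg {
	size_t offset;
	size_t length;
};

static bool need_fallback(const struct seg *src, int nsrc,
			  const struct seg *dst, int ndst, size_t cryptlen)
{
	if (cryptlen == 0 || cryptlen % 16)
		return true;
	if (nsrc > MAX_SG || ndst > MAX_SG)
		return true;
	for (int i = 0; i < nsrc; i++)
		if (src[i].length % 16 || src[i].offset % 16)
			return true;
	for (int i = 0; i < ndst; i++)
		if (dst[i].length % 16 || dst[i].offset % 16)
			return true;
	if (nsrc != ndst)		/* same number of segments ... */
		return true;
	for (int i = 0; i < nsrc; i++)	/* ... with matching lengths */
		if (src[i].length != dst[i].length)
			return true;
	return false;
}

int main(void)
{
	struct seg src[] = { { 0, 64 }, { 0, 32 } };
	struct seg dst[] = { { 0, 96 } };

	printf("fallback needed: %s\n",
	       need_fallback(src, 2, dst, 1, 96) ? "yes" : "no");
	return 0;
}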
// SPDX-License-Identifier: GPL-2.0 /* * sun8i-ss-prng.c - hardware cryptographic offloader for * Allwinner A80/A83T SoC * * Copyright (C) 2015-2020 Corentin Labbe <[email protected]> * * This file handle the PRNG found in the SS * * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include "sun8i-ss.h" #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pm_runtime.h> #include <crypto/internal/rng.h> int sun8i_ss_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm); if (ctx->seed && ctx->slen != slen) { kfree_sensitive(ctx->seed); ctx->slen = 0; ctx->seed = NULL; } if (!ctx->seed) ctx->seed = kmalloc(slen, GFP_KERNEL); if (!ctx->seed) return -ENOMEM; memcpy(ctx->seed, seed, slen); ctx->slen = slen; return 0; } int sun8i_ss_prng_init(struct crypto_tfm *tfm) { struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm); memset(ctx, 0, sizeof(struct sun8i_ss_rng_tfm_ctx)); return 0; } void sun8i_ss_prng_exit(struct crypto_tfm *tfm) { struct sun8i_ss_rng_tfm_ctx *ctx = crypto_tfm_ctx(tfm); kfree_sensitive(ctx->seed); ctx->seed = NULL; ctx->slen = 0; } int sun8i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { struct sun8i_ss_rng_tfm_ctx *ctx = crypto_rng_ctx(tfm); struct rng_alg *alg = crypto_rng_alg(tfm); struct sun8i_ss_alg_template *algt; unsigned int todo_with_padding; struct sun8i_ss_dev *ss; dma_addr_t dma_iv, dma_dst; unsigned int todo; int err = 0; int flow; void *d; u32 v; algt = container_of(alg, struct sun8i_ss_alg_template, alg.rng); ss = algt->ss; if (ctx->slen == 0) { dev_err(ss->dev, "The PRNG is not seeded\n"); return -EINVAL; } /* The SS does not give an updated seed, so we need to get a new one. * So we will ask for an extra PRNG_SEED_SIZE data. 
* We want dlen + seedsize rounded up to a multiple of PRNG_DATA_SIZE */ todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE; todo -= todo % PRNG_DATA_SIZE; todo_with_padding = ALIGN(todo, dma_get_cache_alignment()); if (todo_with_padding < todo || todo < dlen) return -EOVERFLOW; d = kzalloc(todo_with_padding, GFP_KERNEL); if (!d) return -ENOMEM; flow = sun8i_ss_get_engine_number(ss); #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG algt->stat_req++; algt->stat_bytes += todo; #endif v = SS_ALG_PRNG | SS_PRNG_CONTINUE | SS_START; if (flow) v |= SS_FLOW1; else v |= SS_FLOW0; dma_iv = dma_map_single(ss->dev, ctx->seed, ctx->slen, DMA_TO_DEVICE); if (dma_mapping_error(ss->dev, dma_iv)) { dev_err(ss->dev, "Cannot DMA MAP IV\n"); err = -EFAULT; goto err_free; } dma_dst = dma_map_single(ss->dev, d, todo, DMA_FROM_DEVICE); if (dma_mapping_error(ss->dev, dma_dst)) { dev_err(ss->dev, "Cannot DMA MAP DST\n"); err = -EFAULT; goto err_iv; } err = pm_runtime_resume_and_get(ss->dev); if (err < 0) goto err_pm; err = 0; mutex_lock(&ss->mlock); writel(dma_iv, ss->base + SS_IV_ADR_REG); /* the PRNG act badly (failing rngtest) without SS_KEY_ADR_REG set */ writel(dma_iv, ss->base + SS_KEY_ADR_REG); writel(dma_dst, ss->base + SS_DST_ADR_REG); writel(todo / 4, ss->base + SS_LEN_ADR_REG); reinit_completion(&ss->flows[flow].complete); ss->flows[flow].status = 0; /* Be sure all data is written before enabling the task */ wmb(); writel(v, ss->base + SS_CTL_REG); wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, msecs_to_jiffies(todo)); if (ss->flows[flow].status == 0) { dev_err(ss->dev, "DMA timeout for PRNG (size=%u)\n", todo); err = -EFAULT; } /* Since cipher and hash use the linux/cryptoengine and that we have * a cryptoengine per flow, we are sure that they will issue only one * request per flow. * Since the cryptoengine wait for completion before submitting a new * one, the mlock could be left just after the final writel. * But cryptoengine cannot handle crypto_rng, so we need to be sure * nothing will use our flow. * The easiest way is to grab mlock until the hardware end our requests. * We could have used a per flow lock, but this would increase * complexity. * The drawback is that no request could be handled for the other flow. */ mutex_unlock(&ss->mlock); pm_runtime_put(ss->dev); err_pm: dma_unmap_single(ss->dev, dma_dst, todo, DMA_FROM_DEVICE); err_iv: dma_unmap_single(ss->dev, dma_iv, ctx->slen, DMA_TO_DEVICE); if (!err) { memcpy(dst, d, dlen); /* Update seed */ memcpy(ctx->seed, d + dlen, ctx->slen); } err_free: kfree_sensitive(d); return err; }
linux-master
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-prng.c
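A sketch of the overflow-guarded buffer sizing in sun8i_ss_prng_generate() above: after enlarging the request for the seed refresh and padding it to the DMA cache alignment, the driver checks that neither step wrapped before allocating. The PRNG_SEED_SIZE, PRNG_DATA_SIZE and CACHE_ALIGN values and the ALIGN_UP macro are assumptions of this sketch; the driver uses dma_get_cache_alignment() and returns -EOVERFLOW.

#include <stdint.h>
#include <stdio.h>

#define PRNG_SEED_SIZE 24	/* placeholder values for the sketch */
#define PRNG_DATA_SIZE 20
#define CACHE_ALIGN    64

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

static int size_request(size_t dlen, size_t *out)
{
	size_t todo = dlen + PRNG_SEED_SIZE + PRNG_DATA_SIZE;
	size_t padded;

	todo -= todo % PRNG_DATA_SIZE;
	padded = ALIGN_UP(todo, CACHE_ALIGN);
	if (padded < todo || todo < dlen)	/* either addition wrapped */
		return -1;			/* -EOVERFLOW in the driver */
	*out = padded;
	return 0;
}

int main(void)
{
	size_t n;

	if (size_request(128, &n) == 0)
		printf("allocate %zu bytes for a 128-byte request\n", n);
	if (size_request(SIZE_MAX - 8, &n) != 0)
		printf("oversized request rejected\n");
	return 0;
}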
// SPDX-License-Identifier: GPL-2.0 /* * sun8i-ss-core.c - hardware cryptographic offloader for * Allwinner A80/A83T SoC * * Copyright (C) 2015-2019 Corentin Labbe <[email protected]> * * Core file which registers crypto algorithms supported by the SecuritySystem * * You could find a link for the datasheet in Documentation/arch/arm/sunxi.rst */ #include <crypto/engine.h> #include <crypto/internal/rng.h> #include <crypto/internal/skcipher.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #include "sun8i-ss.h" static const struct ss_variant ss_a80_variant = { .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, }, .alg_hash = { SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, SS_ID_NOTSUPP, }, .op_mode = { SS_OP_ECB, SS_OP_CBC, }, .ss_clks = { { "bus", 0, 300 * 1000 * 1000 }, { "mod", 0, 300 * 1000 * 1000 }, } }; static const struct ss_variant ss_a83t_variant = { .alg_cipher = { SS_ALG_AES, SS_ALG_DES, SS_ALG_3DES, }, .alg_hash = { SS_ALG_MD5, SS_ALG_SHA1, SS_ALG_SHA224, SS_ALG_SHA256, }, .op_mode = { SS_OP_ECB, SS_OP_CBC, }, .ss_clks = { { "bus", 0, 300 * 1000 * 1000 }, { "mod", 0, 300 * 1000 * 1000 }, } }; /* * sun8i_ss_get_engine_number() get the next channel slot * This is a simple round-robin way of getting the next channel */ int sun8i_ss_get_engine_number(struct sun8i_ss_dev *ss) { return atomic_inc_return(&ss->flow) % MAXFLOW; } int sun8i_ss_run_task(struct sun8i_ss_dev *ss, struct sun8i_cipher_req_ctx *rctx, const char *name) { int flow = rctx->flow; unsigned int ivlen = rctx->ivlen; u32 v = SS_START; int i; #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG ss->flows[flow].stat_req++; #endif /* choose between stream0/stream1 */ if (flow) v |= SS_FLOW1; else v |= SS_FLOW0; v |= rctx->op_mode; v |= rctx->method; if (rctx->op_dir) v |= SS_DECRYPTION; switch (rctx->keylen) { case 128 / 8: v |= SS_AES_128BITS << 7; break; case 192 / 8: v |= SS_AES_192BITS << 7; break; case 256 / 8: v |= SS_AES_256BITS << 7; break; } for (i = 0; i < MAX_SG; i++) { if (!rctx->t_dst[i].addr) break; mutex_lock(&ss->mlock); writel(rctx->p_key, ss->base + SS_KEY_ADR_REG); if (ivlen) { if (rctx->op_dir == SS_ENCRYPTION) { if (i == 0) writel(rctx->p_iv[0], ss->base + SS_IV_ADR_REG); else writel(rctx->t_dst[i - 1].addr + rctx->t_dst[i - 1].len * 4 - ivlen, ss->base + SS_IV_ADR_REG); } else { writel(rctx->p_iv[i], ss->base + SS_IV_ADR_REG); } } dev_dbg(ss->dev, "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x opmode=%x opdir=%x srclen=%d\n", i, flow, name, v, rctx->t_src[i].len, rctx->t_dst[i].len, rctx->method, rctx->op_mode, rctx->op_dir, rctx->t_src[i].len); writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG); writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG); writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG); reinit_completion(&ss->flows[flow].complete); ss->flows[flow].status = 0; wmb(); writel(v, ss->base + SS_CTL_REG); mutex_unlock(&ss->mlock); wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, msecs_to_jiffies(2000)); if (ss->flows[flow].status == 0) { dev_err(ss->dev, "DMA timeout for %s\n", name); return -EFAULT; } } return 0; } static irqreturn_t ss_irq_handler(int irq, void *data) { struct sun8i_ss_dev *ss = (struct sun8i_ss_dev *)data; int flow = 0; u32 p; p = readl(ss->base + 
SS_INT_STA_REG); for (flow = 0; flow < MAXFLOW; flow++) { if (p & (BIT(flow))) { writel(BIT(flow), ss->base + SS_INT_STA_REG); ss->flows[flow].status = 1; complete(&ss->flows[flow].complete); } } return IRQ_HANDLED; } static struct sun8i_ss_alg_template ss_algs[] = { { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ss_algo_id = SS_ID_CIPHER_AES, .ss_blockmode = SS_ID_OP_CBC, .alg.skcipher.base = { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-sun8i-ss", .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, .cra_init = sun8i_ss_cipher_init, .cra_exit = sun8i_ss_cipher_exit, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = sun8i_ss_aes_setkey, .encrypt = sun8i_ss_skencrypt, .decrypt = sun8i_ss_skdecrypt, }, .alg.skcipher.op = { .do_one_request = sun8i_ss_handle_cipher_request, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ss_algo_id = SS_ID_CIPHER_AES, .ss_blockmode = SS_ID_OP_ECB, .alg.skcipher.base = { .base = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-sun8i-ss", .cra_priority = 400, .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, .cra_init = sun8i_ss_cipher_init, .cra_exit = sun8i_ss_cipher_exit, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = sun8i_ss_aes_setkey, .encrypt = sun8i_ss_skencrypt, .decrypt = sun8i_ss_skdecrypt, }, .alg.skcipher.op = { .do_one_request = sun8i_ss_handle_cipher_request, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ss_algo_id = SS_ID_CIPHER_DES3, .ss_blockmode = SS_ID_OP_CBC, .alg.skcipher.base = { .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-des3-sun8i-ss", .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, .cra_init = sun8i_ss_cipher_init, .cra_exit = sun8i_ss_cipher_exit, }, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = sun8i_ss_des3_setkey, .encrypt = sun8i_ss_skencrypt, .decrypt = sun8i_ss_skdecrypt, }, .alg.skcipher.op = { .do_one_request = sun8i_ss_handle_cipher_request, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .ss_algo_id = SS_ID_CIPHER_DES3, .ss_blockmode = SS_ID_OP_ECB, .alg.skcipher.base = { .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "ecb-des3-sun8i-ss", .cra_priority = 400, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_TYPE_SKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .cra_ctxsize = sizeof(struct sun8i_cipher_tfm_ctx), .cra_module = THIS_MODULE, .cra_alignmask = 0xf, .cra_init = sun8i_ss_cipher_init, .cra_exit = sun8i_ss_cipher_exit, }, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = sun8i_ss_des3_setkey, .encrypt = sun8i_ss_skencrypt, .decrypt = sun8i_ss_skdecrypt, }, .alg.skcipher.op = { .do_one_request = sun8i_ss_handle_cipher_request, }, }, #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_PRNG { .type = CRYPTO_ALG_TYPE_RNG, .alg.rng = { .base = { .cra_name 
= "stdrng", .cra_driver_name = "sun8i-ss-prng", .cra_priority = 300, .cra_ctxsize = sizeof(struct sun8i_ss_rng_tfm_ctx), .cra_module = THIS_MODULE, .cra_init = sun8i_ss_prng_init, .cra_exit = sun8i_ss_prng_exit, }, .generate = sun8i_ss_prng_generate, .seed = sun8i_ss_prng_seed, .seedsize = PRNG_SEED_SIZE, } }, #endif #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_HASH { .type = CRYPTO_ALG_TYPE_AHASH, .ss_algo_id = SS_ID_HASH_MD5, .alg.hash.base = { .init = sun8i_ss_hash_init, .update = sun8i_ss_hash_update, .final = sun8i_ss_hash_final, .finup = sun8i_ss_hash_finup, .digest = sun8i_ss_hash_digest, .export = sun8i_ss_hash_export, .import = sun8i_ss_hash_import, .init_tfm = sun8i_ss_hash_init_tfm, .exit_tfm = sun8i_ss_hash_exit_tfm, .halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct md5_state), .base = { .cra_name = "md5", .cra_driver_name = "md5-sun8i-ss", .cra_priority = 300, .cra_alignmask = 3, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = MD5_HMAC_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = sun8i_ss_hash_run, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ss_algo_id = SS_ID_HASH_SHA1, .alg.hash.base = { .init = sun8i_ss_hash_init, .update = sun8i_ss_hash_update, .final = sun8i_ss_hash_final, .finup = sun8i_ss_hash_finup, .digest = sun8i_ss_hash_digest, .export = sun8i_ss_hash_export, .import = sun8i_ss_hash_import, .init_tfm = sun8i_ss_hash_init_tfm, .exit_tfm = sun8i_ss_hash_exit_tfm, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name = "sha1-sun8i-ss", .cra_priority = 300, .cra_alignmask = 3, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = sun8i_ss_hash_run, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ss_algo_id = SS_ID_HASH_SHA224, .alg.hash.base = { .init = sun8i_ss_hash_init, .update = sun8i_ss_hash_update, .final = sun8i_ss_hash_final, .finup = sun8i_ss_hash_finup, .digest = sun8i_ss_hash_digest, .export = sun8i_ss_hash_export, .import = sun8i_ss_hash_import, .init_tfm = sun8i_ss_hash_init_tfm, .exit_tfm = sun8i_ss_hash_exit_tfm, .halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha224", .cra_driver_name = "sha224-sun8i-ss", .cra_priority = 300, .cra_alignmask = 3, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = sun8i_ss_hash_run, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ss_algo_id = SS_ID_HASH_SHA256, .alg.hash.base = { .init = sun8i_ss_hash_init, .update = sun8i_ss_hash_update, .final = sun8i_ss_hash_final, .finup = sun8i_ss_hash_finup, .digest = sun8i_ss_hash_digest, .export = sun8i_ss_hash_export, .import = sun8i_ss_hash_import, .init_tfm = sun8i_ss_hash_init_tfm, .exit_tfm = sun8i_ss_hash_exit_tfm, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct sha256_state), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-sun8i-ss", .cra_priority = 300, .cra_alignmask = 3, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct 
sun8i_ss_hash_tfm_ctx), .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = sun8i_ss_hash_run, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .ss_algo_id = SS_ID_HASH_SHA1, .alg.hash.base = { .init = sun8i_ss_hash_init, .update = sun8i_ss_hash_update, .final = sun8i_ss_hash_final, .finup = sun8i_ss_hash_finup, .digest = sun8i_ss_hash_digest, .export = sun8i_ss_hash_export, .import = sun8i_ss_hash_import, .init_tfm = sun8i_ss_hash_init_tfm, .exit_tfm = sun8i_ss_hash_exit_tfm, .setkey = sun8i_ss_hmac_setkey, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct sha1_state), .base = { .cra_name = "hmac(sha1)", .cra_driver_name = "hmac-sha1-sun8i-ss", .cra_priority = 300, .cra_alignmask = 3, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sun8i_ss_hash_tfm_ctx), .cra_module = THIS_MODULE, } } }, .alg.hash.op = { .do_one_request = sun8i_ss_hash_run, }, }, #endif }; static int sun8i_ss_debugfs_show(struct seq_file *seq, void *v) { struct sun8i_ss_dev *ss __maybe_unused = seq->private; unsigned int i; for (i = 0; i < MAXFLOW; i++) seq_printf(seq, "Channel %d: nreq %lu\n", i, #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG ss->flows[i].stat_req); #else 0ul); #endif for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { if (!ss_algs[i].ss) continue; switch (ss_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", ss_algs[i].alg.skcipher.base.base.cra_driver_name, ss_algs[i].alg.skcipher.base.base.cra_name, ss_algs[i].stat_req, ss_algs[i].stat_fb); seq_printf(seq, "\tLast fallback is: %s\n", ss_algs[i].fbname); seq_printf(seq, "\tFallback due to length: %lu\n", ss_algs[i].stat_fb_len); seq_printf(seq, "\tFallback due to SG length: %lu\n", ss_algs[i].stat_fb_sglen); seq_printf(seq, "\tFallback due to alignment: %lu\n", ss_algs[i].stat_fb_align); seq_printf(seq, "\tFallback due to SG numbers: %lu\n", ss_algs[i].stat_fb_sgnum); break; case CRYPTO_ALG_TYPE_RNG: seq_printf(seq, "%s %s reqs=%lu tsize=%lu\n", ss_algs[i].alg.rng.base.cra_driver_name, ss_algs[i].alg.rng.base.cra_name, ss_algs[i].stat_req, ss_algs[i].stat_bytes); break; case CRYPTO_ALG_TYPE_AHASH: seq_printf(seq, "%s %s reqs=%lu fallback=%lu\n", ss_algs[i].alg.hash.base.halg.base.cra_driver_name, ss_algs[i].alg.hash.base.halg.base.cra_name, ss_algs[i].stat_req, ss_algs[i].stat_fb); seq_printf(seq, "\tLast fallback is: %s\n", ss_algs[i].fbname); seq_printf(seq, "\tFallback due to length: %lu\n", ss_algs[i].stat_fb_len); seq_printf(seq, "\tFallback due to SG length: %lu\n", ss_algs[i].stat_fb_sglen); seq_printf(seq, "\tFallback due to alignment: %lu\n", ss_algs[i].stat_fb_align); seq_printf(seq, "\tFallback due to SG numbers: %lu\n", ss_algs[i].stat_fb_sgnum); break; } } return 0; } DEFINE_SHOW_ATTRIBUTE(sun8i_ss_debugfs); static void sun8i_ss_free_flows(struct sun8i_ss_dev *ss, int i) { while (i >= 0) { crypto_engine_exit(ss->flows[i].engine); i--; } } /* * Allocate the flow list structure */ static int allocate_flows(struct sun8i_ss_dev *ss) { int i, j, err; ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow), GFP_KERNEL); if (!ss->flows) return -ENOMEM; for (i = 0; i < MAXFLOW; i++) { init_completion(&ss->flows[i].complete); ss->flows[i].biv = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, GFP_KERNEL); if (!ss->flows[i].biv) { err = -ENOMEM; goto error_engine; } for (j = 0; j < MAX_SG; j++) { ss->flows[i].iv[j] = devm_kmalloc(ss->dev, AES_BLOCK_SIZE, GFP_KERNEL); if (!ss->flows[i].iv[j]) { 
err = -ENOMEM; goto error_engine; } } /* the padding could be up to two block. */ ss->flows[i].pad = devm_kmalloc(ss->dev, MAX_PAD_SIZE, GFP_KERNEL); if (!ss->flows[i].pad) { err = -ENOMEM; goto error_engine; } ss->flows[i].result = devm_kmalloc(ss->dev, max(SHA256_DIGEST_SIZE, dma_get_cache_alignment()), GFP_KERNEL); if (!ss->flows[i].result) { err = -ENOMEM; goto error_engine; } ss->flows[i].engine = crypto_engine_alloc_init(ss->dev, true); if (!ss->flows[i].engine) { dev_err(ss->dev, "Cannot allocate engine\n"); i--; err = -ENOMEM; goto error_engine; } err = crypto_engine_start(ss->flows[i].engine); if (err) { dev_err(ss->dev, "Cannot start engine\n"); goto error_engine; } } return 0; error_engine: sun8i_ss_free_flows(ss, i); return err; } /* * Power management strategy: The device is suspended unless a TFM exists for * one of the algorithms proposed by this driver. */ static int sun8i_ss_pm_suspend(struct device *dev) { struct sun8i_ss_dev *ss = dev_get_drvdata(dev); int i; reset_control_assert(ss->reset); for (i = 0; i < SS_MAX_CLOCKS; i++) clk_disable_unprepare(ss->ssclks[i]); return 0; } static int sun8i_ss_pm_resume(struct device *dev) { struct sun8i_ss_dev *ss = dev_get_drvdata(dev); int err, i; for (i = 0; i < SS_MAX_CLOCKS; i++) { if (!ss->variant->ss_clks[i].name) continue; err = clk_prepare_enable(ss->ssclks[i]); if (err) { dev_err(ss->dev, "Cannot prepare_enable %s\n", ss->variant->ss_clks[i].name); goto error; } } err = reset_control_deassert(ss->reset); if (err) { dev_err(ss->dev, "Cannot deassert reset control\n"); goto error; } /* enable interrupts for all flows */ writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG); return 0; error: sun8i_ss_pm_suspend(dev); return err; } static const struct dev_pm_ops sun8i_ss_pm_ops = { SET_RUNTIME_PM_OPS(sun8i_ss_pm_suspend, sun8i_ss_pm_resume, NULL) }; static int sun8i_ss_pm_init(struct sun8i_ss_dev *ss) { int err; pm_runtime_use_autosuspend(ss->dev); pm_runtime_set_autosuspend_delay(ss->dev, 2000); err = pm_runtime_set_suspended(ss->dev); if (err) return err; pm_runtime_enable(ss->dev); return err; } static void sun8i_ss_pm_exit(struct sun8i_ss_dev *ss) { pm_runtime_disable(ss->dev); } static int sun8i_ss_register_algs(struct sun8i_ss_dev *ss) { int ss_method, err, id; unsigned int i; for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { ss_algs[i].ss = ss; switch (ss_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: id = ss_algs[i].ss_algo_id; ss_method = ss->variant->alg_cipher[id]; if (ss_method == SS_ID_NOTSUPP) { dev_info(ss->dev, "DEBUG: Algo of %s not supported\n", ss_algs[i].alg.skcipher.base.base.cra_name); ss_algs[i].ss = NULL; break; } id = ss_algs[i].ss_blockmode; ss_method = ss->variant->op_mode[id]; if (ss_method == SS_ID_NOTSUPP) { dev_info(ss->dev, "DEBUG: Blockmode of %s not supported\n", ss_algs[i].alg.skcipher.base.base.cra_name); ss_algs[i].ss = NULL; break; } dev_info(ss->dev, "DEBUG: Register %s\n", ss_algs[i].alg.skcipher.base.base.cra_name); err = crypto_engine_register_skcipher(&ss_algs[i].alg.skcipher); if (err) { dev_err(ss->dev, "Fail to register %s\n", ss_algs[i].alg.skcipher.base.base.cra_name); ss_algs[i].ss = NULL; return err; } break; case CRYPTO_ALG_TYPE_RNG: err = crypto_register_rng(&ss_algs[i].alg.rng); if (err) { dev_err(ss->dev, "Fail to register %s\n", ss_algs[i].alg.rng.base.cra_name); ss_algs[i].ss = NULL; } break; case CRYPTO_ALG_TYPE_AHASH: id = ss_algs[i].ss_algo_id; ss_method = ss->variant->alg_hash[id]; if (ss_method == SS_ID_NOTSUPP) { dev_info(ss->dev, "DEBUG: Algo of %s not supported\n", 
ss_algs[i].alg.hash.base.halg.base.cra_name); ss_algs[i].ss = NULL; break; } dev_info(ss->dev, "Register %s\n", ss_algs[i].alg.hash.base.halg.base.cra_name); err = crypto_engine_register_ahash(&ss_algs[i].alg.hash); if (err) { dev_err(ss->dev, "ERROR: Fail to register %s\n", ss_algs[i].alg.hash.base.halg.base.cra_name); ss_algs[i].ss = NULL; return err; } break; default: ss_algs[i].ss = NULL; dev_err(ss->dev, "ERROR: tried to register an unknown algo\n"); } } return 0; } static void sun8i_ss_unregister_algs(struct sun8i_ss_dev *ss) { unsigned int i; for (i = 0; i < ARRAY_SIZE(ss_algs); i++) { if (!ss_algs[i].ss) continue; switch (ss_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: dev_info(ss->dev, "Unregister %d %s\n", i, ss_algs[i].alg.skcipher.base.base.cra_name); crypto_engine_unregister_skcipher(&ss_algs[i].alg.skcipher); break; case CRYPTO_ALG_TYPE_RNG: dev_info(ss->dev, "Unregister %d %s\n", i, ss_algs[i].alg.rng.base.cra_name); crypto_unregister_rng(&ss_algs[i].alg.rng); break; case CRYPTO_ALG_TYPE_AHASH: dev_info(ss->dev, "Unregister %d %s\n", i, ss_algs[i].alg.hash.base.halg.base.cra_name); crypto_engine_unregister_ahash(&ss_algs[i].alg.hash); break; } } } static int sun8i_ss_get_clks(struct sun8i_ss_dev *ss) { unsigned long cr; int err, i; for (i = 0; i < SS_MAX_CLOCKS; i++) { if (!ss->variant->ss_clks[i].name) continue; ss->ssclks[i] = devm_clk_get(ss->dev, ss->variant->ss_clks[i].name); if (IS_ERR(ss->ssclks[i])) { err = PTR_ERR(ss->ssclks[i]); dev_err(ss->dev, "Cannot get %s SS clock err=%d\n", ss->variant->ss_clks[i].name, err); return err; } cr = clk_get_rate(ss->ssclks[i]); if (!cr) return -EINVAL; if (ss->variant->ss_clks[i].freq > 0 && cr != ss->variant->ss_clks[i].freq) { dev_info(ss->dev, "Set %s clock to %lu (%lu Mhz) from %lu (%lu Mhz)\n", ss->variant->ss_clks[i].name, ss->variant->ss_clks[i].freq, ss->variant->ss_clks[i].freq / 1000000, cr, cr / 1000000); err = clk_set_rate(ss->ssclks[i], ss->variant->ss_clks[i].freq); if (err) dev_err(ss->dev, "Fail to set %s clk speed to %lu hz\n", ss->variant->ss_clks[i].name, ss->variant->ss_clks[i].freq); } if (ss->variant->ss_clks[i].max_freq > 0 && cr > ss->variant->ss_clks[i].max_freq) dev_warn(ss->dev, "Frequency for %s (%lu hz) is higher than datasheet's recommendation (%lu hz)", ss->variant->ss_clks[i].name, cr, ss->variant->ss_clks[i].max_freq); } return 0; } static int sun8i_ss_probe(struct platform_device *pdev) { struct sun8i_ss_dev *ss; int err, irq; u32 v; ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL); if (!ss) return -ENOMEM; ss->dev = &pdev->dev; platform_set_drvdata(pdev, ss); ss->variant = of_device_get_match_data(&pdev->dev); if (!ss->variant) { dev_err(&pdev->dev, "Missing Crypto Engine variant\n"); return -EINVAL; } ss->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ss->base)) return PTR_ERR(ss->base); err = sun8i_ss_get_clks(ss); if (err) return err; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; ss->reset = devm_reset_control_get(&pdev->dev, NULL); if (IS_ERR(ss->reset)) return dev_err_probe(&pdev->dev, PTR_ERR(ss->reset), "No reset control found\n"); mutex_init(&ss->mlock); err = allocate_flows(ss); if (err) return err; err = sun8i_ss_pm_init(ss); if (err) goto error_pm; err = devm_request_irq(&pdev->dev, irq, ss_irq_handler, 0, "sun8i-ss", ss); if (err) { dev_err(ss->dev, "Cannot request SecuritySystem IRQ (err=%d)\n", err); goto error_irq; } err = sun8i_ss_register_algs(ss); if (err) goto error_alg; err = pm_runtime_resume_and_get(ss->dev); if (err < 0) goto error_alg; v = 
readl(ss->base + SS_CTL_REG); v >>= SS_DIE_ID_SHIFT; v &= SS_DIE_ID_MASK; dev_info(&pdev->dev, "Security System Die ID %x\n", v); pm_runtime_put_sync(ss->dev); if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { struct dentry *dbgfs_dir __maybe_unused; struct dentry *dbgfs_stats __maybe_unused; /* Ignore error of debugfs */ dbgfs_dir = debugfs_create_dir("sun8i-ss", NULL); dbgfs_stats = debugfs_create_file("stats", 0444, dbgfs_dir, ss, &sun8i_ss_debugfs_fops); #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG ss->dbgfs_dir = dbgfs_dir; ss->dbgfs_stats = dbgfs_stats; #endif } return 0; error_alg: sun8i_ss_unregister_algs(ss); error_irq: sun8i_ss_pm_exit(ss); error_pm: sun8i_ss_free_flows(ss, MAXFLOW - 1); return err; } static int sun8i_ss_remove(struct platform_device *pdev) { struct sun8i_ss_dev *ss = platform_get_drvdata(pdev); sun8i_ss_unregister_algs(ss); #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG debugfs_remove_recursive(ss->dbgfs_dir); #endif sun8i_ss_free_flows(ss, MAXFLOW - 1); sun8i_ss_pm_exit(ss); return 0; } static const struct of_device_id sun8i_ss_crypto_of_match_table[] = { { .compatible = "allwinner,sun8i-a83t-crypto", .data = &ss_a83t_variant }, { .compatible = "allwinner,sun9i-a80-crypto", .data = &ss_a80_variant }, {} }; MODULE_DEVICE_TABLE(of, sun8i_ss_crypto_of_match_table); static struct platform_driver sun8i_ss_driver = { .probe = sun8i_ss_probe, .remove = sun8i_ss_remove, .driver = { .name = "sun8i-ss", .pm = &sun8i_ss_pm_ops, .of_match_table = sun8i_ss_crypto_of_match_table, }, }; module_platform_driver(sun8i_ss_driver); MODULE_DESCRIPTION("Allwinner SecuritySystem cryptographic offloader"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corentin Labbe <[email protected]>");
linux-master
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-core.c
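The ss_algs[] table above registers each cipher with priority 400 and a crypto_engine do_one_request hook, so kernel users reach the hardware through the ordinary asynchronous skcipher API. The sketch below shows such a caller for "cbc(aes)"; it is illustrative only (the function name and the buffer/key handling are not part of the driver), and it assumes a DMA-able buffer whose length is a multiple of AES_BLOCK_SIZE.

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Minimal synchronous wrapper around the async cbc(aes) skcipher. */
static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   u8 *iv, void *buf, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	/* Priority 400 makes "cbc-aes-sun8i-ss" win over the generic cipher. */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* buf must not live on the stack: it ends up in a scatterlist. */
	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* The alg is CRYPTO_ALG_ASYNC: encrypt returns -EINPROGRESS and the
	 * engine completes it later; crypto_wait_req() blocks until then.
	 */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}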
// SPDX-License-Identifier: GPL-2.0 /* * sun8i-ss-hash.c - hardware cryptographic offloader for * Allwinner A80/A83T SoC * * Copyright (C) 2015-2020 Corentin Labbe <[email protected]> * * This file add support for MD5 and SHA1/SHA224/SHA256. * * You could find the datasheet in Documentation/arch/arm/sunxi.rst */ #include <crypto/hmac.h> #include <crypto/internal/hash.h> #include <crypto/md5.h> #include <crypto/scatterwalk.h> #include <crypto/sha1.h> #include <crypto/sha2.h> #include <linux/bottom_half.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/pm_runtime.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/string.h> #include "sun8i-ss.h" static int sun8i_ss_hashkey(struct sun8i_ss_hash_tfm_ctx *tfmctx, const u8 *key, unsigned int keylen) { struct crypto_shash *xtfm; struct shash_desc *sdesc; size_t len; int ret = 0; xtfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(xtfm)) return PTR_ERR(xtfm); len = sizeof(*sdesc) + crypto_shash_descsize(xtfm); sdesc = kmalloc(len, GFP_KERNEL); if (!sdesc) { ret = -ENOMEM; goto err_hashkey_sdesc; } sdesc->tfm = xtfm; ret = crypto_shash_init(sdesc); if (ret) { dev_err(tfmctx->ss->dev, "shash init error ret=%d\n", ret); goto err_hashkey; } ret = crypto_shash_finup(sdesc, key, keylen, tfmctx->key); if (ret) dev_err(tfmctx->ss->dev, "shash finup error\n"); err_hashkey: kfree(sdesc); err_hashkey_sdesc: crypto_free_shash(xtfm); return ret; } int sun8i_ss_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(ahash); int digestsize, i; int bs = crypto_ahash_blocksize(ahash); int ret; digestsize = crypto_ahash_digestsize(ahash); if (keylen > bs) { ret = sun8i_ss_hashkey(tfmctx, key, keylen); if (ret) return ret; tfmctx->keylen = digestsize; } else { tfmctx->keylen = keylen; memcpy(tfmctx->key, key, keylen); } tfmctx->ipad = kzalloc(bs, GFP_KERNEL); if (!tfmctx->ipad) return -ENOMEM; tfmctx->opad = kzalloc(bs, GFP_KERNEL); if (!tfmctx->opad) { ret = -ENOMEM; goto err_opad; } memset(tfmctx->key + tfmctx->keylen, 0, bs - tfmctx->keylen); memcpy(tfmctx->ipad, tfmctx->key, tfmctx->keylen); memcpy(tfmctx->opad, tfmctx->key, tfmctx->keylen); for (i = 0; i < bs; i++) { tfmctx->ipad[i] ^= HMAC_IPAD_VALUE; tfmctx->opad[i] ^= HMAC_OPAD_VALUE; } ret = crypto_ahash_setkey(tfmctx->fallback_tfm, key, keylen); if (!ret) return 0; memzero_explicit(tfmctx->key, keylen); kfree_sensitive(tfmctx->opad); err_opad: kfree_sensitive(tfmctx->ipad); return ret; } int sun8i_ss_hash_init_tfm(struct crypto_ahash *tfm) { struct sun8i_ss_hash_tfm_ctx *op = crypto_ahash_ctx(tfm); struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ss_alg_template *algt; int err; algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); op->ss = algt->ss; /* FALLBACK */ op->fallback_tfm = crypto_alloc_ahash(crypto_ahash_alg_name(tfm), 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(op->fallback_tfm)) { dev_err(algt->ss->dev, "Fallback driver could no be loaded\n"); return PTR_ERR(op->fallback_tfm); } crypto_ahash_set_statesize(tfm, crypto_ahash_statesize(op->fallback_tfm)); crypto_ahash_set_reqsize(tfm, sizeof(struct sun8i_ss_hash_reqctx) + crypto_ahash_reqsize(op->fallback_tfm)); memcpy(algt->fbname, crypto_ahash_driver_name(op->fallback_tfm), CRYPTO_MAX_ALG_NAME); err = pm_runtime_get_sync(op->ss->dev); if (err < 0) goto error_pm; return 0; error_pm: pm_runtime_put_noidle(op->ss->dev); 
crypto_free_ahash(op->fallback_tfm); return err; } void sun8i_ss_hash_exit_tfm(struct crypto_ahash *tfm) { struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); kfree_sensitive(tfmctx->ipad); kfree_sensitive(tfmctx->opad); crypto_free_ahash(tfmctx->fallback_tfm); pm_runtime_put_sync_suspend(tfmctx->ss->dev); } int sun8i_ss_hash_init(struct ahash_request *areq) { struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); memset(rctx, 0, sizeof(struct sun8i_ss_hash_reqctx)); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_ahash_init(&rctx->fallback_req); } int sun8i_ss_hash_export(struct ahash_request *areq, void *out) { struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_ahash_export(&rctx->fallback_req, out); } int sun8i_ss_hash_import(struct ahash_request *areq, const void *in) { struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_ahash_import(&rctx->fallback_req, in); } int sun8i_ss_hash_final(struct ahash_request *areq) { struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.result = areq->result; if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ss_alg_template *algt __maybe_unused; algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG algt->stat_fb++; #endif } return crypto_ahash_final(&rctx->fallback_req); } int sun8i_ss_hash_update(struct ahash_request *areq) { struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.nbytes = areq->nbytes; rctx->fallback_req.src = areq->src; return crypto_ahash_update(&rctx->fallback_req); } int sun8i_ss_hash_finup(struct ahash_request *areq) { struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.nbytes = areq->nbytes; rctx->fallback_req.src = areq->src; rctx->fallback_req.result = areq->result; if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { struct ahash_alg *alg = crypto_ahash_alg(tfm); struct 
sun8i_ss_alg_template *algt __maybe_unused; algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG algt->stat_fb++; #endif } return crypto_ahash_finup(&rctx->fallback_req); } static int sun8i_ss_hash_digest_fb(struct ahash_request *areq) { struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); ahash_request_set_tfm(&rctx->fallback_req, tfmctx->fallback_tfm); rctx->fallback_req.base.flags = areq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; rctx->fallback_req.nbytes = areq->nbytes; rctx->fallback_req.src = areq->src; rctx->fallback_req.result = areq->result; if (IS_ENABLED(CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG)) { struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ss_alg_template *algt __maybe_unused; algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG algt->stat_fb++; #endif } return crypto_ahash_digest(&rctx->fallback_req); } static int sun8i_ss_run_hash_task(struct sun8i_ss_dev *ss, struct sun8i_ss_hash_reqctx *rctx, const char *name) { int flow = rctx->flow; u32 v = SS_START; int i; #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG ss->flows[flow].stat_req++; #endif /* choose between stream0/stream1 */ if (flow) v |= SS_FLOW1; else v |= SS_FLOW0; v |= rctx->method; for (i = 0; i < MAX_SG; i++) { if (!rctx->t_dst[i].addr) break; mutex_lock(&ss->mlock); if (i > 0) { v |= BIT(17); writel(rctx->t_dst[i - 1].addr, ss->base + SS_KEY_ADR_REG); writel(rctx->t_dst[i - 1].addr, ss->base + SS_IV_ADR_REG); } dev_dbg(ss->dev, "Processing SG %d on flow %d %s ctl=%x %d to %d method=%x src=%x dst=%x\n", i, flow, name, v, rctx->t_src[i].len, rctx->t_dst[i].len, rctx->method, rctx->t_src[i].addr, rctx->t_dst[i].addr); writel(rctx->t_src[i].addr, ss->base + SS_SRC_ADR_REG); writel(rctx->t_dst[i].addr, ss->base + SS_DST_ADR_REG); writel(rctx->t_src[i].len, ss->base + SS_LEN_ADR_REG); writel(BIT(0) | BIT(1), ss->base + SS_INT_CTL_REG); reinit_completion(&ss->flows[flow].complete); ss->flows[flow].status = 0; wmb(); writel(v, ss->base + SS_CTL_REG); mutex_unlock(&ss->mlock); wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, msecs_to_jiffies(2000)); if (ss->flows[flow].status == 0) { dev_err(ss->dev, "DMA timeout for %s\n", name); return -EFAULT; } } return 0; } static bool sun8i_ss_hash_need_fallback(struct ahash_request *areq) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ss_alg_template *algt; struct scatterlist *sg; algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); if (areq->nbytes == 0) { algt->stat_fb_len++; return true; } if (areq->nbytes >= MAX_PAD_SIZE - 64) { algt->stat_fb_len++; return true; } /* we need to reserve one SG for the padding one */ if (sg_nents(areq->src) > MAX_SG - 1) { algt->stat_fb_sgnum++; return true; } sg = areq->src; while (sg) { /* SS can operate hash only on full block size * since SS support only MD5,sha1,sha224 and sha256, blocksize * is always 64 */ /* Only the last block could be bounced to the pad buffer */ if (sg->length % 64 && sg_next(sg)) { algt->stat_fb_sglen++; return true; } if (!IS_ALIGNED(sg->offset, sizeof(u32))) { algt->stat_fb_align++; return true; } if (sg->length % 4) { algt->stat_fb_sglen++; return true; } sg = sg_next(sg); } return false; } int sun8i_ss_hash_digest(struct ahash_request *areq) { struct crypto_ahash *tfm = 
crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ss_alg_template *algt; struct sun8i_ss_dev *ss; struct crypto_engine *engine; int e; if (sun8i_ss_hash_need_fallback(areq)) return sun8i_ss_hash_digest_fb(areq); algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); ss = algt->ss; e = sun8i_ss_get_engine_number(ss); rctx->flow = e; engine = ss->flows[e].engine; return crypto_transfer_hash_request_to_engine(engine, areq); } static u64 hash_pad(__le32 *buf, unsigned int bufsize, u64 padi, u64 byte_count, bool le, int bs) { u64 fill, min_fill, j, k; __be64 *bebits; __le64 *lebits; j = padi; buf[j++] = cpu_to_le32(0x80); if (bs == 64) { fill = 64 - (byte_count % 64); min_fill = 2 * sizeof(u32) + sizeof(u32); } else { fill = 128 - (byte_count % 128); min_fill = 4 * sizeof(u32) + sizeof(u32); } if (fill < min_fill) fill += bs; k = j; j += (fill - min_fill) / sizeof(u32); if (j * 4 > bufsize) { pr_err("%s OVERFLOW %llu\n", __func__, j); return 0; } for (; k < j; k++) buf[k] = 0; if (le) { /* MD5 */ lebits = (__le64 *)&buf[j]; *lebits = cpu_to_le64(byte_count << 3); j += 2; } else { if (bs == 64) { /* sha1 sha224 sha256 */ bebits = (__be64 *)&buf[j]; *bebits = cpu_to_be64(byte_count << 3); j += 2; } else { /* sha384 sha512*/ bebits = (__be64 *)&buf[j]; *bebits = cpu_to_be64(byte_count >> 61); j += 2; bebits = (__be64 *)&buf[j]; *bebits = cpu_to_be64(byte_count << 3); j += 2; } } if (j * 4 > bufsize) { pr_err("%s OVERFLOW %llu\n", __func__, j); return 0; } return j; } /* sun8i_ss_hash_run - run an ahash request * Send the data of the request to the SS along with an extra SG with padding */ int sun8i_ss_hash_run(struct crypto_engine *engine, void *breq) { struct ahash_request *areq = container_of(breq, struct ahash_request, base); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct sun8i_ss_hash_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm); struct sun8i_ss_hash_reqctx *rctx = ahash_request_ctx(areq); struct ahash_alg *alg = crypto_ahash_alg(tfm); struct sun8i_ss_alg_template *algt; struct sun8i_ss_dev *ss; struct scatterlist *sg; int bs = crypto_ahash_blocksize(tfm); int nr_sgs, err, digestsize; unsigned int len; u64 byte_count; void *pad, *result; int j, i, k, todo; dma_addr_t addr_res, addr_pad, addr_xpad; __le32 *bf; /* HMAC step: * 0: normal hashing * 1: IPAD * 2: OPAD */ int hmac = 0; algt = container_of(alg, struct sun8i_ss_alg_template, alg.hash.base); ss = algt->ss; digestsize = crypto_ahash_digestsize(tfm); if (digestsize == SHA224_DIGEST_SIZE) digestsize = SHA256_DIGEST_SIZE; result = ss->flows[rctx->flow].result; pad = ss->flows[rctx->flow].pad; bf = (__le32 *)pad; for (i = 0; i < MAX_SG; i++) { rctx->t_dst[i].addr = 0; rctx->t_dst[i].len = 0; } #ifdef CONFIG_CRYPTO_DEV_SUN8I_SS_DEBUG algt->stat_req++; #endif rctx->method = ss->variant->alg_hash[algt->ss_algo_id]; nr_sgs = dma_map_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); if (nr_sgs <= 0 || nr_sgs > MAX_SG) { dev_err(ss->dev, "Invalid sg number %d\n", nr_sgs); err = -EINVAL; goto theend; } addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE); if (dma_mapping_error(ss->dev, addr_res)) { dev_err(ss->dev, "DMA map dest\n"); err = -EINVAL; goto err_dma_result; } j = 0; len = areq->nbytes; sg = areq->src; i = 0; while (len > 0 && sg) { if (sg_dma_len(sg) == 0) { sg = sg_next(sg); continue; } todo = min(len, sg_dma_len(sg)); /* only the last SG could be with a size not modulo64 */ if (todo % 
64 == 0) { rctx->t_src[i].addr = sg_dma_address(sg); rctx->t_src[i].len = todo / 4; rctx->t_dst[i].addr = addr_res; rctx->t_dst[i].len = digestsize / 4; len -= todo; } else { scatterwalk_map_and_copy(bf, sg, 0, todo, 0); j += todo / 4; len -= todo; } sg = sg_next(sg); i++; } if (len > 0) { dev_err(ss->dev, "remaining len %d\n", len); err = -EINVAL; goto theend; } if (j > 0) i--; retry: byte_count = areq->nbytes; if (tfmctx->keylen && hmac == 0) { hmac = 1; /* shift all SG one slot up, to free slot 0 for IPAD */ for (k = 6; k >= 0; k--) { rctx->t_src[k + 1].addr = rctx->t_src[k].addr; rctx->t_src[k + 1].len = rctx->t_src[k].len; rctx->t_dst[k + 1].addr = rctx->t_dst[k].addr; rctx->t_dst[k + 1].len = rctx->t_dst[k].len; } addr_xpad = dma_map_single(ss->dev, tfmctx->ipad, bs, DMA_TO_DEVICE); err = dma_mapping_error(ss->dev, addr_xpad); if (err) { dev_err(ss->dev, "Fail to create DMA mapping of ipad\n"); goto err_dma_xpad; } rctx->t_src[0].addr = addr_xpad; rctx->t_src[0].len = bs / 4; rctx->t_dst[0].addr = addr_res; rctx->t_dst[0].len = digestsize / 4; i++; byte_count = areq->nbytes + bs; } if (tfmctx->keylen && hmac == 2) { for (i = 0; i < MAX_SG; i++) { rctx->t_src[i].addr = 0; rctx->t_src[i].len = 0; rctx->t_dst[i].addr = 0; rctx->t_dst[i].len = 0; } addr_res = dma_map_single(ss->dev, result, digestsize, DMA_FROM_DEVICE); if (dma_mapping_error(ss->dev, addr_res)) { dev_err(ss->dev, "Fail to create DMA mapping of result\n"); err = -EINVAL; goto err_dma_result; } addr_xpad = dma_map_single(ss->dev, tfmctx->opad, bs, DMA_TO_DEVICE); err = dma_mapping_error(ss->dev, addr_xpad); if (err) { dev_err(ss->dev, "Fail to create DMA mapping of opad\n"); goto err_dma_xpad; } rctx->t_src[0].addr = addr_xpad; rctx->t_src[0].len = bs / 4; memcpy(bf, result, digestsize); j = digestsize / 4; i = 1; byte_count = digestsize + bs; rctx->t_dst[0].addr = addr_res; rctx->t_dst[0].len = digestsize / 4; } switch (algt->ss_algo_id) { case SS_ID_HASH_MD5: j = hash_pad(bf, 4096, j, byte_count, true, bs); break; case SS_ID_HASH_SHA1: case SS_ID_HASH_SHA224: case SS_ID_HASH_SHA256: j = hash_pad(bf, 4096, j, byte_count, false, bs); break; } if (!j) { err = -EINVAL; goto theend; } addr_pad = dma_map_single(ss->dev, pad, j * 4, DMA_TO_DEVICE); if (dma_mapping_error(ss->dev, addr_pad)) { dev_err(ss->dev, "DMA error on padding SG\n"); err = -EINVAL; goto err_dma_pad; } rctx->t_src[i].addr = addr_pad; rctx->t_src[i].len = j; rctx->t_dst[i].addr = addr_res; rctx->t_dst[i].len = digestsize / 4; err = sun8i_ss_run_hash_task(ss, rctx, crypto_tfm_alg_name(areq->base.tfm)); /* * mini helper for checking dma map/unmap * flow start for hmac = 0 (and HMAC = 1) * HMAC = 0 * MAP src * MAP res * * retry: * if hmac then hmac = 1 * MAP xpad (ipad) * if hmac == 2 * MAP res * MAP xpad (opad) * MAP pad * ACTION! * UNMAP pad * if hmac * UNMAP xpad * UNMAP res * if hmac < 2 * UNMAP SRC * * if hmac = 1 then hmac = 2 goto retry */ dma_unmap_single(ss->dev, addr_pad, j * 4, DMA_TO_DEVICE); err_dma_pad: if (hmac > 0) dma_unmap_single(ss->dev, addr_xpad, bs, DMA_TO_DEVICE); err_dma_xpad: dma_unmap_single(ss->dev, addr_res, digestsize, DMA_FROM_DEVICE); err_dma_result: if (hmac < 2) dma_unmap_sg(ss->dev, areq->src, sg_nents(areq->src), DMA_TO_DEVICE); if (hmac == 1 && !err) { hmac = 2; goto retry; } if (!err) memcpy(areq->result, result, crypto_ahash_digestsize(tfm)); theend: local_bh_disable(); crypto_finalize_hash_request(engine, breq, err); local_bh_enable(); return 0; }
linux-master
drivers/crypto/allwinner/sun8i-ss/sun8i-ss-hash.c
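sun8i_ss_hash_run() sends the request data followed by one extra scatterlist entry produced by hash_pad(), which appends the usual Merkle-Damgård tail (0x80, zeroes, bit count); HMAC is handled as two hardware passes over ipad||message and then opad||inner-digest, using the pads precomputed in sun8i_ss_hmac_setkey(). The helper below is a stand-alone restatement of the padding-length arithmetic for the 64-byte block case (MD5/SHA-1/SHA-224/SHA-256) only; it is not part of the driver and assumes, as the fallback checks guarantee, that byte_count is a multiple of 4.

/*
 * How many 32-bit words hash_pad() appends for a 64-byte block algorithm.
 * hash_pad() works in words, so the minimum tail is three words: one word
 * carrying the 0x80 terminator plus two words of 64-bit bit count.
 */
static inline u64 example_pad_words(u64 byte_count)
{
	u64 fill = 64 - (byte_count % 64);	/* bytes to the next block boundary */
	const u64 min_fill = 3 * sizeof(u32);	/* 0x80 word + 64-bit length */

	if (fill < min_fill)
		fill += 64;			/* no room left, spill into one more block */

	return fill / sizeof(u32);		/* words written into the pad buffer */
}

A byte-oriented implementation would only need a nine-byte minimum tail; the three-word minimum here simply mirrors the word-granular layout the SS engine consumes.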
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for IBM PowerNV compression accelerator * * Copyright (C) 2015 Dan Streetman, IBM Corp */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "nx-842.h" #include <linux/timer.h> #include <asm/prom.h> #include <asm/icswx.h> #include <asm/vas.h> #include <asm/reg.h> #include <asm/opal-api.h> #include <asm/opal.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Dan Streetman <[email protected]>"); MODULE_DESCRIPTION("H/W Compression driver for IBM PowerNV processors"); MODULE_ALIAS_CRYPTO("842"); MODULE_ALIAS_CRYPTO("842-nx"); #define WORKMEM_ALIGN (CRB_ALIGN) #define CSB_WAIT_MAX (5000) /* ms */ #define VAS_RETRIES (10) struct nx842_workmem { /* Below fields must be properly aligned */ struct coprocessor_request_block crb; /* CRB_ALIGN align */ struct data_descriptor_entry ddl_in[DDL_LEN_MAX]; /* DDE_ALIGN align */ struct data_descriptor_entry ddl_out[DDL_LEN_MAX]; /* DDE_ALIGN align */ /* Above fields must be properly aligned */ ktime_t start; char padding[WORKMEM_ALIGN]; /* unused, to allow alignment */ } __packed __aligned(WORKMEM_ALIGN); struct nx_coproc { unsigned int chip_id; unsigned int ct; /* Can be 842 or GZIP high/normal*/ unsigned int ci; /* Coprocessor instance, used with icswx */ struct { struct vas_window *rxwin; int id; } vas; struct list_head list; }; /* * Send the request to NX engine on the chip for the corresponding CPU * where the process is executing. Use with VAS function. */ static DEFINE_PER_CPU(struct vas_window *, cpu_txwin); /* no cpu hotplug on powernv, so this list never changes after init */ static LIST_HEAD(nx_coprocs); static unsigned int nx842_ct; /* used in icswx function */ /* * Using same values as in skiboot or coprocessor type representing * in NX workbook. */ #define NX_CT_GZIP (2) /* on P9 and later */ #define NX_CT_842 (3) static int (*nx842_powernv_exec)(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlenp, void *workmem, int fc); /* * setup_indirect_dde - Setup an indirect DDE * * The DDE is setup with the DDE count, byte count, and address of * first direct DDE in the list. */ static void setup_indirect_dde(struct data_descriptor_entry *dde, struct data_descriptor_entry *ddl, unsigned int dde_count, unsigned int byte_count) { dde->flags = 0; dde->count = dde_count; dde->index = 0; dde->length = cpu_to_be32(byte_count); dde->address = cpu_to_be64(nx842_get_pa(ddl)); } /* * setup_direct_dde - Setup single DDE from buffer * * The DDE is setup with the buffer and length. The buffer must be properly * aligned. The used length is returned. * Returns: * N Successfully set up DDE with N bytes */ static unsigned int setup_direct_dde(struct data_descriptor_entry *dde, unsigned long pa, unsigned int len) { unsigned int l = min_t(unsigned int, len, LEN_ON_PAGE(pa)); dde->flags = 0; dde->count = 0; dde->index = 0; dde->length = cpu_to_be32(l); dde->address = cpu_to_be64(pa); return l; } /* * setup_ddl - Setup DDL from buffer * * Returns: * 0 Successfully set up DDL */ static int setup_ddl(struct data_descriptor_entry *dde, struct data_descriptor_entry *ddl, unsigned char *buf, unsigned int len, bool in) { unsigned long pa = nx842_get_pa(buf); int i, ret, total_len = len; if (!IS_ALIGNED(pa, DDE_BUFFER_ALIGN)) { pr_debug("%s buffer pa 0x%lx not 0x%x-byte aligned\n", in ? 
"input" : "output", pa, DDE_BUFFER_ALIGN); return -EINVAL; } /* only need to check last mult; since buffer must be * DDE_BUFFER_ALIGN aligned, and that is a multiple of * DDE_BUFFER_SIZE_MULT, and pre-last page DDE buffers * are guaranteed a multiple of DDE_BUFFER_SIZE_MULT. */ if (len % DDE_BUFFER_LAST_MULT) { pr_debug("%s buffer len 0x%x not a multiple of 0x%x\n", in ? "input" : "output", len, DDE_BUFFER_LAST_MULT); if (in) return -EINVAL; len = round_down(len, DDE_BUFFER_LAST_MULT); } /* use a single direct DDE */ if (len <= LEN_ON_PAGE(pa)) { ret = setup_direct_dde(dde, pa, len); WARN_ON(ret < len); return 0; } /* use the DDL */ for (i = 0; i < DDL_LEN_MAX && len > 0; i++) { ret = setup_direct_dde(&ddl[i], pa, len); buf += ret; len -= ret; pa = nx842_get_pa(buf); } if (len > 0) { pr_debug("0x%x total %s bytes 0x%x too many for DDL.\n", total_len, in ? "input" : "output", len); if (in) return -EMSGSIZE; total_len -= len; } setup_indirect_dde(dde, ddl, i, total_len); return 0; } #define CSB_ERR(csb, msg, ...) \ pr_err("ERROR: " msg " : %02x %02x %02x %02x %08x\n", \ ##__VA_ARGS__, (csb)->flags, \ (csb)->cs, (csb)->cc, (csb)->ce, \ be32_to_cpu((csb)->count)) #define CSB_ERR_ADDR(csb, msg, ...) \ CSB_ERR(csb, msg " at %lx", ##__VA_ARGS__, \ (unsigned long)be64_to_cpu((csb)->address)) static int wait_for_csb(struct nx842_workmem *wmem, struct coprocessor_status_block *csb) { ktime_t start = wmem->start, now = ktime_get(); ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); while (!(READ_ONCE(csb->flags) & CSB_V)) { cpu_relax(); now = ktime_get(); if (ktime_after(now, timeout)) break; } /* hw has updated csb and output buffer */ barrier(); /* check CSB flags */ if (!(csb->flags & CSB_V)) { CSB_ERR(csb, "CSB still not valid after %ld us, giving up", (long)ktime_us_delta(now, start)); return -ETIMEDOUT; } if (csb->flags & CSB_F) { CSB_ERR(csb, "Invalid CSB format"); return -EPROTO; } if (csb->flags & CSB_CH) { CSB_ERR(csb, "Invalid CSB chaining state"); return -EPROTO; } /* verify CSB completion sequence is 0 */ if (csb->cs) { CSB_ERR(csb, "Invalid CSB completion sequence"); return -EPROTO; } /* check CSB Completion Code */ switch (csb->cc) { /* no error */ case CSB_CC_SUCCESS: break; case CSB_CC_TPBC_GT_SPBC: /* not an error, but the compressed data is * larger than the uncompressed data :( */ break; /* input data errors */ case CSB_CC_OPERAND_OVERLAP: /* input and output buffers overlap */ CSB_ERR(csb, "Operand Overlap error"); return -EINVAL; case CSB_CC_INVALID_OPERAND: CSB_ERR(csb, "Invalid operand"); return -EINVAL; case CSB_CC_NOSPC: /* output buffer too small */ return -ENOSPC; case CSB_CC_ABORT: CSB_ERR(csb, "Function aborted"); return -EINTR; case CSB_CC_CRC_MISMATCH: CSB_ERR(csb, "CRC mismatch"); return -EINVAL; case CSB_CC_TEMPL_INVALID: CSB_ERR(csb, "Compressed data template invalid"); return -EINVAL; case CSB_CC_TEMPL_OVERFLOW: CSB_ERR(csb, "Compressed data template shows data past end"); return -EINVAL; case CSB_CC_EXCEED_BYTE_COUNT: /* P9 or later */ /* * DDE byte count exceeds the limit specified in Maximum * byte count register. 
*/ CSB_ERR(csb, "DDE byte count exceeds the limit"); return -EINVAL; /* these should not happen */ case CSB_CC_INVALID_ALIGN: /* setup_ddl should have detected this */ CSB_ERR_ADDR(csb, "Invalid alignment"); return -EINVAL; case CSB_CC_DATA_LENGTH: /* setup_ddl should have detected this */ CSB_ERR(csb, "Invalid data length"); return -EINVAL; case CSB_CC_WR_TRANSLATION: case CSB_CC_TRANSLATION: case CSB_CC_TRANSLATION_DUP1: case CSB_CC_TRANSLATION_DUP2: case CSB_CC_TRANSLATION_DUP3: case CSB_CC_TRANSLATION_DUP4: case CSB_CC_TRANSLATION_DUP5: case CSB_CC_TRANSLATION_DUP6: /* should not happen, we use physical addrs */ CSB_ERR_ADDR(csb, "Translation error"); return -EPROTO; case CSB_CC_WR_PROTECTION: case CSB_CC_PROTECTION: case CSB_CC_PROTECTION_DUP1: case CSB_CC_PROTECTION_DUP2: case CSB_CC_PROTECTION_DUP3: case CSB_CC_PROTECTION_DUP4: case CSB_CC_PROTECTION_DUP5: case CSB_CC_PROTECTION_DUP6: /* should not happen, we use physical addrs */ CSB_ERR_ADDR(csb, "Protection error"); return -EPROTO; case CSB_CC_PRIVILEGE: /* shouldn't happen, we're in HYP mode */ CSB_ERR(csb, "Insufficient Privilege error"); return -EPROTO; case CSB_CC_EXCESSIVE_DDE: /* shouldn't happen, setup_ddl doesn't use many dde's */ CSB_ERR(csb, "Too many DDEs in DDL"); return -EINVAL; case CSB_CC_TRANSPORT: case CSB_CC_INVALID_CRB: /* P9 or later */ /* shouldn't happen, we setup CRB correctly */ CSB_ERR(csb, "Invalid CRB"); return -EINVAL; case CSB_CC_INVALID_DDE: /* P9 or later */ /* * shouldn't happen, setup_direct/indirect_dde creates * DDE right */ CSB_ERR(csb, "Invalid DDE"); return -EINVAL; case CSB_CC_SEGMENTED_DDL: /* shouldn't happen, setup_ddl creates DDL right */ CSB_ERR(csb, "Segmented DDL error"); return -EINVAL; case CSB_CC_DDE_OVERFLOW: /* shouldn't happen, setup_ddl creates DDL right */ CSB_ERR(csb, "DDE overflow error"); return -EINVAL; case CSB_CC_SESSION: /* should not happen with ICSWX */ CSB_ERR(csb, "Session violation error"); return -EPROTO; case CSB_CC_CHAIN: /* should not happen, we don't use chained CRBs */ CSB_ERR(csb, "Chained CRB error"); return -EPROTO; case CSB_CC_SEQUENCE: /* should not happen, we don't use chained CRBs */ CSB_ERR(csb, "CRB sequence number error"); return -EPROTO; case CSB_CC_UNKNOWN_CODE: CSB_ERR(csb, "Unknown subfunction code"); return -EPROTO; /* hardware errors */ case CSB_CC_RD_EXTERNAL: case CSB_CC_RD_EXTERNAL_DUP1: case CSB_CC_RD_EXTERNAL_DUP2: case CSB_CC_RD_EXTERNAL_DUP3: CSB_ERR_ADDR(csb, "Read error outside coprocessor"); return -EPROTO; case CSB_CC_WR_EXTERNAL: CSB_ERR_ADDR(csb, "Write error outside coprocessor"); return -EPROTO; case CSB_CC_INTERNAL: CSB_ERR(csb, "Internal error in coprocessor"); return -EPROTO; case CSB_CC_PROVISION: CSB_ERR(csb, "Storage provision error"); return -EPROTO; case CSB_CC_HW: CSB_ERR(csb, "Correctable hardware error"); return -EPROTO; case CSB_CC_HW_EXPIRED_TIMER: /* P9 or later */ CSB_ERR(csb, "Job did not finish within allowed time"); return -EPROTO; default: CSB_ERR(csb, "Invalid CC %d", csb->cc); return -EPROTO; } /* check Completion Extension state */ if (csb->ce & CSB_CE_TERMINATION) { CSB_ERR(csb, "CSB request was terminated"); return -EPROTO; } if (csb->ce & CSB_CE_INCOMPLETE) { CSB_ERR(csb, "CSB request not complete"); return -EPROTO; } if (!(csb->ce & CSB_CE_TPBC)) { CSB_ERR(csb, "TPBC not provided, unknown target length"); return -EPROTO; } /* successful completion */ pr_debug_ratelimited("Processed %u bytes in %lu us\n", be32_to_cpu(csb->count), (unsigned long)ktime_us_delta(now, start)); return 0; } static int 
nx842_config_crb(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int outlen, struct nx842_workmem *wmem) { struct coprocessor_request_block *crb; struct coprocessor_status_block *csb; u64 csb_addr; int ret; crb = &wmem->crb; csb = &crb->csb; /* Clear any previous values */ memset(crb, 0, sizeof(*crb)); /* set up DDLs */ ret = setup_ddl(&crb->source, wmem->ddl_in, (unsigned char *)in, inlen, true); if (ret) return ret; ret = setup_ddl(&crb->target, wmem->ddl_out, out, outlen, false); if (ret) return ret; /* set up CRB's CSB addr */ csb_addr = nx842_get_pa(csb) & CRB_CSB_ADDRESS; csb_addr |= CRB_CSB_AT; /* Addrs are phys */ crb->csb_addr = cpu_to_be64(csb_addr); return 0; } /** * nx842_exec_icswx - compress/decompress data using the 842 algorithm * * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems. * This compresses or decompresses the provided input buffer into the provided * output buffer. * * Upon return from this function @outlen contains the length of the * output data. If there is an error then @outlen will be 0 and an * error will be specified by the return code from this function. * * The @workmem buffer should only be used by one function call at a time. * * @in: input buffer pointer * @inlen: input buffer size * @out: output buffer pointer * @outlenp: output buffer size pointer * @workmem: working memory buffer pointer, size determined by * nx842_powernv_driver.workmem_size * @fc: function code, see CCW Function Codes in nx-842.h * * Returns: * 0 Success, output of length @outlenp stored in the buffer at @out * -ENODEV Hardware unavailable * -ENOSPC Output buffer is to small * -EMSGSIZE Input buffer too large * -EINVAL buffer constraints do not fix nx842_constraints * -EPROTO hardware error during operation * -ETIMEDOUT hardware did not complete operation in reasonable time * -EINTR operation was aborted */ static int nx842_exec_icswx(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlenp, void *workmem, int fc) { struct coprocessor_request_block *crb; struct coprocessor_status_block *csb; struct nx842_workmem *wmem; int ret; u32 ccw; unsigned int outlen = *outlenp; wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN); *outlenp = 0; /* shoudn't happen, we don't load without a coproc */ if (!nx842_ct) { pr_err_ratelimited("coprocessor CT is 0"); return -ENODEV; } ret = nx842_config_crb(in, inlen, out, outlen, wmem); if (ret) return ret; crb = &wmem->crb; csb = &crb->csb; /* set up CCW */ ccw = 0; ccw = SET_FIELD(CCW_CT, ccw, nx842_ct); ccw = SET_FIELD(CCW_CI_842, ccw, 0); /* use 0 for hw auto-selection */ ccw = SET_FIELD(CCW_FC_842, ccw, fc); wmem->start = ktime_get(); /* do ICSWX */ ret = icswx(cpu_to_be32(ccw), crb); pr_debug_ratelimited("icswx CR %x ccw %x crb->ccw %x\n", ret, (unsigned int)ccw, (unsigned int)be32_to_cpu(crb->ccw)); /* * NX842 coprocessor sets 3rd bit in CR register with XER[S0]. * XER[S0] is the integer summary overflow bit which is nothing * to do NX. Since this bit can be set with other return values, * mask this bit. */ ret &= ~ICSWX_XERS0; switch (ret) { case ICSWX_INITIATED: ret = wait_for_csb(wmem, csb); break; case ICSWX_BUSY: pr_debug_ratelimited("842 Coprocessor busy\n"); ret = -EBUSY; break; case ICSWX_REJECTED: pr_err_ratelimited("ICSWX rejected\n"); ret = -EPROTO; break; } if (!ret) *outlenp = be32_to_cpu(csb->count); return ret; } /** * nx842_exec_vas - compress/decompress data using the 842 algorithm * * (De)compression provided by the NX842 coprocessor on IBM PowerNV systems. 
* This compresses or decompresses the provided input buffer into the provided * output buffer. * * Upon return from this function @outlen contains the length of the * output data. If there is an error then @outlen will be 0 and an * error will be specified by the return code from this function. * * The @workmem buffer should only be used by one function call at a time. * * @in: input buffer pointer * @inlen: input buffer size * @out: output buffer pointer * @outlenp: output buffer size pointer * @workmem: working memory buffer pointer, size determined by * nx842_powernv_driver.workmem_size * @fc: function code, see CCW Function Codes in nx-842.h * * Returns: * 0 Success, output of length @outlenp stored in the buffer * at @out * -ENODEV Hardware unavailable * -ENOSPC Output buffer is to small * -EMSGSIZE Input buffer too large * -EINVAL buffer constraints do not fix nx842_constraints * -EPROTO hardware error during operation * -ETIMEDOUT hardware did not complete operation in reasonable time * -EINTR operation was aborted */ static int nx842_exec_vas(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlenp, void *workmem, int fc) { struct coprocessor_request_block *crb; struct coprocessor_status_block *csb; struct nx842_workmem *wmem; struct vas_window *txwin; int ret, i = 0; u32 ccw; unsigned int outlen = *outlenp; wmem = PTR_ALIGN(workmem, WORKMEM_ALIGN); *outlenp = 0; crb = &wmem->crb; csb = &crb->csb; ret = nx842_config_crb(in, inlen, out, outlen, wmem); if (ret) return ret; ccw = 0; ccw = SET_FIELD(CCW_FC_842, ccw, fc); crb->ccw = cpu_to_be32(ccw); do { wmem->start = ktime_get(); preempt_disable(); txwin = this_cpu_read(cpu_txwin); /* * VAS copy CRB into L2 cache. Refer <asm/vas.h>. * @crb and @offset. */ vas_copy_crb(crb, 0); /* * VAS paste previously copied CRB to NX. * @txwin, @offset and @last (must be true). */ ret = vas_paste_crb(txwin, 0, 1); preempt_enable(); /* * Retry copy/paste function for VAS failures. */ } while (ret && (i++ < VAS_RETRIES)); if (ret) { pr_err_ratelimited("VAS copy/paste failed\n"); return ret; } ret = wait_for_csb(wmem, csb); if (!ret) *outlenp = be32_to_cpu(csb->count); return ret; } /** * nx842_powernv_compress - Compress data using the 842 algorithm * * Compression provided by the NX842 coprocessor on IBM PowerNV systems. * The input buffer is compressed and the result is stored in the * provided output buffer. * * Upon return from this function @outlen contains the length of the * compressed data. If there is an error then @outlen will be 0 and an * error will be specified by the return code from this function. * * @in: input buffer pointer * @inlen: input buffer size * @out: output buffer pointer * @outlenp: output buffer size pointer * @wmem: working memory buffer pointer, size determined by * nx842_powernv_driver.workmem_size * * Returns: see @nx842_powernv_exec() */ static int nx842_powernv_compress(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlenp, void *wmem) { return nx842_powernv_exec(in, inlen, out, outlenp, wmem, CCW_FC_842_COMP_CRC); } /** * nx842_powernv_decompress - Decompress data using the 842 algorithm * * Decompression provided by the NX842 coprocessor on IBM PowerNV systems. * The input buffer is decompressed and the result is stored in the * provided output buffer. * * Upon return from this function @outlen contains the length of the * decompressed data. 
If there is an error then @outlen will be 0 and an * error will be specified by the return code from this function. * * @in: input buffer pointer * @inlen: input buffer size * @out: output buffer pointer * @outlenp: output buffer size pointer * @wmem: working memory buffer pointer, size determined by * nx842_powernv_driver.workmem_size * * Returns: see @nx842_powernv_exec() */ static int nx842_powernv_decompress(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlenp, void *wmem) { return nx842_powernv_exec(in, inlen, out, outlenp, wmem, CCW_FC_842_DECOMP_CRC); } static inline void nx_add_coprocs_list(struct nx_coproc *coproc, int chipid) { coproc->chip_id = chipid; INIT_LIST_HEAD(&coproc->list); list_add(&coproc->list, &nx_coprocs); } static struct vas_window *nx_alloc_txwin(struct nx_coproc *coproc) { struct vas_window *txwin = NULL; struct vas_tx_win_attr txattr; /* * Kernel requests will be high priority. So open send * windows only for high priority RxFIFO entries. */ vas_init_tx_win_attr(&txattr, coproc->ct); txattr.lpid = 0; /* lpid is 0 for kernel requests */ /* * Open a VAS send window which is used to send request to NX. */ txwin = vas_tx_win_open(coproc->vas.id, coproc->ct, &txattr); if (IS_ERR(txwin)) pr_err("ibm,nx-842: Can not open TX window: %ld\n", PTR_ERR(txwin)); return txwin; } /* * Identify chip ID for each CPU, open send wndow for the corresponding NX * engine and save txwin in percpu cpu_txwin. * cpu_txwin is used in copy/paste operation for each compression / * decompression request. */ static int nx_open_percpu_txwins(void) { struct nx_coproc *coproc, *n; unsigned int i, chip_id; for_each_possible_cpu(i) { struct vas_window *txwin = NULL; chip_id = cpu_to_chip_id(i); list_for_each_entry_safe(coproc, n, &nx_coprocs, list) { /* * Kernel requests use only high priority FIFOs. So * open send windows for these FIFOs. * GZIP is not supported in kernel right now. 
*/ if (coproc->ct != VAS_COP_TYPE_842_HIPRI) continue; if (coproc->chip_id == chip_id) { txwin = nx_alloc_txwin(coproc); if (IS_ERR(txwin)) return PTR_ERR(txwin); per_cpu(cpu_txwin, i) = txwin; break; } } if (!per_cpu(cpu_txwin, i)) { /* shouldn't happen, Each chip will have NX engine */ pr_err("NX engine is not available for CPU %d\n", i); return -EINVAL; } } return 0; } static int __init nx_set_ct(struct nx_coproc *coproc, const char *priority, int high, int normal) { if (!strcmp(priority, "High")) coproc->ct = high; else if (!strcmp(priority, "Normal")) coproc->ct = normal; else { pr_err("Invalid RxFIFO priority value\n"); return -EINVAL; } return 0; } static int __init vas_cfg_coproc_info(struct device_node *dn, int chip_id, int vasid, int type, int *ct) { struct vas_window *rxwin = NULL; struct vas_rx_win_attr rxattr; u32 lpid, pid, tid, fifo_size; struct nx_coproc *coproc; u64 rx_fifo; const char *priority; int ret; ret = of_property_read_u64(dn, "rx-fifo-address", &rx_fifo); if (ret) { pr_err("Missing rx-fifo-address property\n"); return ret; } ret = of_property_read_u32(dn, "rx-fifo-size", &fifo_size); if (ret) { pr_err("Missing rx-fifo-size property\n"); return ret; } ret = of_property_read_u32(dn, "lpid", &lpid); if (ret) { pr_err("Missing lpid property\n"); return ret; } ret = of_property_read_u32(dn, "pid", &pid); if (ret) { pr_err("Missing pid property\n"); return ret; } ret = of_property_read_u32(dn, "tid", &tid); if (ret) { pr_err("Missing tid property\n"); return ret; } ret = of_property_read_string(dn, "priority", &priority); if (ret) { pr_err("Missing priority property\n"); return ret; } coproc = kzalloc(sizeof(*coproc), GFP_KERNEL); if (!coproc) return -ENOMEM; if (type == NX_CT_842) ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_842_HIPRI, VAS_COP_TYPE_842); else if (type == NX_CT_GZIP) ret = nx_set_ct(coproc, priority, VAS_COP_TYPE_GZIP_HIPRI, VAS_COP_TYPE_GZIP); if (ret) goto err_out; vas_init_rx_win_attr(&rxattr, coproc->ct); rxattr.rx_fifo = rx_fifo; rxattr.rx_fifo_size = fifo_size; rxattr.lnotify_lpid = lpid; rxattr.lnotify_pid = pid; rxattr.lnotify_tid = tid; /* * Maximum RX window credits can not be more than #CRBs in * RxFIFO. Otherwise, can get checkstop if RxFIFO overruns. */ rxattr.wcreds_max = fifo_size / CRB_SIZE; /* * Open a VAS receice window which is used to configure RxFIFO * for NX. */ rxwin = vas_rx_win_open(vasid, coproc->ct, &rxattr); if (IS_ERR(rxwin)) { ret = PTR_ERR(rxwin); pr_err("setting RxFIFO with VAS failed: %d\n", ret); goto err_out; } coproc->vas.rxwin = rxwin; coproc->vas.id = vasid; nx_add_coprocs_list(coproc, chip_id); /* * (lpid, pid, tid) combination has to be unique for each * coprocessor instance in the system. So to make it * unique, skiboot uses coprocessor type such as 842 or * GZIP for pid and provides this value to kernel in pid * device-tree property. 
*/ *ct = pid; return 0; err_out: kfree(coproc); return ret; } static int __init nx_coproc_init(int chip_id, int ct_842, int ct_gzip) { int ret = 0; if (opal_check_token(OPAL_NX_COPROC_INIT)) { ret = opal_nx_coproc_init(chip_id, ct_842); if (!ret) ret = opal_nx_coproc_init(chip_id, ct_gzip); if (ret) { ret = opal_error_code(ret); pr_err("Failed to initialize NX for chip(%d): %d\n", chip_id, ret); } } else pr_warn("Firmware doesn't support NX initialization\n"); return ret; } static int __init find_nx_device_tree(struct device_node *dn, int chip_id, int vasid, int type, char *devname, int *ct) { int ret = 0; if (of_device_is_compatible(dn, devname)) { ret = vas_cfg_coproc_info(dn, chip_id, vasid, type, ct); if (ret) of_node_put(dn); } return ret; } static int __init nx_powernv_probe_vas(struct device_node *pn) { int chip_id, vasid, ret = 0; int ct_842 = 0, ct_gzip = 0; struct device_node *dn; chip_id = of_get_ibm_chip_id(pn); if (chip_id < 0) { pr_err("ibm,chip-id missing\n"); return -EINVAL; } vasid = chip_to_vas_id(chip_id); if (vasid < 0) { pr_err("Unable to map chip_id %d to vasid\n", chip_id); return -EINVAL; } for_each_child_of_node(pn, dn) { ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_842, "ibm,p9-nx-842", &ct_842); if (!ret) ret = find_nx_device_tree(dn, chip_id, vasid, NX_CT_GZIP, "ibm,p9-nx-gzip", &ct_gzip); if (ret) { of_node_put(dn); return ret; } } if (!ct_842 || !ct_gzip) { pr_err("NX FIFO nodes are missing\n"); return -EINVAL; } /* * Initialize NX instance for both high and normal priority FIFOs. */ ret = nx_coproc_init(chip_id, ct_842, ct_gzip); return ret; } static int __init nx842_powernv_probe(struct device_node *dn) { struct nx_coproc *coproc; unsigned int ct, ci; int chip_id; chip_id = of_get_ibm_chip_id(dn); if (chip_id < 0) { pr_err("ibm,chip-id missing\n"); return -EINVAL; } if (of_property_read_u32(dn, "ibm,842-coprocessor-type", &ct)) { pr_err("ibm,842-coprocessor-type missing\n"); return -EINVAL; } if (of_property_read_u32(dn, "ibm,842-coprocessor-instance", &ci)) { pr_err("ibm,842-coprocessor-instance missing\n"); return -EINVAL; } coproc = kzalloc(sizeof(*coproc), GFP_KERNEL); if (!coproc) return -ENOMEM; coproc->ct = ct; coproc->ci = ci; nx_add_coprocs_list(coproc, chip_id); pr_info("coprocessor found on chip %d, CT %d CI %d\n", chip_id, ct, ci); if (!nx842_ct) nx842_ct = ct; else if (nx842_ct != ct) pr_err("NX842 chip %d, CT %d != first found CT %d\n", chip_id, ct, nx842_ct); return 0; } static void nx_delete_coprocs(void) { struct nx_coproc *coproc, *n; struct vas_window *txwin; int i; /* * close percpu txwins that are opened for the corresponding coproc. 
*/ for_each_possible_cpu(i) { txwin = per_cpu(cpu_txwin, i); if (txwin) vas_win_close(txwin); per_cpu(cpu_txwin, i) = NULL; } list_for_each_entry_safe(coproc, n, &nx_coprocs, list) { if (coproc->vas.rxwin) vas_win_close(coproc->vas.rxwin); list_del(&coproc->list); kfree(coproc); } } static struct nx842_constraints nx842_powernv_constraints = { .alignment = DDE_BUFFER_ALIGN, .multiple = DDE_BUFFER_LAST_MULT, .minimum = DDE_BUFFER_LAST_MULT, .maximum = (DDL_LEN_MAX - 1) * PAGE_SIZE, }; static struct nx842_driver nx842_powernv_driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, .workmem_size = sizeof(struct nx842_workmem), .constraints = &nx842_powernv_constraints, .compress = nx842_powernv_compress, .decompress = nx842_powernv_decompress, }; static int nx842_powernv_crypto_init(struct crypto_tfm *tfm) { return nx842_crypto_init(tfm, &nx842_powernv_driver); } static struct crypto_alg nx842_powernv_alg = { .cra_name = "842", .cra_driver_name = "842-nx", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct nx842_crypto_ctx), .cra_module = THIS_MODULE, .cra_init = nx842_powernv_crypto_init, .cra_exit = nx842_crypto_exit, .cra_u = { .compress = { .coa_compress = nx842_crypto_compress, .coa_decompress = nx842_crypto_decompress } } }; static __init int nx_compress_powernv_init(void) { struct device_node *dn; int ret; /* verify workmem size/align restrictions */ BUILD_BUG_ON(WORKMEM_ALIGN % CRB_ALIGN); BUILD_BUG_ON(CRB_ALIGN % DDE_ALIGN); BUILD_BUG_ON(CRB_SIZE % DDE_ALIGN); /* verify buffer size/align restrictions */ BUILD_BUG_ON(PAGE_SIZE % DDE_BUFFER_ALIGN); BUILD_BUG_ON(DDE_BUFFER_ALIGN % DDE_BUFFER_SIZE_MULT); BUILD_BUG_ON(DDE_BUFFER_SIZE_MULT % DDE_BUFFER_LAST_MULT); for_each_compatible_node(dn, NULL, "ibm,power9-nx") { ret = nx_powernv_probe_vas(dn); if (ret) { nx_delete_coprocs(); of_node_put(dn); return ret; } } if (list_empty(&nx_coprocs)) { for_each_compatible_node(dn, NULL, "ibm,power-nx") nx842_powernv_probe(dn); if (!nx842_ct) return -ENODEV; nx842_powernv_exec = nx842_exec_icswx; } else { /* * Register VAS user space API for NX GZIP so * that user space can use GZIP engine. * Using high FIFO priority for kernel requests and * normal FIFO priority is assigned for userspace. * 842 compression is supported only in kernel. */ ret = vas_register_api_powernv(THIS_MODULE, VAS_COP_TYPE_GZIP, "nx-gzip"); /* * GZIP is not supported in kernel right now. * So open tx windows only for 842. */ if (!ret) ret = nx_open_percpu_txwins(); if (ret) { nx_delete_coprocs(); return ret; } nx842_powernv_exec = nx842_exec_vas; } ret = crypto_register_alg(&nx842_powernv_alg); if (ret) { nx_delete_coprocs(); return ret; } return 0; } module_init(nx_compress_powernv_init); static void __exit nx_compress_powernv_exit(void) { /* * GZIP engine is supported only in power9 or later and nx842_ct * is used on power8 (icswx). * VAS API for NX GZIP is registered during init for user space * use. So delete this API use for GZIP engine. */ if (!nx842_ct) vas_unregister_api_powernv(); crypto_unregister_alg(&nx842_powernv_alg); nx_delete_coprocs(); } module_exit(nx_compress_powernv_exit);
linux-master
drivers/crypto/nx/nx-common-powernv.c
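The nx-common-powernv.c driver above ends by registering a plain compression algorithm named "842" (driver name "842-nx") with the crypto API, backed either by VAS windows on POWER9 or icswx on POWER8. As a rough orientation only, the sketch below shows how an in-kernel consumer might exercise that algorithm through the legacy synchronous comp API; the function name, buffers and return convention are illustrative assumptions, not part of the driver.

/*
 * Illustrative sketch (not driver code): compress a buffer through the
 * "842" algorithm registered by nx-common-powernv.c. The allocation by
 * cra_name typically selects the highest-priority provider, e.g. "842-nx".
 */
#include <linux/crypto.h>
#include <linux/err.h>

static int example_842_compress(const u8 *src, unsigned int slen,
				u8 *dst, unsigned int dlen)
{
	struct crypto_comp *tfm;
	unsigned int outlen = dlen;
	int ret;

	tfm = crypto_alloc_comp("842", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_comp_compress(tfm, src, slen, dst, &outlen);
	crypto_free_comp(tfm);

	/* placeholder convention: negative error or compressed length */
	return ret ? ret : outlen;
}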
// SPDX-License-Identifier: GPL-2.0-only /* * debugfs routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. * * Author: Kent Yoder <[email protected]> */ #include <linux/device.h> #include <linux/kobject.h> #include <linux/string.h> #include <linux/debugfs.h> #include <linux/module.h> #include <linux/init.h> #include <linux/crypto.h> #include <crypto/hash.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" #ifdef CONFIG_DEBUG_FS /* * debugfs * * For documentation on these attributes, please see: * * Documentation/ABI/testing/debugfs-pfo-nx-crypto */ void nx_debugfs_init(struct nx_crypto_driver *drv) { struct dentry *root; root = debugfs_create_dir(NX_NAME, NULL); drv->dfs_root = root; debugfs_create_u32("aes_ops", S_IRUSR | S_IRGRP | S_IROTH, root, &drv->stats.aes_ops.counter); debugfs_create_u32("sha256_ops", S_IRUSR | S_IRGRP | S_IROTH, root, &drv->stats.sha256_ops.counter); debugfs_create_u32("sha512_ops", S_IRUSR | S_IRGRP | S_IROTH, root, &drv->stats.sha512_ops.counter); debugfs_create_u64("aes_bytes", S_IRUSR | S_IRGRP | S_IROTH, root, &drv->stats.aes_bytes.counter); debugfs_create_u64("sha256_bytes", S_IRUSR | S_IRGRP | S_IROTH, root, &drv->stats.sha256_bytes.counter); debugfs_create_u64("sha512_bytes", S_IRUSR | S_IRGRP | S_IROTH, root, &drv->stats.sha512_bytes.counter); debugfs_create_u32("errors", S_IRUSR | S_IRGRP | S_IROTH, root, &drv->stats.errors.counter); debugfs_create_u32("last_error", S_IRUSR | S_IRGRP | S_IROTH, root, &drv->stats.last_error.counter); debugfs_create_u32("last_error_pid", S_IRUSR | S_IRGRP | S_IROTH, root, &drv->stats.last_error_pid.counter); } void nx_debugfs_fini(struct nx_crypto_driver *drv) { debugfs_remove_recursive(drv->dfs_root); } #endif
linux-master
drivers/crypto/nx/nx_debugfs.c
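nx_debugfs.c exposes the driver's atomic statistics as read-only debugfs attributes. The userspace snippet below is a minimal sketch of reading one of them; it assumes debugfs is mounted at /sys/kernel/debug and that NX_NAME expands to the directory documented in Documentation/ABI/testing/debugfs-pfo-nx-crypto (commonly nx-crypto), so the path may need adjusting on a given system.

/*
 * Userspace sketch: read the aes_ops counter created by nx_debugfs_init().
 * The directory name is an assumption -- verify it against the ABI doc.
 */
#include <stdio.h>

int main(void)
{
	unsigned int aes_ops = 0;
	FILE *f = fopen("/sys/kernel/debug/nx-crypto/aes_ops", "r");

	if (!f) {
		perror("aes_ops");
		return 1;
	}
	if (fscanf(f, "%u", &aes_ops) == 1)
		printf("NX AES operations so far: %u\n", aes_ops);
	fclose(f);
	return 0;
}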
// SPDX-License-Identifier: GPL-2.0-only /* * Routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. * * Author: Kent Yoder <[email protected]> */ #include <crypto/internal/aead.h> #include <crypto/internal/hash.h> #include <crypto/aes.h> #include <crypto/sha2.h> #include <crypto/algapi.h> #include <crypto/scatterwalk.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/scatterlist.h> #include <linux/device.h> #include <linux/of.h> #include <asm/hvcall.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" /** * nx_hcall_sync - make an H_COP_OP hcall for the passed in op structure * * @nx_ctx: the crypto context handle * @op: PFO operation struct to pass in * @may_sleep: flag indicating the request can sleep * * Make the hcall, retrying while the hardware is busy. If we cannot yield * the thread, limit the number of retries to 10 here. */ int nx_hcall_sync(struct nx_crypto_ctx *nx_ctx, struct vio_pfo_op *op, u32 may_sleep) { int rc, retries = 10; struct vio_dev *viodev = nx_driver.viodev; atomic_inc(&(nx_ctx->stats->sync_ops)); do { rc = vio_h_cop_sync(viodev, op); } while (rc == -EBUSY && !may_sleep && retries--); if (rc) { dev_dbg(&viodev->dev, "vio_h_cop_sync failed: rc: %d " "hcall rc: %ld\n", rc, op->hcall_err); atomic_inc(&(nx_ctx->stats->errors)); atomic_set(&(nx_ctx->stats->last_error), op->hcall_err); atomic_set(&(nx_ctx->stats->last_error_pid), current->pid); } return rc; } /** * nx_build_sg_list - build an NX scatter list describing a single buffer * * @sg_head: pointer to the first scatter list element to build * @start_addr: pointer to the linear buffer * @len: length of the data at @start_addr * @sgmax: the largest number of scatter list elements we're allowed to create * * This function will start writing nx_sg elements at @sg_head and keep * writing them until all of the data from @start_addr is described or * until sgmax elements have been written. Scatter list elements will be * created such that none of the elements describes a buffer that crosses a 4K * boundary. */ struct nx_sg *nx_build_sg_list(struct nx_sg *sg_head, u8 *start_addr, unsigned int *len, u32 sgmax) { unsigned int sg_len = 0; struct nx_sg *sg; u64 sg_addr = (u64)start_addr; u64 end_addr; /* determine the start and end for this address range - slightly * different if this is in VMALLOC_REGION */ if (is_vmalloc_addr(start_addr)) sg_addr = page_to_phys(vmalloc_to_page(start_addr)) + offset_in_page(sg_addr); else sg_addr = __pa(sg_addr); end_addr = sg_addr + *len; /* each iteration will write one struct nx_sg element and add the * length of data described by that element to sg_len. Once @len bytes * have been described (or @sgmax elements have been written), the * loop ends. min_t is used to ensure @end_addr falls on the same page * as sg_addr, if not, we need to create another nx_sg element for the * data on the next page. * * Also when using vmalloc'ed data, every time that a system page * boundary is crossed the physical address needs to be re-calculated. 
*/ for (sg = sg_head; sg_len < *len; sg++) { u64 next_page; sg->addr = sg_addr; sg_addr = min_t(u64, NX_PAGE_NUM(sg_addr + NX_PAGE_SIZE), end_addr); next_page = (sg->addr & PAGE_MASK) + PAGE_SIZE; sg->len = min_t(u64, sg_addr, next_page) - sg->addr; sg_len += sg->len; if (sg_addr >= next_page && is_vmalloc_addr(start_addr + sg_len)) { sg_addr = page_to_phys(vmalloc_to_page( start_addr + sg_len)); end_addr = sg_addr + *len - sg_len; } if ((sg - sg_head) == sgmax) { pr_err("nx: scatter/gather list overflow, pid: %d\n", current->pid); sg++; break; } } *len = sg_len; /* return the moved sg_head pointer */ return sg; } /** * nx_walk_and_build - walk a linux scatterlist and build an nx scatterlist * * @nx_dst: pointer to the first nx_sg element to write * @sglen: max number of nx_sg entries we're allowed to write * @sg_src: pointer to the source linux scatterlist to walk * @start: number of bytes to fast-forward past at the beginning of @sg_src * @src_len: number of bytes to walk in @sg_src */ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, unsigned int sglen, struct scatterlist *sg_src, unsigned int start, unsigned int *src_len) { struct scatter_walk walk; struct nx_sg *nx_sg = nx_dst; unsigned int n, offset = 0, len = *src_len; char *dst; /* we need to fast forward through @start bytes first */ for (;;) { scatterwalk_start(&walk, sg_src); if (start < offset + sg_src->length) break; offset += sg_src->length; sg_src = sg_next(sg_src); } /* start - offset is the number of bytes to advance in the scatterlist * element we're currently looking at */ scatterwalk_advance(&walk, start - offset); while (len && (nx_sg - nx_dst) < sglen) { n = scatterwalk_clamp(&walk, len); if (!n) { /* In cases where we have scatterlist chain sg_next * handles with it properly */ scatterwalk_start(&walk, sg_next(walk.sg)); n = scatterwalk_clamp(&walk, len); } dst = scatterwalk_map(&walk); nx_sg = nx_build_sg_list(nx_sg, dst, &n, sglen - (nx_sg - nx_dst)); len -= n; scatterwalk_unmap(dst); scatterwalk_advance(&walk, n); scatterwalk_done(&walk, SCATTERWALK_FROM_SG, len); } /* update to_process */ *src_len -= len; /* return the moved destination pointer */ return nx_sg; } /** * trim_sg_list - ensures the bound in sg list. * @sg: sg list head * @end: sg lisg end * @delta: is the amount we need to crop in order to bound the list. * @nbytes: length of data in the scatterlists or data length - whichever * is greater. */ static long int trim_sg_list(struct nx_sg *sg, struct nx_sg *end, unsigned int delta, unsigned int *nbytes) { long int oplen; long int data_back; unsigned int is_delta = delta; while (delta && end > sg) { struct nx_sg *last = end - 1; if (last->len > delta) { last->len -= delta; delta = 0; } else { end--; delta -= last->len; } } /* There are cases where we need to crop list in order to make it * a block size multiple, but we also need to align data. In order to * that we need to calculate how much we need to put back to be * processed */ oplen = (sg - end) * sizeof(struct nx_sg); if (is_delta) { data_back = (abs(oplen) / AES_BLOCK_SIZE) * sg->len; data_back = *nbytes - (data_back & ~(AES_BLOCK_SIZE - 1)); *nbytes -= data_back; } return oplen; } /** * nx_build_sg_lists - walk the input scatterlists and build arrays of NX * scatterlists based on them. 
* * @nx_ctx: NX crypto context for the lists we're building * @iv: iv data, if the algorithm requires it * @dst: destination scatterlist * @src: source scatterlist * @nbytes: length of data described in the scatterlists * @offset: number of bytes to fast-forward past at the beginning of * scatterlists. * @oiv: destination for the iv data, if the algorithm requires it * * This is common code shared by all the AES algorithms. It uses the crypto * scatterlist walk routines to traverse input and output scatterlists, building * corresponding NX scatterlists */ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, const u8 *iv, struct scatterlist *dst, struct scatterlist *src, unsigned int *nbytes, unsigned int offset, u8 *oiv) { unsigned int delta = 0; unsigned int total = *nbytes; struct nx_sg *nx_insg = nx_ctx->in_sg; struct nx_sg *nx_outsg = nx_ctx->out_sg; unsigned int max_sg_len; max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_driver.of.max_sg_len/sizeof(struct nx_sg)); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); if (oiv) memcpy(oiv, iv, AES_BLOCK_SIZE); *nbytes = min_t(u64, *nbytes, nx_ctx->ap->databytelen); nx_outsg = nx_walk_and_build(nx_outsg, max_sg_len, dst, offset, nbytes); nx_insg = nx_walk_and_build(nx_insg, max_sg_len, src, offset, nbytes); if (*nbytes < total) delta = *nbytes - (*nbytes & ~(AES_BLOCK_SIZE - 1)); /* these lengths should be negative, which will indicate to phyp that * the input and output parameters are scatterlists, not linear * buffers */ nx_ctx->op.inlen = trim_sg_list(nx_ctx->in_sg, nx_insg, delta, nbytes); nx_ctx->op.outlen = trim_sg_list(nx_ctx->out_sg, nx_outsg, delta, nbytes); return 0; } /** * nx_ctx_init - initialize an nx_ctx's vio_pfo_op struct * * @nx_ctx: the nx context to initialize * @function: the function code for the op */ void nx_ctx_init(struct nx_crypto_ctx *nx_ctx, unsigned int function) { spin_lock_init(&nx_ctx->lock); memset(nx_ctx->kmem, 0, nx_ctx->kmem_len); nx_ctx->csbcpb->csb.valid |= NX_CSB_VALID_BIT; nx_ctx->op.flags = function; nx_ctx->op.csbcpb = __pa(nx_ctx->csbcpb); nx_ctx->op.in = __pa(nx_ctx->in_sg); nx_ctx->op.out = __pa(nx_ctx->out_sg); if (nx_ctx->csbcpb_aead) { nx_ctx->csbcpb_aead->csb.valid |= NX_CSB_VALID_BIT; nx_ctx->op_aead.flags = function; nx_ctx->op_aead.csbcpb = __pa(nx_ctx->csbcpb_aead); nx_ctx->op_aead.in = __pa(nx_ctx->in_sg); nx_ctx->op_aead.out = __pa(nx_ctx->out_sg); } } static void nx_of_update_status(struct device *dev, struct property *p, struct nx_of *props) { if (!strncmp(p->value, "okay", p->length)) { props->status = NX_WAITING; props->flags |= NX_OF_FLAG_STATUS_SET; } else { dev_info(dev, "%s: status '%s' is not 'okay'\n", __func__, (char *)p->value); } } static void nx_of_update_sglen(struct device *dev, struct property *p, struct nx_of *props) { if (p->length != sizeof(props->max_sg_len)) { dev_err(dev, "%s: unexpected format for " "ibm,max-sg-len property\n", __func__); dev_dbg(dev, "%s: ibm,max-sg-len is %d bytes " "long, expected %zd bytes\n", __func__, p->length, sizeof(props->max_sg_len)); return; } props->max_sg_len = *(u32 *)p->value; props->flags |= NX_OF_FLAG_MAXSGLEN_SET; } static void nx_of_update_msc(struct device *dev, struct property *p, struct nx_of *props) { struct msc_triplet *trip; struct max_sync_cop *msc; unsigned int bytes_so_far, i, lenp; msc = (struct max_sync_cop *)p->value; lenp = p->length; /* You can't tell if the data read in for this property is sane by its * size alone. This is because there are sizes embedded in the data * structure. 
The best we can do is check lengths as we parse and bail * as soon as a length error is detected. */ bytes_so_far = 0; while ((bytes_so_far + sizeof(struct max_sync_cop)) <= lenp) { bytes_so_far += sizeof(struct max_sync_cop); trip = msc->trip; for (i = 0; ((bytes_so_far + sizeof(struct msc_triplet)) <= lenp) && i < msc->triplets; i++) { if (msc->fc >= NX_MAX_FC || msc->mode >= NX_MAX_MODE) { dev_err(dev, "unknown function code/mode " "combo: %d/%d (ignored)\n", msc->fc, msc->mode); goto next_loop; } if (!trip->sglen || trip->databytelen < NX_PAGE_SIZE) { dev_warn(dev, "bogus sglen/databytelen: " "%u/%u (ignored)\n", trip->sglen, trip->databytelen); goto next_loop; } switch (trip->keybitlen) { case 128: case 160: props->ap[msc->fc][msc->mode][0].databytelen = trip->databytelen; props->ap[msc->fc][msc->mode][0].sglen = trip->sglen; break; case 192: props->ap[msc->fc][msc->mode][1].databytelen = trip->databytelen; props->ap[msc->fc][msc->mode][1].sglen = trip->sglen; break; case 256: if (msc->fc == NX_FC_AES) { props->ap[msc->fc][msc->mode][2]. databytelen = trip->databytelen; props->ap[msc->fc][msc->mode][2].sglen = trip->sglen; } else if (msc->fc == NX_FC_AES_HMAC || msc->fc == NX_FC_SHA) { props->ap[msc->fc][msc->mode][1]. databytelen = trip->databytelen; props->ap[msc->fc][msc->mode][1].sglen = trip->sglen; } else { dev_warn(dev, "unknown function " "code/key bit len combo" ": (%u/256)\n", msc->fc); } break; case 512: props->ap[msc->fc][msc->mode][2].databytelen = trip->databytelen; props->ap[msc->fc][msc->mode][2].sglen = trip->sglen; break; default: dev_warn(dev, "unknown function code/key bit " "len combo: (%u/%u)\n", msc->fc, trip->keybitlen); break; } next_loop: bytes_so_far += sizeof(struct msc_triplet); trip++; } msc = (struct max_sync_cop *)trip; } props->flags |= NX_OF_FLAG_MAXSYNCCOP_SET; } /** * nx_of_init - read openFirmware values from the device tree * * @dev: device handle * @props: pointer to struct to hold the properties values * * Called once at driver probe time, this function will read out the * openFirmware properties we use at runtime. If all the OF properties are * acceptable, when we exit this function props->flags will indicate that * we're ready to register our crypto algorithms. 
*/ static void nx_of_init(struct device *dev, struct nx_of *props) { struct device_node *base_node = dev->of_node; struct property *p; p = of_find_property(base_node, "status", NULL); if (!p) dev_info(dev, "%s: property 'status' not found\n", __func__); else nx_of_update_status(dev, p, props); p = of_find_property(base_node, "ibm,max-sg-len", NULL); if (!p) dev_info(dev, "%s: property 'ibm,max-sg-len' not found\n", __func__); else nx_of_update_sglen(dev, p, props); p = of_find_property(base_node, "ibm,max-sync-cop", NULL); if (!p) dev_info(dev, "%s: property 'ibm,max-sync-cop' not found\n", __func__); else nx_of_update_msc(dev, p, props); } static bool nx_check_prop(struct device *dev, u32 fc, u32 mode, int slot) { struct alg_props *props = &nx_driver.of.ap[fc][mode][slot]; if (!props->sglen || props->databytelen < NX_PAGE_SIZE) { if (dev) dev_warn(dev, "bogus sglen/databytelen for %u/%u/%u: " "%u/%u (ignored)\n", fc, mode, slot, props->sglen, props->databytelen); return false; } return true; } static bool nx_check_props(struct device *dev, u32 fc, u32 mode) { int i; for (i = 0; i < 3; i++) if (!nx_check_prop(dev, fc, mode, i)) return false; return true; } static int nx_register_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode) { return nx_check_props(&nx_driver.viodev->dev, fc, mode) ? crypto_register_skcipher(alg) : 0; } static int nx_register_aead(struct aead_alg *alg, u32 fc, u32 mode) { return nx_check_props(&nx_driver.viodev->dev, fc, mode) ? crypto_register_aead(alg) : 0; } static int nx_register_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot) { return (slot >= 0 ? nx_check_prop(&nx_driver.viodev->dev, fc, mode, slot) : nx_check_props(&nx_driver.viodev->dev, fc, mode)) ? crypto_register_shash(alg) : 0; } static void nx_unregister_skcipher(struct skcipher_alg *alg, u32 fc, u32 mode) { if (nx_check_props(NULL, fc, mode)) crypto_unregister_skcipher(alg); } static void nx_unregister_aead(struct aead_alg *alg, u32 fc, u32 mode) { if (nx_check_props(NULL, fc, mode)) crypto_unregister_aead(alg); } static void nx_unregister_shash(struct shash_alg *alg, u32 fc, u32 mode, int slot) { if (slot >= 0 ? nx_check_prop(NULL, fc, mode, slot) : nx_check_props(NULL, fc, mode)) crypto_unregister_shash(alg); } /** * nx_register_algs - register algorithms with the crypto API * * Called from nx_probe() * * If all OF properties are in an acceptable state, the driver flags will * indicate that we're ready and we'll create our debugfs files and register * out crypto algorithms. 
*/ static int nx_register_algs(void) { int rc = -1; if (nx_driver.of.flags != NX_OF_FLAG_MASK_READY) goto out; memset(&nx_driver.stats, 0, sizeof(struct nx_stats)); NX_DEBUGFS_INIT(&nx_driver); nx_driver.of.status = NX_OKAY; rc = nx_register_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); if (rc) goto out; rc = nx_register_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); if (rc) goto out_unreg_ecb; rc = nx_register_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); if (rc) goto out_unreg_cbc; rc = nx_register_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); if (rc) goto out_unreg_ctr3686; rc = nx_register_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); if (rc) goto out_unreg_gcm; rc = nx_register_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); if (rc) goto out_unreg_gcm4106; rc = nx_register_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); if (rc) goto out_unreg_ccm; rc = nx_register_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256); if (rc) goto out_unreg_ccm4309; rc = nx_register_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512); if (rc) goto out_unreg_s256; rc = nx_register_shash(&nx_shash_aes_xcbc_alg, NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1); if (rc) goto out_unreg_s512; goto out; out_unreg_s512: nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512); out_unreg_s256: nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256); out_unreg_ccm4309: nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); out_unreg_ccm: nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); out_unreg_gcm4106: nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); out_unreg_gcm: nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); out_unreg_ctr3686: nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); out_unreg_cbc: nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); out_unreg_ecb: nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); out: return rc; } /** * nx_crypto_ctx_init - create and initialize a crypto api context * * @nx_ctx: the crypto api context * @fc: function code for the context * @mode: the function code specific mode for this context */ static int nx_crypto_ctx_init(struct nx_crypto_ctx *nx_ctx, u32 fc, u32 mode) { if (nx_driver.of.status != NX_OKAY) { pr_err("Attempt to initialize NX crypto context while device " "is not available!\n"); return -ENODEV; } /* we need an extra page for csbcpb_aead for these modes */ if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM) nx_ctx->kmem_len = (5 * NX_PAGE_SIZE) + sizeof(struct nx_csbcpb); else nx_ctx->kmem_len = (4 * NX_PAGE_SIZE) + sizeof(struct nx_csbcpb); nx_ctx->kmem = kmalloc(nx_ctx->kmem_len, GFP_KERNEL); if (!nx_ctx->kmem) return -ENOMEM; /* the csbcpb and scatterlists must be 4K aligned pages */ nx_ctx->csbcpb = (struct nx_csbcpb *)(round_up((u64)nx_ctx->kmem, (u64)NX_PAGE_SIZE)); nx_ctx->in_sg = (struct nx_sg *)((u8 *)nx_ctx->csbcpb + NX_PAGE_SIZE); nx_ctx->out_sg = (struct nx_sg *)((u8 *)nx_ctx->in_sg + NX_PAGE_SIZE); if (mode == NX_MODE_AES_GCM || mode == NX_MODE_AES_CCM) nx_ctx->csbcpb_aead = (struct nx_csbcpb *)((u8 *)nx_ctx->out_sg + NX_PAGE_SIZE); /* give each context a pointer to global stats and their OF * properties */ nx_ctx->stats = &nx_driver.stats; memcpy(nx_ctx->props, nx_driver.of.ap[fc][mode], sizeof(struct alg_props) * 3); return 0; } /* entry points from the crypto tfm initializers */ int 
nx_crypto_ctx_aes_ccm_init(struct crypto_aead *tfm) { crypto_aead_set_reqsize(tfm, sizeof(struct nx_ccm_rctx)); return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, NX_MODE_AES_CCM); } int nx_crypto_ctx_aes_gcm_init(struct crypto_aead *tfm) { crypto_aead_set_reqsize(tfm, sizeof(struct nx_gcm_rctx)); return nx_crypto_ctx_init(crypto_aead_ctx(tfm), NX_FC_AES, NX_MODE_AES_GCM); } int nx_crypto_ctx_aes_ctr_init(struct crypto_skcipher *tfm) { return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES, NX_MODE_AES_CTR); } int nx_crypto_ctx_aes_cbc_init(struct crypto_skcipher *tfm) { return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES, NX_MODE_AES_CBC); } int nx_crypto_ctx_aes_ecb_init(struct crypto_skcipher *tfm) { return nx_crypto_ctx_init(crypto_skcipher_ctx(tfm), NX_FC_AES, NX_MODE_AES_ECB); } int nx_crypto_ctx_sha_init(struct crypto_tfm *tfm) { return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_SHA, NX_MODE_SHA); } int nx_crypto_ctx_aes_xcbc_init(struct crypto_tfm *tfm) { return nx_crypto_ctx_init(crypto_tfm_ctx(tfm), NX_FC_AES, NX_MODE_AES_XCBC_MAC); } /** * nx_crypto_ctx_exit - destroy a crypto api context * * @tfm: the crypto transform pointer for the context * * As crypto API contexts are destroyed, this exit hook is called to free the * memory associated with it. */ void nx_crypto_ctx_exit(struct crypto_tfm *tfm) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); kfree_sensitive(nx_ctx->kmem); nx_ctx->csbcpb = NULL; nx_ctx->csbcpb_aead = NULL; nx_ctx->in_sg = NULL; nx_ctx->out_sg = NULL; } void nx_crypto_ctx_skcipher_exit(struct crypto_skcipher *tfm) { nx_crypto_ctx_exit(crypto_skcipher_ctx(tfm)); } void nx_crypto_ctx_aead_exit(struct crypto_aead *tfm) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); kfree_sensitive(nx_ctx->kmem); } static int nx_probe(struct vio_dev *viodev, const struct vio_device_id *id) { dev_dbg(&viodev->dev, "driver probed: %s resource id: 0x%x\n", viodev->name, viodev->resource_id); if (nx_driver.viodev) { dev_err(&viodev->dev, "%s: Attempt to register more than one " "instance of the hardware\n", __func__); return -EINVAL; } nx_driver.viodev = viodev; nx_of_init(&viodev->dev, &nx_driver.of); return nx_register_algs(); } static void nx_remove(struct vio_dev *viodev) { dev_dbg(&viodev->dev, "entering nx_remove for UA 0x%x\n", viodev->unit_address); if (nx_driver.of.status == NX_OKAY) { NX_DEBUGFS_FINI(&nx_driver); nx_unregister_shash(&nx_shash_aes_xcbc_alg, NX_FC_AES, NX_MODE_AES_XCBC_MAC, -1); nx_unregister_shash(&nx_shash_sha512_alg, NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA256); nx_unregister_shash(&nx_shash_sha256_alg, NX_FC_SHA, NX_MODE_SHA, NX_PROPS_SHA512); nx_unregister_aead(&nx_ccm4309_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); nx_unregister_aead(&nx_ccm_aes_alg, NX_FC_AES, NX_MODE_AES_CCM); nx_unregister_aead(&nx_gcm4106_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); nx_unregister_aead(&nx_gcm_aes_alg, NX_FC_AES, NX_MODE_AES_GCM); nx_unregister_skcipher(&nx_ctr3686_aes_alg, NX_FC_AES, NX_MODE_AES_CTR); nx_unregister_skcipher(&nx_cbc_aes_alg, NX_FC_AES, NX_MODE_AES_CBC); nx_unregister_skcipher(&nx_ecb_aes_alg, NX_FC_AES, NX_MODE_AES_ECB); } } /* module wide initialization/cleanup */ static int __init nx_init(void) { return vio_register_driver(&nx_driver.viodriver); } static void __exit nx_fini(void) { vio_unregister_driver(&nx_driver.viodriver); } static const struct vio_device_id nx_crypto_driver_ids[] = { { "ibm,sym-encryption-v1", "ibm,sym-encryption" }, { "", "" } }; MODULE_DEVICE_TABLE(vio, nx_crypto_driver_ids); /* driver state structure 
*/ struct nx_crypto_driver nx_driver = { .viodriver = { .id_table = nx_crypto_driver_ids, .probe = nx_probe, .remove = nx_remove, .name = NX_NAME, }, }; module_init(nx_init); module_exit(nx_fini); MODULE_AUTHOR("Kent Yoder <[email protected]>"); MODULE_DESCRIPTION(NX_STRING); MODULE_LICENSE("GPL"); MODULE_VERSION(NX_VERSION);
linux-master
drivers/crypto/nx/nx.c
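A central rule in nx.c is that nx_build_sg_list() never emits a scatter element that crosses a 4 KiB boundary, because the NX unit only accepts page-bounded descriptors. The standalone program below models just that splitting rule so the behaviour can be inspected outside the kernel; the address and length are made up and the code is not taken from the driver.

/*
 * Standalone model (not driver code) of the segmentation rule used by
 * nx_build_sg_list(): each emitted element stops at the next 4K boundary,
 * so no descriptor straddles a page. Build as an ordinary user program.
 */
#include <stdio.h>
#include <stdint.h>

#define NX_PAGE 4096u

int main(void)
{
	uint64_t addr = 0x1000f00;	/* hypothetical starting address */
	unsigned int len = 10000;	/* hypothetical byte count */

	while (len) {
		uint64_t next_page = (addr | (NX_PAGE - 1)) + 1;
		unsigned int chunk = next_page - addr;

		if (chunk > len)
			chunk = len;
		printf("sg: addr=0x%llx len=%u\n",
		       (unsigned long long)addr, chunk);
		addr += chunk;
		len -= chunk;
	}
	return 0;
}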
// SPDX-License-Identifier: GPL-2.0-only /* * AES GCM routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2012 International Business Machines Inc. * * Author: Kent Yoder <[email protected]> */ #include <crypto/internal/aead.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/gcm.h> #include <crypto/scatterwalk.h> #include <linux/module.h> #include <linux/types.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" static int gcm_aes_nx_set_key(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; nx_ctx_init(nx_ctx, HCOP_FC_AES); switch (key_len) { case AES_KEYSIZE_128: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; break; case AES_KEYSIZE_192: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; break; case AES_KEYSIZE_256: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; break; default: return -EINVAL; } csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len); csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA; memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len); return 0; } static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm); char *nonce = nx_ctx->priv.gcm.nonce; int rc; if (key_len < 4) return -EINVAL; key_len -= 4; rc = gcm_aes_nx_set_key(tfm, in_key, key_len); if (rc) goto out; memcpy(nonce, in_key + key_len, 4); out: return rc; } static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } return 0; } static int nx_gca(struct nx_crypto_ctx *nx_ctx, struct aead_request *req, u8 *out, unsigned int assoclen) { int rc; struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; struct scatter_walk walk; struct nx_sg *nx_sg = nx_ctx->in_sg; unsigned int nbytes = assoclen; unsigned int processed = 0, to_process; unsigned int max_sg_len; if (nbytes <= AES_BLOCK_SIZE) { scatterwalk_start(&walk, req->src); scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG); scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0); return 0; } NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION; /* page_limit: number of sg entries that fit on one page */ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg), nx_ctx->ap->sglen); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); do { /* * to_process: the data chunk to process in this update. * This value is bound by sg list limits. 
*/ to_process = min_t(u64, nbytes - processed, nx_ctx->ap->databytelen); to_process = min_t(u64, to_process, NX_PAGE_SIZE * (max_sg_len - 1)); nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, req->src, processed, &to_process); if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE; else NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE; nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg); rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) return rc; memcpy(csbcpb_aead->cpb.aes_gca.in_pat, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE); NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION; atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < nbytes); memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE); return rc; } static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen) { int rc; struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *nx_sg; unsigned int nbytes = assoclen; unsigned int processed = 0, to_process; unsigned int max_sg_len; /* Set GMAC mode */ csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC; NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; /* page_limit: number of sg entries that fit on one page */ max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg), nx_ctx->ap->sglen); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); /* Copy IV */ memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE); do { /* * to_process: the data chunk to process in this update. * This value is bound by sg list limits. */ to_process = min_t(u64, nbytes - processed, nx_ctx->ap->databytelen); to_process = min_t(u64, to_process, NX_PAGE_SIZE * (max_sg_len - 1)); nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len, req->src, processed, &to_process); if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg) * sizeof(struct nx_sg); csbcpb->cpb.aes_gcm.bit_length_data = 0; csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes; rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad, csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE); memcpy(csbcpb->cpb.aes_gcm.in_s0, csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < nbytes); out: /* Restore GCM mode */ csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; return rc; } static int gcm_empty(struct aead_request *req, const u8 *iv, int enc) { int rc; struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; char out[AES_BLOCK_SIZE]; struct nx_sg *in_sg, *out_sg; int len; /* For scenarios where the input message is zero length, AES CTR mode * may be used. Set the source data to be a single block (16B) of all * zeros, and set the input IV value to be the same as the GMAC IV * value. 
- nx_wb 4.8.1.3 */ /* Change to ECB mode */ csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key, sizeof(csbcpb->cpb.aes_ecb.key)); if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; len = AES_BLOCK_SIZE; /* Encrypt the counter/IV */ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv, &len, nx_ctx->ap->sglen); if (len != AES_BLOCK_SIZE) return -EINVAL; len = sizeof(out); out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len, nx_ctx->ap->sglen); if (len != sizeof(out)) return -EINVAL; nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; atomic_inc(&(nx_ctx->stats->aes_ops)); /* Copy out the auth tag */ memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out, crypto_aead_authsize(crypto_aead_reqtfm(req))); out: /* Restore XCBC mode */ csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM; /* * ECB key uses the same region that GCM AAD and counter, so it's safe * to just fill it with zeroes. */ memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key)); return rc; } static int gcm_aes_nx_crypt(struct aead_request *req, int enc, unsigned int assoclen) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_gcm_rctx *rctx = aead_request_ctx(req); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; unsigned int nbytes = req->cryptlen; unsigned int processed = 0, to_process; unsigned long irq_flags; int rc = -EINVAL; spin_lock_irqsave(&nx_ctx->lock, irq_flags); /* initialize the counter */ *(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1; if (nbytes == 0) { if (assoclen == 0) rc = gcm_empty(req, rctx->iv, enc); else rc = gmac(req, rctx->iv, assoclen); if (rc) goto out; else goto mac; } /* Process associated data */ csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8; if (assoclen) { rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad, assoclen); if (rc) goto out; } /* Set flags for encryption */ NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; if (enc) { NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; } else { NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); } do { to_process = nbytes - processed; csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst, req->src, &to_process, processed + req->assoclen, csbcpb->cpb.aes_gcm.iv_or_cnt); if (rc) goto out; if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE); memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad, csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE); memcpy(csbcpb->cpb.aes_gcm.in_s0, csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count), &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < nbytes); mac: if (enc) { /* copy out the auth tag */ scatterwalk_map_and_copy( csbcpb->cpb.aes_gcm.out_pat_or_mac, req->dst, req->assoclen + nbytes, crypto_aead_authsize(crypto_aead_reqtfm(req)), SCATTERWALK_TO_SG); } else { u8 *itag = nx_ctx->priv.gcm.iauth_tag; u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac; 
scatterwalk_map_and_copy( itag, req->src, req->assoclen + nbytes, crypto_aead_authsize(crypto_aead_reqtfm(req)), SCATTERWALK_FROM_SG); rc = crypto_memneq(itag, otag, crypto_aead_authsize(crypto_aead_reqtfm(req))) ? -EBADMSG : 0; } out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int gcm_aes_nx_encrypt(struct aead_request *req) { struct nx_gcm_rctx *rctx = aead_request_ctx(req); char *iv = rctx->iv; memcpy(iv, req->iv, GCM_AES_IV_SIZE); return gcm_aes_nx_crypt(req, 1, req->assoclen); } static int gcm_aes_nx_decrypt(struct aead_request *req) { struct nx_gcm_rctx *rctx = aead_request_ctx(req); char *iv = rctx->iv; memcpy(iv, req->iv, GCM_AES_IV_SIZE); return gcm_aes_nx_crypt(req, 0, req->assoclen); } static int gcm4106_aes_nx_encrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_gcm_rctx *rctx = aead_request_ctx(req); char *iv = rctx->iv; char *nonce = nx_ctx->priv.gcm.nonce; memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); if (req->assoclen < 8) return -EINVAL; return gcm_aes_nx_crypt(req, 1, req->assoclen - 8); } static int gcm4106_aes_nx_decrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); struct nx_gcm_rctx *rctx = aead_request_ctx(req); char *iv = rctx->iv; char *nonce = nx_ctx->priv.gcm.nonce; memcpy(iv, nonce, NX_GCM4106_NONCE_LEN); memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); if (req->assoclen < 8) return -EINVAL; return gcm_aes_nx_crypt(req, 0, req->assoclen - 8); } struct aead_alg nx_gcm_aes_alg = { .base = { .cra_name = "gcm(aes)", .cra_driver_name = "gcm-aes-nx", .cra_priority = 300, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_module = THIS_MODULE, }, .init = nx_crypto_ctx_aes_gcm_init, .exit = nx_crypto_ctx_aead_exit, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, .setkey = gcm_aes_nx_set_key, .encrypt = gcm_aes_nx_encrypt, .decrypt = gcm_aes_nx_decrypt, }; struct aead_alg nx_gcm4106_aes_alg = { .base = { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "rfc4106-gcm-aes-nx", .cra_priority = 300, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_module = THIS_MODULE, }, .init = nx_crypto_ctx_aes_gcm_init, .exit = nx_crypto_ctx_aead_exit, .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, .setkey = gcm4106_aes_nx_set_key, .setauthsize = gcm4106_aes_nx_setauthsize, .encrypt = gcm4106_aes_nx_encrypt, .decrypt = gcm4106_aes_nx_decrypt, };
linux-master
drivers/crypto/nx/nx-aes-gcm.c
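nx-aes-gcm.c plugs into the kernel's AEAD framework as "gcm(aes)" and "rfc4106(gcm(aes))". The sketch below shows, under stated assumptions, how an in-kernel caller might drive the gcm(aes) instance through the generic AEAD API with a synchronous wait; the helper name, the in-place buffer layout and the 16-byte tag size are illustrative choices, not something this driver mandates.

/*
 * Sketch (not part of the driver): one-shot AES-GCM encryption via the
 * generic AEAD API. buf must be a linear kmalloc'd buffer laid out as
 * AAD || plaintext with 16 spare bytes at the end for the tag; iv is the
 * 12-byte GCM IV. All names and sizes here are illustrative.
 */
#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/err.h>

static int example_gcm_encrypt(u8 *buf, unsigned int assoclen,
			       unsigned int ptlen, const u8 *key,
			       unsigned int keylen, u8 *iv)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (!err)
		err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* one in-place scatterlist covering AAD, plaintext and tag space */
	sg_init_one(&sg, buf, assoclen + ptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}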
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for IBM Power 842 compression accelerator * * Copyright (C) IBM Corporation, 2012 * * Authors: Robert Jennings <[email protected]> * Seth Jennings <[email protected]> */ #include <asm/vio.h> #include <asm/hvcall.h> #include <asm/vas.h> #include "nx-842.h" #include "nx_csbcpb.h" /* struct nx_csbcpb */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Robert Jennings <[email protected]>"); MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors"); MODULE_ALIAS_CRYPTO("842"); MODULE_ALIAS_CRYPTO("842-nx"); /* * Coprocessor type specific capabilities from the hypervisor. */ struct hv_nx_cop_caps { __be64 descriptor; __be64 req_max_processed_len; /* Max bytes in one GZIP request */ __be64 min_compress_len; /* Min compression size in bytes */ __be64 min_decompress_len; /* Min decompression size in bytes */ } __packed __aligned(0x1000); /* * Coprocessor type specific capabilities. */ struct nx_cop_caps { u64 descriptor; u64 req_max_processed_len; /* Max bytes in one GZIP request */ u64 min_compress_len; /* Min compression in bytes */ u64 min_decompress_len; /* Min decompression in bytes */ }; static u64 caps_feat; static struct nx_cop_caps nx_cop_caps; static struct nx842_constraints nx842_pseries_constraints = { .alignment = DDE_BUFFER_ALIGN, .multiple = DDE_BUFFER_LAST_MULT, .minimum = DDE_BUFFER_LAST_MULT, .maximum = PAGE_SIZE, /* dynamic, max_sync_size */ }; static int check_constraints(unsigned long buf, unsigned int *len, bool in) { if (!IS_ALIGNED(buf, nx842_pseries_constraints.alignment)) { pr_debug("%s buffer 0x%lx not aligned to 0x%x\n", in ? "input" : "output", buf, nx842_pseries_constraints.alignment); return -EINVAL; } if (*len % nx842_pseries_constraints.multiple) { pr_debug("%s buffer len 0x%x not multiple of 0x%x\n", in ? "input" : "output", *len, nx842_pseries_constraints.multiple); if (in) return -EINVAL; *len = round_down(*len, nx842_pseries_constraints.multiple); } if (*len < nx842_pseries_constraints.minimum) { pr_debug("%s buffer len 0x%x under minimum 0x%x\n", in ? "input" : "output", *len, nx842_pseries_constraints.minimum); return -EINVAL; } if (*len > nx842_pseries_constraints.maximum) { pr_debug("%s buffer len 0x%x over maximum 0x%x\n", in ? "input" : "output", *len, nx842_pseries_constraints.maximum); if (in) return -EINVAL; *len = nx842_pseries_constraints.maximum; } return 0; } /* I assume we need to align the CSB? */ #define WORKMEM_ALIGN (256) struct nx842_workmem { /* scatterlist */ char slin[4096]; char slout[4096]; /* coprocessor status/parameter block */ struct nx_csbcpb csbcpb; char padding[WORKMEM_ALIGN]; } __aligned(WORKMEM_ALIGN); /* Macros for fields within nx_csbcpb */ /* Check the valid bit within the csbcpb valid field */ #define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7)) /* CE macros operate on the completion_extension field bits in the csbcpb. 
* CE0 0=full completion, 1=partial completion * CE1 0=CE0 indicates completion, 1=termination (output may be modified) * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */ #define NX842_CSBCPB_CE0(x) (x & BIT_MASK(7)) #define NX842_CSBCPB_CE1(x) (x & BIT_MASK(6)) #define NX842_CSBCPB_CE2(x) (x & BIT_MASK(5)) /* The NX unit accepts data only on 4K page boundaries */ #define NX842_HW_PAGE_SIZE (4096) #define NX842_HW_PAGE_MASK (~(NX842_HW_PAGE_SIZE-1)) struct ibm_nx842_counters { atomic64_t comp_complete; atomic64_t comp_failed; atomic64_t decomp_complete; atomic64_t decomp_failed; atomic64_t swdecomp; atomic64_t comp_times[32]; atomic64_t decomp_times[32]; }; struct nx842_devdata { struct vio_dev *vdev; struct device *dev; struct ibm_nx842_counters *counters; unsigned int max_sg_len; unsigned int max_sync_size; unsigned int max_sync_sg; }; static struct nx842_devdata __rcu *devdata; static DEFINE_SPINLOCK(devdata_mutex); #define NX842_COUNTER_INC(_x) \ static inline void nx842_inc_##_x( \ const struct nx842_devdata *dev) { \ if (dev) \ atomic64_inc(&dev->counters->_x); \ } NX842_COUNTER_INC(comp_complete); NX842_COUNTER_INC(comp_failed); NX842_COUNTER_INC(decomp_complete); NX842_COUNTER_INC(decomp_failed); NX842_COUNTER_INC(swdecomp); #define NX842_HIST_SLOTS 16 static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time) { int bucket = fls(time); if (bucket) bucket = min((NX842_HIST_SLOTS - 1), bucket - 1); atomic64_inc(&times[bucket]); } /* NX unit operation flags */ #define NX842_OP_COMPRESS 0x0 #define NX842_OP_CRC 0x1 #define NX842_OP_DECOMPRESS 0x2 #define NX842_OP_COMPRESS_CRC (NX842_OP_COMPRESS | NX842_OP_CRC) #define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC) #define NX842_OP_ASYNC (1<<23) #define NX842_OP_NOTIFY (1<<22) #define NX842_OP_NOTIFY_INT(x) ((x & 0xff)<<8) static unsigned long nx842_get_desired_dma(struct vio_dev *viodev) { /* No use of DMA mappings within the driver. 
*/ return 0; } struct nx842_slentry { __be64 ptr; /* Real address (use __pa()) */ __be64 len; }; /* pHyp scatterlist entry */ struct nx842_scatterlist { int entry_nr; /* number of slentries */ struct nx842_slentry *entries; /* ptr to array of slentries */ }; /* Does not include sizeof(entry_nr) in the size */ static inline unsigned long nx842_get_scatterlist_size( struct nx842_scatterlist *sl) { return sl->entry_nr * sizeof(struct nx842_slentry); } static int nx842_build_scatterlist(unsigned long buf, int len, struct nx842_scatterlist *sl) { unsigned long entrylen; struct nx842_slentry *entry; sl->entry_nr = 0; entry = sl->entries; while (len) { entry->ptr = cpu_to_be64(nx842_get_pa((void *)buf)); entrylen = min_t(int, len, LEN_ON_SIZE(buf, NX842_HW_PAGE_SIZE)); entry->len = cpu_to_be64(entrylen); len -= entrylen; buf += entrylen; sl->entry_nr++; entry++; } return 0; } static int nx842_validate_result(struct device *dev, struct cop_status_block *csb) { /* The csb must be valid after returning from vio_h_cop_sync */ if (!NX842_CSBCBP_VALID_CHK(csb->valid)) { dev_err(dev, "%s: cspcbp not valid upon completion.\n", __func__); dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n", csb->valid, csb->crb_seq_number, csb->completion_code, csb->completion_extension); dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n", be32_to_cpu(csb->processed_byte_count), (unsigned long)be64_to_cpu(csb->address)); return -EIO; } /* Check return values from the hardware in the CSB */ switch (csb->completion_code) { case 0: /* Completed without error */ break; case 64: /* Compression ok, but output larger than input */ dev_dbg(dev, "%s: output size larger than input size\n", __func__); break; case 13: /* Output buffer too small */ dev_dbg(dev, "%s: Out of space in output buffer\n", __func__); return -ENOSPC; case 65: /* Calculated CRC doesn't match the passed value */ dev_dbg(dev, "%s: CRC mismatch for decompression\n", __func__); return -EINVAL; case 66: /* Input data contains an illegal template field */ case 67: /* Template indicates data past the end of the input stream */ dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n", __func__, csb->completion_code); return -EINVAL; default: dev_dbg(dev, "%s: Unspecified error (code:%d)\n", __func__, csb->completion_code); return -EIO; } /* Hardware sanity check */ if (!NX842_CSBCPB_CE2(csb->completion_extension)) { dev_err(dev, "%s: No error returned by hardware, but " "data returned is unusable, contact support.\n" "(Additional info: csbcbp->processed bytes " "does not specify processed bytes for the " "target buffer.)\n", __func__); return -EIO; } return 0; } /** * nx842_pseries_compress - Compress data using the 842 algorithm * * Compression provide by the NX842 coprocessor on IBM Power systems. * The input buffer is compressed and the result is stored in the * provided output buffer. * * Upon return from this function @outlen contains the length of the * compressed data. If there is an error then @outlen will be 0 and an * error will be specified by the return code from this function. 
* * @in: Pointer to input buffer * @inlen: Length of input buffer * @out: Pointer to output buffer * @outlen: Length of output buffer * @wmem: ptr to buffer for working memory, size determined by * nx842_pseries_driver.workmem_size * * Returns: * 0 Success, output of length @outlen stored in the buffer at @out * -ENOMEM Unable to allocate internal buffers * -ENOSPC Output buffer is to small * -EIO Internal error * -ENODEV Hardware unavailable */ static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlen, void *wmem) { struct nx842_devdata *local_devdata; struct device *dev = NULL; struct nx842_workmem *workmem; struct nx842_scatterlist slin, slout; struct nx_csbcpb *csbcpb; int ret = 0; unsigned long inbuf, outbuf; struct vio_pfo_op op = { .done = NULL, .handle = 0, .timeout = 0, }; unsigned long start = get_tb(); inbuf = (unsigned long)in; if (check_constraints(inbuf, &inlen, true)) return -EINVAL; outbuf = (unsigned long)out; if (check_constraints(outbuf, outlen, false)) return -EINVAL; rcu_read_lock(); local_devdata = rcu_dereference(devdata); if (!local_devdata || !local_devdata->dev) { rcu_read_unlock(); return -ENODEV; } dev = local_devdata->dev; /* Init scatterlist */ workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN); slin.entries = (struct nx842_slentry *)workmem->slin; slout.entries = (struct nx842_slentry *)workmem->slout; /* Init operation */ op.flags = NX842_OP_COMPRESS_CRC; csbcpb = &workmem->csbcpb; memset(csbcpb, 0, sizeof(*csbcpb)); op.csbcpb = nx842_get_pa(csbcpb); if ((inbuf & NX842_HW_PAGE_MASK) == ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) { /* Create direct DDE */ op.in = nx842_get_pa((void *)inbuf); op.inlen = inlen; } else { /* Create indirect DDE (scatterlist) */ nx842_build_scatterlist(inbuf, inlen, &slin); op.in = nx842_get_pa(slin.entries); op.inlen = -nx842_get_scatterlist_size(&slin); } if ((outbuf & NX842_HW_PAGE_MASK) == ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) { /* Create direct DDE */ op.out = nx842_get_pa((void *)outbuf); op.outlen = *outlen; } else { /* Create indirect DDE (scatterlist) */ nx842_build_scatterlist(outbuf, *outlen, &slout); op.out = nx842_get_pa(slout.entries); op.outlen = -nx842_get_scatterlist_size(&slout); } dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n", __func__, (unsigned long)op.in, (long)op.inlen, (unsigned long)op.out, (long)op.outlen); /* Send request to pHyp */ ret = vio_h_cop_sync(local_devdata->vdev, &op); /* Check for pHyp error */ if (ret) { dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", __func__, ret, op.hcall_err); ret = -EIO; goto unlock; } /* Check for hardware error */ ret = nx842_validate_result(dev, &csbcpb->csb); if (ret) goto unlock; *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count); dev_dbg(dev, "%s: processed_bytes=%d\n", __func__, *outlen); unlock: if (ret) nx842_inc_comp_failed(local_devdata); else { nx842_inc_comp_complete(local_devdata); ibm_nx842_incr_hist(local_devdata->counters->comp_times, (get_tb() - start) / tb_ticks_per_usec); } rcu_read_unlock(); return ret; } /** * nx842_pseries_decompress - Decompress data using the 842 algorithm * * Decompression provide by the NX842 coprocessor on IBM Power systems. * The input buffer is decompressed and the result is stored in the * provided output buffer. The size allocated to the output buffer is * provided by the caller of this function in @outlen. Upon return from * this function @outlen contains the length of the decompressed data. 
* If there is an error then @outlen will be 0 and an error will be * specified by the return code from this function. * * @in: Pointer to input buffer * @inlen: Length of input buffer * @out: Pointer to output buffer * @outlen: Length of output buffer * @wmem: ptr to buffer for working memory, size determined by * nx842_pseries_driver.workmem_size * * Returns: * 0 Success, output of length @outlen stored in the buffer at @out * -ENODEV Hardware decompression device is unavailable * -ENOMEM Unable to allocate internal buffers * -ENOSPC Output buffer is to small * -EINVAL Bad input data encountered when attempting decompress * -EIO Internal error */ static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen, unsigned char *out, unsigned int *outlen, void *wmem) { struct nx842_devdata *local_devdata; struct device *dev = NULL; struct nx842_workmem *workmem; struct nx842_scatterlist slin, slout; struct nx_csbcpb *csbcpb; int ret = 0; unsigned long inbuf, outbuf; struct vio_pfo_op op = { .done = NULL, .handle = 0, .timeout = 0, }; unsigned long start = get_tb(); /* Ensure page alignment and size */ inbuf = (unsigned long)in; if (check_constraints(inbuf, &inlen, true)) return -EINVAL; outbuf = (unsigned long)out; if (check_constraints(outbuf, outlen, false)) return -EINVAL; rcu_read_lock(); local_devdata = rcu_dereference(devdata); if (!local_devdata || !local_devdata->dev) { rcu_read_unlock(); return -ENODEV; } dev = local_devdata->dev; workmem = PTR_ALIGN(wmem, WORKMEM_ALIGN); /* Init scatterlist */ slin.entries = (struct nx842_slentry *)workmem->slin; slout.entries = (struct nx842_slentry *)workmem->slout; /* Init operation */ op.flags = NX842_OP_DECOMPRESS_CRC; csbcpb = &workmem->csbcpb; memset(csbcpb, 0, sizeof(*csbcpb)); op.csbcpb = nx842_get_pa(csbcpb); if ((inbuf & NX842_HW_PAGE_MASK) == ((inbuf + inlen - 1) & NX842_HW_PAGE_MASK)) { /* Create direct DDE */ op.in = nx842_get_pa((void *)inbuf); op.inlen = inlen; } else { /* Create indirect DDE (scatterlist) */ nx842_build_scatterlist(inbuf, inlen, &slin); op.in = nx842_get_pa(slin.entries); op.inlen = -nx842_get_scatterlist_size(&slin); } if ((outbuf & NX842_HW_PAGE_MASK) == ((outbuf + *outlen - 1) & NX842_HW_PAGE_MASK)) { /* Create direct DDE */ op.out = nx842_get_pa((void *)outbuf); op.outlen = *outlen; } else { /* Create indirect DDE (scatterlist) */ nx842_build_scatterlist(outbuf, *outlen, &slout); op.out = nx842_get_pa(slout.entries); op.outlen = -nx842_get_scatterlist_size(&slout); } dev_dbg(dev, "%s: op.in %lx op.inlen %ld op.out %lx op.outlen %ld\n", __func__, (unsigned long)op.in, (long)op.inlen, (unsigned long)op.out, (long)op.outlen); /* Send request to pHyp */ ret = vio_h_cop_sync(local_devdata->vdev, &op); /* Check for pHyp error */ if (ret) { dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n", __func__, ret, op.hcall_err); goto unlock; } /* Check for hardware error */ ret = nx842_validate_result(dev, &csbcpb->csb); if (ret) goto unlock; *outlen = be32_to_cpu(csbcpb->csb.processed_byte_count); unlock: if (ret) /* decompress fail */ nx842_inc_decomp_failed(local_devdata); else { nx842_inc_decomp_complete(local_devdata); ibm_nx842_incr_hist(local_devdata->counters->decomp_times, (get_tb() - start) / tb_ticks_per_usec); } rcu_read_unlock(); return ret; } /** * nx842_OF_set_defaults -- Set default (disabled) values for devdata * * @devdata: struct nx842_devdata to update * * Returns: * 0 on success * -ENOENT if @devdata ptr is NULL */ static int nx842_OF_set_defaults(struct nx842_devdata *devdata) { 
if (devdata) { devdata->max_sync_size = 0; devdata->max_sync_sg = 0; devdata->max_sg_len = 0; return 0; } else return -ENOENT; } /** * nx842_OF_upd_status -- Check the device info from OF status prop * * The status property indicates if the accelerator is enabled. If the * device is in the OF tree it indicates that the hardware is present. * The status field indicates if the device is enabled when the status * is 'okay'. Otherwise the device driver will be disabled. * * @devdata: struct nx842_devdata to use for dev_info * @prop: struct property point containing the maxsyncop for the update * * Returns: * 0 - Device is available * -ENODEV - Device is not available */ static int nx842_OF_upd_status(struct nx842_devdata *devdata, struct property *prop) { const char *status = (const char *)prop->value; if (!strncmp(status, "okay", (size_t)prop->length)) return 0; if (!strncmp(status, "disabled", (size_t)prop->length)) return -ENODEV; dev_info(devdata->dev, "%s: unknown status '%s'\n", __func__, status); return -EINVAL; } /** * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop * * Definition of the 'ibm,max-sg-len' OF property: * This field indicates the maximum byte length of a scatter list * for the platform facility. It is a single cell encoded as with encode-int. * * Example: * # od -x ibm,max-sg-len * 0000000 0000 0ff0 * * In this example, the maximum byte length of a scatter list is * 0x0ff0 (4,080). * * @devdata: struct nx842_devdata to update * @prop: struct property point containing the maxsyncop for the update * * Returns: * 0 on success * -EINVAL on failure */ static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata, struct property *prop) { int ret = 0; const unsigned int maxsglen = of_read_number(prop->value, 1); if (prop->length != sizeof(maxsglen)) { dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__); dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__, prop->length, sizeof(maxsglen)); ret = -EINVAL; } else { devdata->max_sg_len = min_t(unsigned int, maxsglen, NX842_HW_PAGE_SIZE); } return ret; } /** * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop * * Definition of the 'ibm,max-sync-cop' OF property: * Two series of cells. The first series of cells represents the maximums * that can be synchronously compressed. The second series of cells * represents the maximums that can be synchronously decompressed. * 1. The first cell in each series contains the count of the number of * data length, scatter list elements pairs that follow – each being * of the form * a. One cell data byte length * b. One cell total number of scatter list elements * * Example: * # od -x ibm,max-sync-cop * 0000000 0000 0001 0000 1000 0000 01fe 0000 0001 * 0000020 0000 1000 0000 01fe * * In this example, compression supports 0x1000 (4,096) data byte length * and 0x1fe (510) total scatter list elements. Decompression supports * 0x1000 (4,096) data byte length and 0x1f3 (510) total scatter list * elements. 
* * @devdata: struct nx842_devdata to update * @prop: struct property point containing the maxsyncop for the update * * Returns: * 0 on success * -EINVAL on failure */ static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata, struct property *prop) { int ret = 0; unsigned int comp_data_limit, decomp_data_limit; unsigned int comp_sg_limit, decomp_sg_limit; const struct maxsynccop_t { __be32 comp_elements; __be32 comp_data_limit; __be32 comp_sg_limit; __be32 decomp_elements; __be32 decomp_data_limit; __be32 decomp_sg_limit; } *maxsynccop; if (prop->length != sizeof(*maxsynccop)) { dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__); dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length, sizeof(*maxsynccop)); ret = -EINVAL; goto out; } maxsynccop = (const struct maxsynccop_t *)prop->value; comp_data_limit = be32_to_cpu(maxsynccop->comp_data_limit); comp_sg_limit = be32_to_cpu(maxsynccop->comp_sg_limit); decomp_data_limit = be32_to_cpu(maxsynccop->decomp_data_limit); decomp_sg_limit = be32_to_cpu(maxsynccop->decomp_sg_limit); /* Use one limit rather than separate limits for compression and * decompression. Set a maximum for this so as not to exceed the * size that the header can support and round the value down to * the hardware page size (4K) */ devdata->max_sync_size = min(comp_data_limit, decomp_data_limit); devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size, 65536); if (devdata->max_sync_size < 4096) { dev_err(devdata->dev, "%s: hardware max data size (%u) is " "less than the driver minimum, unable to use " "the hardware device\n", __func__, devdata->max_sync_size); ret = -EINVAL; goto out; } nx842_pseries_constraints.maximum = devdata->max_sync_size; devdata->max_sync_sg = min(comp_sg_limit, decomp_sg_limit); if (devdata->max_sync_sg < 1) { dev_err(devdata->dev, "%s: hardware max sg size (%u) is " "less than the driver minimum, unable to use " "the hardware device\n", __func__, devdata->max_sync_sg); ret = -EINVAL; goto out; } out: return ret; } /** * nx842_OF_upd -- Handle OF properties updates for the device. * * Set all properties from the OF tree. Optionally, a new property * can be provided by the @new_prop pointer to overwrite an existing value. * The device will remain disabled until all values are valid, this function * will return an error for updates unless all values are valid. * * @new_prop: If not NULL, this property is being updated. If NULL, update * all properties from the current values in the OF tree. * * Returns: * 0 - Success * -ENOMEM - Could not allocate memory for new devdata structure * -EINVAL - property value not found, new_prop is not a recognized * property for the device or property value is not valid. 
* -ENODEV - Device is not available */ static int nx842_OF_upd(struct property *new_prop) { struct nx842_devdata *old_devdata = NULL; struct nx842_devdata *new_devdata = NULL; struct device_node *of_node = NULL; struct property *status = NULL; struct property *maxsglen = NULL; struct property *maxsyncop = NULL; int ret = 0; unsigned long flags; new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); if (!new_devdata) return -ENOMEM; spin_lock_irqsave(&devdata_mutex, flags); old_devdata = rcu_dereference_check(devdata, lockdep_is_held(&devdata_mutex)); if (old_devdata) of_node = old_devdata->dev->of_node; if (!old_devdata || !of_node) { pr_err("%s: device is not available\n", __func__); spin_unlock_irqrestore(&devdata_mutex, flags); kfree(new_devdata); return -ENODEV; } memcpy(new_devdata, old_devdata, sizeof(*old_devdata)); new_devdata->counters = old_devdata->counters; /* Set ptrs for existing properties */ status = of_find_property(of_node, "status", NULL); maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL); maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL); if (!status || !maxsglen || !maxsyncop) { dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__); ret = -EINVAL; goto error_out; } /* * If this is a property update, there are only certain properties that * we care about. Bail if it isn't in the below list */ if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) || strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) || strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length))) goto out; /* Perform property updates */ ret = nx842_OF_upd_status(new_devdata, status); if (ret) goto error_out; ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen); if (ret) goto error_out; ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop); if (ret) goto error_out; out: dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n", __func__, new_devdata->max_sync_size, old_devdata->max_sync_size); dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n", __func__, new_devdata->max_sync_sg, old_devdata->max_sync_sg); dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n", __func__, new_devdata->max_sg_len, old_devdata->max_sg_len); rcu_assign_pointer(devdata, new_devdata); spin_unlock_irqrestore(&devdata_mutex, flags); synchronize_rcu(); dev_set_drvdata(new_devdata->dev, new_devdata); kfree(old_devdata); return 0; error_out: if (new_devdata) { dev_info(old_devdata->dev, "%s: device disabled\n", __func__); nx842_OF_set_defaults(new_devdata); rcu_assign_pointer(devdata, new_devdata); spin_unlock_irqrestore(&devdata_mutex, flags); synchronize_rcu(); dev_set_drvdata(new_devdata->dev, new_devdata); kfree(old_devdata); } else { dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__); spin_unlock_irqrestore(&devdata_mutex, flags); } if (!ret) ret = -EINVAL; return ret; } /** * nx842_OF_notifier - Process updates to OF properties for the device * * @np: notifier block * @action: notifier action * @data: struct of_reconfig_data pointer * * Returns: * NOTIFY_OK on success * NOTIFY_BAD encoded with error number on failure, use * notifier_to_errno() to decode this value */ static int nx842_OF_notifier(struct notifier_block *np, unsigned long action, void *data) { struct of_reconfig_data *upd = data; struct nx842_devdata *local_devdata; struct device_node *node = NULL; rcu_read_lock(); local_devdata = rcu_dereference(devdata); if (local_devdata) node = local_devdata->dev->of_node; if (local_devdata && action 
== OF_RECONFIG_UPDATE_PROPERTY && !strcmp(upd->dn->name, node->name)) { rcu_read_unlock(); nx842_OF_upd(upd->prop); } else rcu_read_unlock(); return NOTIFY_OK; } static struct notifier_block nx842_of_nb = { .notifier_call = nx842_OF_notifier, }; #define nx842_counter_read(_name) \ static ssize_t nx842_##_name##_show(struct device *dev, \ struct device_attribute *attr, \ char *buf) { \ struct nx842_devdata *local_devdata; \ int p = 0; \ rcu_read_lock(); \ local_devdata = rcu_dereference(devdata); \ if (local_devdata) \ p = snprintf(buf, PAGE_SIZE, "%lld\n", \ atomic64_read(&local_devdata->counters->_name)); \ rcu_read_unlock(); \ return p; \ } #define NX842DEV_COUNTER_ATTR_RO(_name) \ nx842_counter_read(_name); \ static struct device_attribute dev_attr_##_name = __ATTR(_name, \ 0444, \ nx842_##_name##_show,\ NULL); NX842DEV_COUNTER_ATTR_RO(comp_complete); NX842DEV_COUNTER_ATTR_RO(comp_failed); NX842DEV_COUNTER_ATTR_RO(decomp_complete); NX842DEV_COUNTER_ATTR_RO(decomp_failed); NX842DEV_COUNTER_ATTR_RO(swdecomp); static ssize_t nx842_timehist_show(struct device *, struct device_attribute *, char *); static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444, nx842_timehist_show, NULL); static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times, 0444, nx842_timehist_show, NULL); static ssize_t nx842_timehist_show(struct device *dev, struct device_attribute *attr, char *buf) { char *p = buf; struct nx842_devdata *local_devdata; atomic64_t *times; int bytes_remain = PAGE_SIZE; int bytes; int i; rcu_read_lock(); local_devdata = rcu_dereference(devdata); if (!local_devdata) { rcu_read_unlock(); return 0; } if (attr == &dev_attr_comp_times) times = local_devdata->counters->comp_times; else if (attr == &dev_attr_decomp_times) times = local_devdata->counters->decomp_times; else { rcu_read_unlock(); return 0; } for (i = 0; i < (NX842_HIST_SLOTS - 2); i++) { bytes = snprintf(p, bytes_remain, "%u-%uus:\t%lld\n", i ? 
(2<<(i-1)) : 0, (2<<i)-1, atomic64_read(&times[i])); bytes_remain -= bytes; p += bytes; } /* The last bucket holds everything over * 2<<(NX842_HIST_SLOTS - 2) us */ bytes = snprintf(p, bytes_remain, "%uus - :\t%lld\n", 2<<(NX842_HIST_SLOTS - 2), atomic64_read(&times[(NX842_HIST_SLOTS - 1)])); p += bytes; rcu_read_unlock(); return p - buf; } static struct attribute *nx842_sysfs_entries[] = { &dev_attr_comp_complete.attr, &dev_attr_comp_failed.attr, &dev_attr_decomp_complete.attr, &dev_attr_decomp_failed.attr, &dev_attr_swdecomp.attr, &dev_attr_comp_times.attr, &dev_attr_decomp_times.attr, NULL, }; static const struct attribute_group nx842_attribute_group = { .name = NULL, /* put in device directory */ .attrs = nx842_sysfs_entries, }; #define nxcop_caps_read(_name) \ static ssize_t nxcop_##_name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ return sprintf(buf, "%lld\n", nx_cop_caps._name); \ } #define NXCT_ATTR_RO(_name) \ nxcop_caps_read(_name); \ static struct device_attribute dev_attr_##_name = __ATTR(_name, \ 0444, \ nxcop_##_name##_show, \ NULL); NXCT_ATTR_RO(req_max_processed_len); NXCT_ATTR_RO(min_compress_len); NXCT_ATTR_RO(min_decompress_len); static struct attribute *nxcop_caps_sysfs_entries[] = { &dev_attr_req_max_processed_len.attr, &dev_attr_min_compress_len.attr, &dev_attr_min_decompress_len.attr, NULL, }; static const struct attribute_group nxcop_caps_attr_group = { .name = "nx_gzip_caps", .attrs = nxcop_caps_sysfs_entries, }; static struct nx842_driver nx842_pseries_driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, .workmem_size = sizeof(struct nx842_workmem), .constraints = &nx842_pseries_constraints, .compress = nx842_pseries_compress, .decompress = nx842_pseries_decompress, }; static int nx842_pseries_crypto_init(struct crypto_tfm *tfm) { return nx842_crypto_init(tfm, &nx842_pseries_driver); } static struct crypto_alg nx842_pseries_alg = { .cra_name = "842", .cra_driver_name = "842-nx", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct nx842_crypto_ctx), .cra_module = THIS_MODULE, .cra_init = nx842_pseries_crypto_init, .cra_exit = nx842_crypto_exit, .cra_u = { .compress = { .coa_compress = nx842_crypto_compress, .coa_decompress = nx842_crypto_decompress } } }; static int nx842_probe(struct vio_dev *viodev, const struct vio_device_id *id) { struct nx842_devdata *old_devdata, *new_devdata = NULL; unsigned long flags; int ret = 0; new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS); if (!new_devdata) return -ENOMEM; new_devdata->counters = kzalloc(sizeof(*new_devdata->counters), GFP_NOFS); if (!new_devdata->counters) { kfree(new_devdata); return -ENOMEM; } spin_lock_irqsave(&devdata_mutex, flags); old_devdata = rcu_dereference_check(devdata, lockdep_is_held(&devdata_mutex)); if (old_devdata && old_devdata->vdev != NULL) { dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__); ret = -1; goto error_unlock; } dev_set_drvdata(&viodev->dev, NULL); new_devdata->vdev = viodev; new_devdata->dev = &viodev->dev; nx842_OF_set_defaults(new_devdata); rcu_assign_pointer(devdata, new_devdata); spin_unlock_irqrestore(&devdata_mutex, flags); synchronize_rcu(); kfree(old_devdata); of_reconfig_notifier_register(&nx842_of_nb); ret = nx842_OF_upd(NULL); if (ret) goto error; ret = crypto_register_alg(&nx842_pseries_alg); if (ret) { dev_err(&viodev->dev, "could not register comp alg: %d\n", ret); goto error; } rcu_read_lock(); dev_set_drvdata(&viodev->dev, 
rcu_dereference(devdata)); rcu_read_unlock(); if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) { dev_err(&viodev->dev, "could not create sysfs device attributes\n"); ret = -1; goto error; } if (caps_feat) { if (sysfs_create_group(&viodev->dev.kobj, &nxcop_caps_attr_group)) { dev_err(&viodev->dev, "Could not create sysfs NX capability entries\n"); ret = -1; goto error; } } return 0; error_unlock: spin_unlock_irqrestore(&devdata_mutex, flags); if (new_devdata) kfree(new_devdata->counters); kfree(new_devdata); error: return ret; } static void nx842_remove(struct vio_dev *viodev) { struct nx842_devdata *old_devdata; unsigned long flags; pr_info("Removing IBM Power 842 compression device\n"); sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group); if (caps_feat) sysfs_remove_group(&viodev->dev.kobj, &nxcop_caps_attr_group); crypto_unregister_alg(&nx842_pseries_alg); spin_lock_irqsave(&devdata_mutex, flags); old_devdata = rcu_dereference_check(devdata, lockdep_is_held(&devdata_mutex)); of_reconfig_notifier_unregister(&nx842_of_nb); RCU_INIT_POINTER(devdata, NULL); spin_unlock_irqrestore(&devdata_mutex, flags); synchronize_rcu(); dev_set_drvdata(&viodev->dev, NULL); if (old_devdata) kfree(old_devdata->counters); kfree(old_devdata); } /* * Get NX capabilities from the hypervisor. * Only NX-GZIP capabilities are provided by the hypervisor right * now and these values are available to user space with sysfs. */ static void __init nxcop_get_capabilities(void) { struct hv_vas_all_caps *hv_caps; struct hv_nx_cop_caps *hv_nxc = NULL; /* NULL so the unconditional kfree() below is safe if NX-GZIP is absent */ int rc; hv_caps = kmalloc(sizeof(*hv_caps), GFP_KERNEL); if (!hv_caps) return; /* * Get NX overall capabilities with feature type=0 */ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, 0, (u64)virt_to_phys(hv_caps)); if (rc) goto out; caps_feat = be64_to_cpu(hv_caps->feat_type); /* * NX-GZIP feature available */ if (caps_feat & VAS_NX_GZIP_FEAT_BIT) { hv_nxc = kmalloc(sizeof(*hv_nxc), GFP_KERNEL); if (!hv_nxc) goto out; /* * Get capabilities for NX-GZIP feature */ rc = h_query_vas_capabilities(H_QUERY_NX_CAPABILITIES, VAS_NX_GZIP_FEAT, (u64)virt_to_phys(hv_nxc)); } else { pr_err("NX-GZIP feature is not available\n"); rc = -EINVAL; } if (!rc) { nx_cop_caps.descriptor = be64_to_cpu(hv_nxc->descriptor); nx_cop_caps.req_max_processed_len = be64_to_cpu(hv_nxc->req_max_processed_len); nx_cop_caps.min_compress_len = be64_to_cpu(hv_nxc->min_compress_len); nx_cop_caps.min_decompress_len = be64_to_cpu(hv_nxc->min_decompress_len); } else { caps_feat = 0; } kfree(hv_nxc); out: kfree(hv_caps); } static const struct vio_device_id nx842_vio_driver_ids[] = { {"ibm,compression-v1", "ibm,compression"}, {"", ""}, }; MODULE_DEVICE_TABLE(vio, nx842_vio_driver_ids); static struct vio_driver nx842_vio_driver = { .name = KBUILD_MODNAME, .probe = nx842_probe, .remove = nx842_remove, .get_desired_dma = nx842_get_desired_dma, .id_table = nx842_vio_driver_ids, }; static int __init nx842_pseries_init(void) { struct nx842_devdata *new_devdata; struct device_node *np; int ret; np = of_find_compatible_node(NULL, NULL, "ibm,compression"); if (!np) return -ENODEV; of_node_put(np); RCU_INIT_POINTER(devdata, NULL); new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL); if (!new_devdata) return -ENOMEM; RCU_INIT_POINTER(devdata, new_devdata); /* * Get NX capabilities from the hypervisor.
*/ nxcop_get_capabilities(); ret = vio_register_driver(&nx842_vio_driver); if (ret) { pr_err("Could not register VIO driver %d\n", ret); kfree(new_devdata); return ret; } ret = vas_register_api_pseries(THIS_MODULE, VAS_COP_TYPE_GZIP, "nx-gzip"); if (ret) pr_err("NX-GZIP is not supported. Returned=%d\n", ret); return 0; } module_init(nx842_pseries_init); static void __exit nx842_pseries_exit(void) { struct nx842_devdata *old_devdata; unsigned long flags; vas_unregister_api_pseries(); crypto_unregister_alg(&nx842_pseries_alg); spin_lock_irqsave(&devdata_mutex, flags); old_devdata = rcu_dereference_check(devdata, lockdep_is_held(&devdata_mutex)); RCU_INIT_POINTER(devdata, NULL); spin_unlock_irqrestore(&devdata_mutex, flags); synchronize_rcu(); if (old_devdata && old_devdata->dev) dev_set_drvdata(old_devdata->dev, NULL); kfree(old_devdata); vio_unregister_driver(&nx842_vio_driver); } module_exit(nx842_pseries_exit);
linux-master
drivers/crypto/nx/nx-common-pseries.c
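The pseries module above exposes the accelerator to the rest of the kernel only through the "842" algorithm it registers (nx842_pseries_alg). A minimal sketch of a consumer, assuming the legacy crypto_comp interface that this registration targets is still available in this tree; the function names, buffer sizes and test pattern are illustrative, and crypto_alloc_comp() simply binds to whichever "842" provider has the highest priority (a software 842 implementation if the NX device never probed):

#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/err.h>

static int __init nx842_comp_example_init(void)
{
	struct crypto_comp *tfm;
	/* Easily compressible input; the trailing bytes are zero filled. */
	static const u8 src[64] = "an easily compressible test pattern";
	u8 comp[128], out[128];
	unsigned int clen = sizeof(comp), dlen = sizeof(out);
	int ret;

	/* Legacy (crypto_comp) allocation of the highest-priority "842" provider. */
	tfm = crypto_alloc_comp("842", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_comp_compress(tfm, src, sizeof(src), comp, &clen);
	if (!ret)
		ret = crypto_comp_decompress(tfm, comp, clen, out, &dlen);

	pr_info("842 example: ret=%d compressed=%u decompressed=%u\n",
		ret, clen, dlen);
	crypto_free_comp(tfm);
	return ret;
}

static void __exit nx842_comp_example_exit(void)
{
}

module_init(nx842_comp_example_init);
module_exit(nx842_comp_example_exit);
MODULE_LICENSE("GPL");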
// SPDX-License-Identifier: GPL-2.0-only /* * AES CBC routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. * * Author: Kent Yoder <[email protected]> */ #include <crypto/aes.h> #include <crypto/algapi.h> #include <linux/module.h> #include <linux/types.h> #include <linux/crypto.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" static int cbc_aes_nx_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; nx_ctx_init(nx_ctx, HCOP_FC_AES); switch (key_len) { case AES_KEYSIZE_128: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; break; case AES_KEYSIZE_192: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; break; case AES_KEYSIZE_256: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; break; default: return -EINVAL; } csbcpb->cpb.hdr.mode = NX_MODE_AES_CBC; memcpy(csbcpb->cpb.aes_cbc.key, in_key, key_len); return 0; } static int cbc_aes_nx_crypt(struct skcipher_request *req, int enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; unsigned long irq_flags; unsigned int processed = 0, to_process; int rc; spin_lock_irqsave(&nx_ctx->lock, irq_flags); if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; do { to_process = req->cryptlen - processed; rc = nx_build_sg_lists(nx_ctx, req->iv, req->dst, req->src, &to_process, processed, csbcpb->cpb.aes_cbc.iv); if (rc) goto out; if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { rc = -EINVAL; goto out; } rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; memcpy(req->iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count), &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < req->cryptlen); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int cbc_aes_nx_encrypt(struct skcipher_request *req) { return cbc_aes_nx_crypt(req, 1); } static int cbc_aes_nx_decrypt(struct skcipher_request *req) { return cbc_aes_nx_crypt(req, 0); } struct skcipher_alg nx_cbc_aes_alg = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-nx", .base.cra_priority = 300, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct nx_crypto_ctx), .base.cra_alignmask = 0xf, .base.cra_module = THIS_MODULE, .init = nx_crypto_ctx_aes_cbc_init, .exit = nx_crypto_ctx_skcipher_exit, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = cbc_aes_nx_set_key, .encrypt = cbc_aes_nx_encrypt, .decrypt = cbc_aes_nx_decrypt, };
linux-master
drivers/crypto/nx/nx-aes-cbc.c
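The "cbc(aes)" skcipher above is consumed through the generic symmetric-cipher API rather than called directly. A minimal sketch of that path, assuming placeholder key and data values; whether "cbc-aes-nx" or another provider is selected depends on priority, and the helper name is illustrative:

#include <crypto/skcipher.h>
#include <crypto/aes.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>

static int cbc_aes_example(void)
{
	static const u8 key[AES_KEYSIZE_128] = { [0 ... 15] = 0x41 };
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf, *iv;
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* One DMA-safe allocation: two AES blocks of data followed by the IV. */
	buf = kzalloc(3 * AES_BLOCK_SIZE, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_tfm;
	}
	iv = buf + 2 * AES_BLOCK_SIZE;

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto free_buf;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_buf;
	}

	sg_init_one(&sg, buf, 2 * AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	/* Encrypt two blocks in place; CBC requires a block-multiple length. */
	skcipher_request_set_crypt(req, &sg, &sg, 2 * AES_BLOCK_SIZE, iv);
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
free_buf:
	kfree(buf);
free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}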
// SPDX-License-Identifier: GPL-2.0-only /* * AES CTR routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. * * Author: Kent Yoder <[email protected]> */ #include <crypto/aes.h> #include <crypto/ctr.h> #include <crypto/algapi.h> #include <linux/module.h> #include <linux/types.h> #include <linux/crypto.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" static int ctr_aes_nx_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; nx_ctx_init(nx_ctx, HCOP_FC_AES); switch (key_len) { case AES_KEYSIZE_128: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; break; case AES_KEYSIZE_192: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; break; case AES_KEYSIZE_256: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; break; default: return -EINVAL; } csbcpb->cpb.hdr.mode = NX_MODE_AES_CTR; memcpy(csbcpb->cpb.aes_ctr.key, in_key, key_len); return 0; } static int ctr3686_aes_nx_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm); if (key_len < CTR_RFC3686_NONCE_SIZE) return -EINVAL; memcpy(nx_ctx->priv.ctr.nonce, in_key + key_len - CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); key_len -= CTR_RFC3686_NONCE_SIZE; return ctr_aes_nx_set_key(tfm, in_key, key_len); } static int ctr_aes_nx_crypt(struct skcipher_request *req, u8 *iv) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; unsigned long irq_flags; unsigned int processed = 0, to_process; int rc; spin_lock_irqsave(&nx_ctx->lock, irq_flags); do { to_process = req->cryptlen - processed; rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src, &to_process, processed, csbcpb->cpb.aes_ctr.iv); if (rc) goto out; if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { rc = -EINVAL; goto out; } rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; memcpy(iv, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count), &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < req->cryptlen); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int ctr3686_aes_nx_crypt(struct skcipher_request *req) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm); u8 iv[16]; memcpy(iv, nx_ctx->priv.ctr.nonce, CTR_RFC3686_NONCE_SIZE); memcpy(iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE); iv[12] = iv[13] = iv[14] = 0; iv[15] = 1; return ctr_aes_nx_crypt(req, iv); } struct skcipher_alg nx_ctr3686_aes_alg = { .base.cra_name = "rfc3686(ctr(aes))", .base.cra_driver_name = "rfc3686-ctr-aes-nx", .base.cra_priority = 300, .base.cra_blocksize = 1, .base.cra_ctxsize = sizeof(struct nx_crypto_ctx), .base.cra_module = THIS_MODULE, .init = nx_crypto_ctx_aes_ctr_init, .exit = nx_crypto_ctx_skcipher_exit, .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .ivsize = CTR_RFC3686_IV_SIZE, .setkey = ctr3686_aes_nx_set_key, .encrypt = ctr3686_aes_nx_crypt, .decrypt = ctr3686_aes_nx_crypt, 
.chunksize = AES_BLOCK_SIZE, };
linux-master
drivers/crypto/nx/nx-aes-ctr.c
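ctr3686_aes_nx_crypt() above assembles the RFC 3686 counter block from the nonce captured at setkey time, the 8-byte per-request IV, and an initial block counter of 1. A standalone restatement of that layout; the helper name is mine, not a driver symbol:

#include <linux/types.h>
#include <linux/string.h>
#include <crypto/ctr.h>	/* CTR_RFC3686_NONCE_SIZE (4), CTR_RFC3686_IV_SIZE (8) */

static void rfc3686_build_counter_block(u8 ctrblk[16], const u8 *nonce,
					const u8 *req_iv)
{
	/* Bytes 0..3: nonce stored by ctr3686_aes_nx_set_key(). */
	memcpy(ctrblk, nonce, CTR_RFC3686_NONCE_SIZE);
	/* Bytes 4..11: the per-request IV from req->iv. */
	memcpy(ctrblk + CTR_RFC3686_NONCE_SIZE, req_iv, CTR_RFC3686_IV_SIZE);
	/* Bytes 12..15: initial block counter of 1, big endian. */
	ctrblk[12] = ctrblk[13] = ctrblk[14] = 0;
	ctrblk[15] = 1;
}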
// SPDX-License-Identifier: GPL-2.0-only /* * AES CCM routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2012 International Business Machines Inc. * * Author: Kent Yoder <[email protected]> */ #include <crypto/internal/aead.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/scatterwalk.h> #include <linux/module.h> #include <linux/types.h> #include <linux/crypto.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" static int ccm_aes_nx_set_key(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead; nx_ctx_init(nx_ctx, HCOP_FC_AES); switch (key_len) { case AES_KEYSIZE_128: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; break; default: return -EINVAL; } csbcpb->cpb.hdr.mode = NX_MODE_AES_CCM; memcpy(csbcpb->cpb.aes_ccm.key, in_key, key_len); csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_CCA; memcpy(csbcpb_aead->cpb.aes_cca.key, in_key, key_len); return 0; } static int ccm4309_aes_nx_set_key(struct crypto_aead *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&tfm->base); if (key_len < 3) return -EINVAL; key_len -= 3; memcpy(nx_ctx->priv.ccm.nonce, in_key + key_len, 3); return ccm_aes_nx_set_key(tfm, in_key, key_len); } static int ccm_aes_nx_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { case 4: case 6: case 8: case 10: case 12: case 14: case 16: break; default: return -EINVAL; } return 0; } static int ccm4309_aes_nx_setauthsize(struct crypto_aead *tfm, unsigned int authsize) { switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } return 0; } /* taken from crypto/ccm.c */ static int set_msg_len(u8 *block, unsigned int msglen, int csize) { __be32 data; memset(block, 0, csize); block += csize; if (csize >= 4) csize = 4; else if (msglen > (unsigned int)(1 << (8 * csize))) return -EOVERFLOW; data = cpu_to_be32(msglen); memcpy(block - csize, (u8 *)&data + 4 - csize, csize); return 0; } /* taken from crypto/ccm.c */ static inline int crypto_ccm_check_iv(const u8 *iv) { /* 2 <= L <= 8, so 1 <= L' <= 7. */ if (1 > iv[0] || iv[0] > 7) return -EINVAL; return 0; } /* based on code from crypto/ccm.c */ static int generate_b0(u8 *iv, unsigned int assoclen, unsigned int authsize, unsigned int cryptlen, u8 *b0) { unsigned int l, lp, m = authsize; memcpy(b0, iv, 16); lp = b0[0]; l = lp + 1; /* set m, bits 3-5 */ *b0 |= (8 * ((m - 2) / 2)); /* set adata, bit 6, if associated data is used */ if (assoclen) *b0 |= 64; return set_msg_len(b0 + 16 - l, cryptlen, l); } static int generate_pat(u8 *iv, struct aead_request *req, struct nx_crypto_ctx *nx_ctx, unsigned int authsize, unsigned int nbytes, unsigned int assoclen, u8 *out) { struct nx_sg *nx_insg = nx_ctx->in_sg; struct nx_sg *nx_outsg = nx_ctx->out_sg; unsigned int iauth_len = 0; u8 tmp[16], *b1 = NULL, *b0 = NULL, *result = NULL; int rc; unsigned int max_sg_len; /* zero the ctr value */ memset(iv + 15 - iv[0], 0, iv[0] + 1); /* page 78 of nx_wb.pdf has, * Note: RFC3610 allows the AAD data to be up to 2^64 -1 bytes * in length. If a full message is used, the AES CCA implementation * restricts the maximum AAD length to 2^32 -1 bytes. * If partial messages are used, the implementation supports * 2^64 -1 bytes maximum AAD length. 
* * However, in the cryptoapi's aead_request structure, * assoclen is an unsigned int, thus it cannot hold a length * value greater than 2^32 - 1. * Thus the AAD is further constrained by this and is never * greater than 2^32. */ if (!assoclen) { b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; } else if (assoclen <= 14) { /* if associated data is 14 bytes or less, we do 1 GCM * operation on 2 AES blocks, B0 (stored in the csbcpb) and B1, * which is fed in through the source buffers here */ b0 = nx_ctx->csbcpb->cpb.aes_ccm.in_pat_or_b0; b1 = nx_ctx->priv.ccm.iauth_tag; iauth_len = assoclen; } else if (assoclen <= 65280) { /* if associated data is less than (2^16 - 2^8), we construct * B1 differently and feed in the associated data to a CCA * operation */ b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; iauth_len = 14; } else { b0 = nx_ctx->csbcpb_aead->cpb.aes_cca.b0; b1 = nx_ctx->csbcpb_aead->cpb.aes_cca.b1; iauth_len = 10; } /* generate B0 */ rc = generate_b0(iv, assoclen, authsize, nbytes, b0); if (rc) return rc; /* generate B1: * add control info for associated data * RFC 3610 and NIST Special Publication 800-38C */ if (b1) { memset(b1, 0, 16); if (assoclen <= 65280) { *(u16 *)b1 = assoclen; scatterwalk_map_and_copy(b1 + 2, req->src, 0, iauth_len, SCATTERWALK_FROM_SG); } else { *(u16 *)b1 = (u16)(0xfffe); *(u32 *)&b1[2] = assoclen; scatterwalk_map_and_copy(b1 + 6, req->src, 0, iauth_len, SCATTERWALK_FROM_SG); } } /* now copy any remaining AAD to scatterlist and call nx... */ if (!assoclen) { return rc; } else if (assoclen <= 14) { unsigned int len = 16; nx_insg = nx_build_sg_list(nx_insg, b1, &len, nx_ctx->ap->sglen); if (len != 16) return -EINVAL; nx_outsg = nx_build_sg_list(nx_outsg, tmp, &len, nx_ctx->ap->sglen); if (len != 16) return -EINVAL; /* inlen should be negative, indicating to phyp that its a * pointer to an sg list */ nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg); NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_ENDE_ENCRYPT; NX_CPB_FDM(nx_ctx->csbcpb) |= NX_FDM_INTERMEDIATE; result = nx_ctx->csbcpb->cpb.aes_ccm.out_pat_or_mac; rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) return rc; atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(assoclen, &nx_ctx->stats->aes_bytes); } else { unsigned int processed = 0, to_process; processed += iauth_len; /* page_limit: number of sg entries that fit on one page */ max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_driver.of.max_sg_len/sizeof(struct nx_sg)); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); do { to_process = min_t(u32, assoclen - processed, nx_ctx->ap->databytelen); nx_insg = nx_walk_and_build(nx_ctx->in_sg, nx_ctx->ap->sglen, req->src, processed, &to_process); if ((to_process + processed) < assoclen) { NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_INTERMEDIATE; } else { NX_CPB_FDM(nx_ctx->csbcpb_aead) &= ~NX_FDM_INTERMEDIATE; } nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg); result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) return rc; memcpy(nx_ctx->csbcpb_aead->cpb.aes_cca.b0, nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0, AES_BLOCK_SIZE); NX_CPB_FDM(nx_ctx->csbcpb_aead) |= NX_FDM_CONTINUATION; atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(assoclen, &nx_ctx->stats->aes_bytes); processed += to_process; } 
while (processed < assoclen); result = nx_ctx->csbcpb_aead->cpb.aes_cca.out_pat_or_b0; } memcpy(out, result, AES_BLOCK_SIZE); return rc; } static int ccm_nx_decrypt(struct aead_request *req, u8 *iv, unsigned int assoclen) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; unsigned int nbytes = req->cryptlen; unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); struct nx_ccm_priv *priv = &nx_ctx->priv.ccm; unsigned long irq_flags; unsigned int processed = 0, to_process; int rc = -1; spin_lock_irqsave(&nx_ctx->lock, irq_flags); nbytes -= authsize; /* copy out the auth tag to compare with later */ scatterwalk_map_and_copy(priv->oauth_tag, req->src, nbytes + req->assoclen, authsize, SCATTERWALK_FROM_SG); rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen, csbcpb->cpb.aes_ccm.in_pat_or_b0); if (rc) goto out; do { /* to_process: the AES_BLOCK_SIZE data chunk to process in this * update. This value is bound by sg list limits. */ to_process = nbytes - processed; if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; NX_CPB_FDM(nx_ctx->csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src, &to_process, processed + req->assoclen, csbcpb->cpb.aes_ccm.iv_or_ctr); if (rc) goto out; rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; /* for partial completion, copy following for next * entry into loop... */ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); memcpy(csbcpb->cpb.aes_ccm.in_s0, csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; /* update stats */ atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count), &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < nbytes); rc = crypto_memneq(csbcpb->cpb.aes_ccm.out_pat_or_mac, priv->oauth_tag, authsize) ? -EBADMSG : 0; out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int ccm_nx_encrypt(struct aead_request *req, u8 *iv, unsigned int assoclen) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; unsigned int nbytes = req->cryptlen; unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); unsigned long irq_flags; unsigned int processed = 0, to_process; int rc = -1; spin_lock_irqsave(&nx_ctx->lock, irq_flags); rc = generate_pat(iv, req, nx_ctx, authsize, nbytes, assoclen, csbcpb->cpb.aes_ccm.in_pat_or_b0); if (rc) goto out; do { /* to process: the AES_BLOCK_SIZE data chunk to process in this * update. This value is bound by sg list limits. */ to_process = nbytes - processed; if ((to_process + processed) < nbytes) NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; rc = nx_build_sg_lists(nx_ctx, iv, req->dst, req->src, &to_process, processed + req->assoclen, csbcpb->cpb.aes_ccm.iv_or_ctr); if (rc) goto out; rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; /* for partial completion, copy following for next * entry into loop... 
*/ memcpy(iv, csbcpb->cpb.aes_ccm.out_ctr, AES_BLOCK_SIZE); memcpy(csbcpb->cpb.aes_ccm.in_pat_or_b0, csbcpb->cpb.aes_ccm.out_pat_or_mac, AES_BLOCK_SIZE); memcpy(csbcpb->cpb.aes_ccm.in_s0, csbcpb->cpb.aes_ccm.out_s0, AES_BLOCK_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; /* update stats */ atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count), &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < nbytes); /* copy out the auth tag */ scatterwalk_map_and_copy(csbcpb->cpb.aes_ccm.out_pat_or_mac, req->dst, nbytes + req->assoclen, authsize, SCATTERWALK_TO_SG); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int ccm4309_aes_nx_encrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_gcm_rctx *rctx = aead_request_ctx(req); u8 *iv = rctx->iv; iv[0] = 3; memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); memcpy(iv + 4, req->iv, 8); return ccm_nx_encrypt(req, iv, req->assoclen - 8); } static int ccm_aes_nx_encrypt(struct aead_request *req) { int rc; rc = crypto_ccm_check_iv(req->iv); if (rc) return rc; return ccm_nx_encrypt(req, req->iv, req->assoclen); } static int ccm4309_aes_nx_decrypt(struct aead_request *req) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); struct nx_gcm_rctx *rctx = aead_request_ctx(req); u8 *iv = rctx->iv; iv[0] = 3; memcpy(iv + 1, nx_ctx->priv.ccm.nonce, 3); memcpy(iv + 4, req->iv, 8); return ccm_nx_decrypt(req, iv, req->assoclen - 8); } static int ccm_aes_nx_decrypt(struct aead_request *req) { int rc; rc = crypto_ccm_check_iv(req->iv); if (rc) return rc; return ccm_nx_decrypt(req, req->iv, req->assoclen); } struct aead_alg nx_ccm_aes_alg = { .base = { .cra_name = "ccm(aes)", .cra_driver_name = "ccm-aes-nx", .cra_priority = 300, .cra_flags = CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_module = THIS_MODULE, }, .init = nx_crypto_ctx_aes_ccm_init, .exit = nx_crypto_ctx_aead_exit, .ivsize = AES_BLOCK_SIZE, .maxauthsize = AES_BLOCK_SIZE, .setkey = ccm_aes_nx_set_key, .setauthsize = ccm_aes_nx_setauthsize, .encrypt = ccm_aes_nx_encrypt, .decrypt = ccm_aes_nx_decrypt, }; struct aead_alg nx_ccm4309_aes_alg = { .base = { .cra_name = "rfc4309(ccm(aes))", .cra_driver_name = "rfc4309-ccm-aes-nx", .cra_priority = 300, .cra_flags = CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_module = THIS_MODULE, }, .init = nx_crypto_ctx_aes_ccm_init, .exit = nx_crypto_ctx_aead_exit, .ivsize = 8, .maxauthsize = AES_BLOCK_SIZE, .setkey = ccm4309_aes_nx_set_key, .setauthsize = ccm4309_aes_nx_setauthsize, .encrypt = ccm4309_aes_nx_encrypt, .decrypt = ccm4309_aes_nx_decrypt, };
linux-master
drivers/crypto/nx/nx-aes-ccm.c
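The CCM transforms above are driven through the AEAD API, where the request carries the associated data length that generate_pat() branches on and the tag length set via setauthsize(). A minimal encrypt sketch against "ccm(aes)", with placeholder key, sizes and an all-zero nonce; the helper name is illustrative rather than part of the driver:

#include <crypto/aead.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/module.h>

static int ccm_aes_example(void)
{
	static const u8 key[16] = { [0 ... 15] = 0x42 };
	const unsigned int assoclen = 8, ptlen = 32, authsize = 8;
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf, *iv;
	int ret;

	tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_aead_setkey(tfm, key, sizeof(key));
	if (!ret)
		ret = crypto_aead_setauthsize(tfm, authsize);
	if (ret)
		goto free_tfm;

	/* AAD, plaintext and room for the tag share one sg buffer; the IV follows. */
	buf = kzalloc(assoclen + ptlen + authsize + 16, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_tfm;
	}
	iv = buf + assoclen + ptlen + authsize;
	iv[0] = 3;	/* L' = 3: 4-byte length field, 11-byte nonce in iv[1..11] */

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_buf;
	}

	sg_init_one(&sg, buf, assoclen + ptlen + authsize);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				  CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	/* Encrypt in place: ciphertext overwrites the plaintext, tag is appended. */
	aead_request_set_crypt(req, &sg, &sg, ptlen, iv);
	ret = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
free_buf:
	kfree(buf);
free_tfm:
	crypto_free_aead(tfm);
	return ret;
}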
// SPDX-License-Identifier: GPL-2.0-only /* * AES ECB routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. * * Author: Kent Yoder <[email protected]> */ #include <crypto/aes.h> #include <crypto/algapi.h> #include <linux/module.h> #include <linux/types.h> #include <linux/crypto.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" static int ecb_aes_nx_set_key(struct crypto_skcipher *tfm, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; nx_ctx_init(nx_ctx, HCOP_FC_AES); switch (key_len) { case AES_KEYSIZE_128: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; break; case AES_KEYSIZE_192: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192]; break; case AES_KEYSIZE_256: NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256); nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256]; break; default: return -EINVAL; } csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; memcpy(csbcpb->cpb.aes_ecb.key, in_key, key_len); return 0; } static int ecb_aes_nx_crypt(struct skcipher_request *req, int enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct nx_crypto_ctx *nx_ctx = crypto_skcipher_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; unsigned long irq_flags; unsigned int processed = 0, to_process; int rc; spin_lock_irqsave(&nx_ctx->lock, irq_flags); if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; else NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; do { to_process = req->cryptlen - processed; rc = nx_build_sg_lists(nx_ctx, NULL, req->dst, req->src, &to_process, processed, NULL); if (rc) goto out; if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { rc = -EINVAL; goto out; } rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); if (rc) goto out; atomic_inc(&(nx_ctx->stats->aes_ops)); atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count), &(nx_ctx->stats->aes_bytes)); processed += to_process; } while (processed < req->cryptlen); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int ecb_aes_nx_encrypt(struct skcipher_request *req) { return ecb_aes_nx_crypt(req, 1); } static int ecb_aes_nx_decrypt(struct skcipher_request *req) { return ecb_aes_nx_crypt(req, 0); } struct skcipher_alg nx_ecb_aes_alg = { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-aes-nx", .base.cra_priority = 300, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_alignmask = 0xf, .base.cra_ctxsize = sizeof(struct nx_crypto_ctx), .base.cra_module = THIS_MODULE, .init = nx_crypto_ctx_aes_ecb_init, .exit = nx_crypto_ctx_skcipher_exit, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = ecb_aes_nx_set_key, .encrypt = ecb_aes_nx_encrypt, .decrypt = ecb_aes_nx_decrypt, };
linux-master
drivers/crypto/nx/nx-aes-ecb.c
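ecb_aes_nx_crypt() above, like the CBC and CTR variants, walks a request in bounded pieces because one NX operation is limited by the sg-list and data-length properties read at probe time. The skeleton below restates that loop shape only; process_chunk() and max_chunk are stand-ins for nx_build_sg_lists()/nx_hcall_sync() and the driver limits, not real driver symbols:

#include <linux/minmax.h>
#include <linux/types.h>

static int nx_style_chunk_loop(unsigned int cryptlen, unsigned int max_chunk,
			       int (*process_chunk)(unsigned int offset,
						    unsigned int len))
{
	unsigned int processed = 0, to_process;
	int rc = 0;

	do {
		/* The real driver lets nx_build_sg_lists() shrink to_process. */
		to_process = min(cryptlen - processed, max_chunk);
		rc = process_chunk(processed, to_process);
		if (rc)
			break;
		processed += to_process;
	} while (processed < cryptlen);

	return rc;
}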
// SPDX-License-Identifier: GPL-2.0-only /* * SHA-256 routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. * * Author: Kent Yoder <[email protected]> */ #include <crypto/internal/hash.h> #include <crypto/sha2.h> #include <linux/module.h> #include <asm/vio.h> #include <asm/byteorder.h> #include "nx_csbcpb.h" #include "nx.h" struct sha256_state_be { __be32 state[SHA256_DIGEST_SIZE / 4]; u64 count; u8 buf[SHA256_BLOCK_SIZE]; }; static int nx_crypto_ctx_sha256_init(struct crypto_tfm *tfm) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); int err; err = nx_crypto_ctx_sha_init(tfm); if (err) return err; nx_ctx_init(nx_ctx, HCOP_FC_SHA); nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA256]; NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA256); return 0; } static int nx_sha256_init(struct shash_desc *desc) { struct sha256_state_be *sctx = shash_desc_ctx(desc); memset(sctx, 0, sizeof *sctx); sctx->state[0] = __cpu_to_be32(SHA256_H0); sctx->state[1] = __cpu_to_be32(SHA256_H1); sctx->state[2] = __cpu_to_be32(SHA256_H2); sctx->state[3] = __cpu_to_be32(SHA256_H3); sctx->state[4] = __cpu_to_be32(SHA256_H4); sctx->state[5] = __cpu_to_be32(SHA256_H5); sctx->state[6] = __cpu_to_be32(SHA256_H6); sctx->state[7] = __cpu_to_be32(SHA256_H7); sctx->count = 0; return 0; } static int nx_sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha256_state_be *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *out_sg; u64 to_process = 0, leftover, total; unsigned long irq_flags; int rc = 0; int data_len; u32 max_sg_len; u64 buf_len = (sctx->count % SHA256_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); /* 2 cases for total data len: * 1: < SHA256_BLOCK_SIZE: copy into state, return 0 * 2: >= SHA256_BLOCK_SIZE: process X blocks, copy in leftover */ total = (sctx->count % SHA256_BLOCK_SIZE) + len; if (total < SHA256_BLOCK_SIZE) { memcpy(sctx->buf + buf_len, data, len); sctx->count += len; goto out; } memcpy(csbcpb->cpb.sha256.message_digest, sctx->state, SHA256_DIGEST_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_driver.of.max_sg_len/sizeof(struct nx_sg)); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); data_len = SHA256_DIGEST_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, &data_len, max_sg_len); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (data_len != SHA256_DIGEST_SIZE) { rc = -EINVAL; goto out; } do { int used_sgs = 0; struct nx_sg *in_sg = nx_ctx->in_sg; if (buf_len) { data_len = buf_len; in_sg = nx_build_sg_list(in_sg, (u8 *) sctx->buf, &data_len, max_sg_len); if (data_len != buf_len) { rc = -EINVAL; goto out; } used_sgs = in_sg - nx_ctx->in_sg; } /* to_process: SHA256_BLOCK_SIZE aligned chunk to be * processed in this iteration. This value is restricted * by sg list limits and number of sgs we already used * for leftover data. (see above) * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len, * but because data may not be aligned, we need to account * for that too. 
*/ to_process = min_t(u64, total, (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE); to_process = to_process & ~(SHA256_BLOCK_SIZE - 1); data_len = to_process - buf_len; in_sg = nx_build_sg_list(in_sg, (u8 *) data, &data_len, max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); to_process = data_len + buf_len; leftover = total - to_process; /* * we've hit the nx chip previously and we're updating * again, so copy over the partial digest. */ memcpy(csbcpb->cpb.sha256.input_partial_digest, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { rc = -EINVAL; goto out; } rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0); if (rc) goto out; atomic_inc(&(nx_ctx->stats->sha256_ops)); total -= to_process; data += to_process - buf_len; buf_len = 0; } while (leftover >= SHA256_BLOCK_SIZE); /* copy the leftover back into the state struct */ if (leftover) memcpy(sctx->buf, data, leftover); sctx->count += len; memcpy(sctx->state, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int nx_sha256_final(struct shash_desc *desc, u8 *out) { struct sha256_state_be *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; unsigned long irq_flags; u32 max_sg_len; int rc = 0; int len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_driver.of.max_sg_len/sizeof(struct nx_sg)); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); /* final is represented by continuing the operation and indicating that * this is not an intermediate operation */ if (sctx->count >= SHA256_BLOCK_SIZE) { /* we've hit the nx chip previously, now we're finalizing, * so copy over the partial digest */ memcpy(csbcpb->cpb.sha256.input_partial_digest, sctx->state, SHA256_DIGEST_SIZE); NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; } else { NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; } csbcpb->cpb.sha256.message_bit_length = (u64) (sctx->count * 8); len = sctx->count & (SHA256_BLOCK_SIZE - 1); in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buf, &len, max_sg_len); if (len != (sctx->count & (SHA256_BLOCK_SIZE - 1))) { rc = -EINVAL; goto out; } len = SHA256_DIGEST_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len); if (len != SHA256_DIGEST_SIZE) { rc = -EINVAL; goto out; } nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (!nx_ctx->op.outlen) { rc = -EINVAL; goto out; } rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0); if (rc) goto out; atomic_inc(&(nx_ctx->stats->sha256_ops)); atomic64_add(sctx->count, &(nx_ctx->stats->sha256_bytes)); memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int nx_sha256_export(struct shash_desc *desc, void *out) { struct sha256_state_be *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int nx_sha256_import(struct shash_desc *desc, const void *in) { struct sha256_state_be *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } struct shash_alg nx_shash_sha256_alg = { .digestsize = SHA256_DIGEST_SIZE, .init = nx_sha256_init, .update = nx_sha256_update, .final 
= nx_sha256_final, .export = nx_sha256_export, .import = nx_sha256_import, .descsize = sizeof(struct sha256_state_be), .statesize = sizeof(struct sha256_state_be), .base = { .cra_name = "sha256", .cra_driver_name = "sha256-nx", .cra_priority = 300, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_init = nx_crypto_ctx_sha256_init, .cra_exit = nx_crypto_ctx_exit, } };
linux-master
drivers/crypto/nx/nx-sha256.c
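The "sha256" shash above is normally reached through the generic hash API; the one-shot digest path below lands in nx_sha256_update()/nx_sha256_final() only when the NX provider wins the priority selection. The helper name and message are placeholders:

#include <crypto/hash.h>
#include <crypto/sha2.h>
#include <linux/err.h>
#include <linux/string.h>

static int sha256_oneshot_example(u8 digest[SHA256_DIGEST_SIZE])
{
	static const u8 msg[] = "abc";
	struct crypto_shash *tfm;
	int ret;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		/* Stack descriptor sized for whichever provider was selected. */
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		ret = crypto_shash_digest(desc, msg, sizeof(msg) - 1, digest);
		shash_desc_zero(desc);
	}

	crypto_free_shash(tfm);
	return ret;
}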
// SPDX-License-Identifier: GPL-2.0-or-later /* * Cryptographic API for the NX-842 hardware compression. * * Copyright (C) IBM Corporation, 2011-2015 * * Designer of the Power data compression engine: * Bulent Abali <[email protected]> * * Original Authors: Robert Jennings <[email protected]> * Seth Jennings <[email protected]> * * Rewrite: Dan Streetman <[email protected]> * * This is an interface to the NX-842 compression hardware in PowerPC * processors. Most of the complexity of this driver is due to the fact that * the NX-842 compression hardware requires the input and output data buffers * to be specifically aligned, to be a specific multiple in length, and within * specific minimum and maximum lengths. Those restrictions, provided by the * nx-842 driver via nx842_constraints, mean this driver must use bounce * buffers and headers to correct misaligned in or out buffers, and to split * input buffers that are too large. * * This driver will fall back to software decompression if the hardware * decompression fails, so this driver's decompression should never fail as * long as the provided compressed buffer is valid. Any compressed buffer * created by this driver will have a header (except ones where the input * perfectly matches the constraints); so users of this driver cannot simply * pass a compressed buffer created by this driver over to the 842 software * decompression library. Instead, users must use this driver to decompress; * if the hardware fails or is unavailable, the compressed buffer will be * parsed and the header removed, and the raw 842 buffer(s) passed to the 842 * software decompression library. * * This does not fall back to software compression, however, since the caller * of this function is specifically requesting hardware compression; if the * hardware compression fails, the caller can fall back to software * compression, and the raw 842 compressed buffer that the software compressor * creates can be passed to this driver for hardware decompression; any * buffer without our specific header magic is assumed to be a raw 842 buffer * and passed directly to the hardware. Note that the software compression * library will produce a compressed buffer that is incompatible with the * hardware decompressor if the original input buffer length is not a multiple * of 8; if such a compressed buffer is passed to this driver for * decompression, the hardware will reject it and this driver will then pass * it over to the software library for decompression. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/vmalloc.h> #include <linux/sw842.h> #include <linux/spinlock.h> #include "nx-842.h" /* The first 5 bits of this magic are 0x1f, which is an invalid 842 5-bit * template (see lib/842/842.h), so this magic number will never appear at * the start of a raw 842 compressed buffer. That is important, as any buffer * passed to us without this magic is assumed to be a raw 842 compressed * buffer, and passed directly to the hardware to decompress.
*/ #define NX842_CRYPTO_MAGIC (0xf842) #define NX842_CRYPTO_HEADER_SIZE(g) \ (sizeof(struct nx842_crypto_header) + \ sizeof(struct nx842_crypto_header_group) * (g)) #define NX842_CRYPTO_HEADER_MAX_SIZE \ NX842_CRYPTO_HEADER_SIZE(NX842_CRYPTO_GROUP_MAX) /* bounce buffer size */ #define BOUNCE_BUFFER_ORDER (2) #define BOUNCE_BUFFER_SIZE \ ((unsigned int)(PAGE_SIZE << BOUNCE_BUFFER_ORDER)) /* try longer on comp because we can fallback to sw decomp if hw is busy */ #define COMP_BUSY_TIMEOUT (250) /* ms */ #define DECOMP_BUSY_TIMEOUT (50) /* ms */ struct nx842_crypto_param { u8 *in; unsigned int iremain; u8 *out; unsigned int oremain; unsigned int ototal; }; static int update_param(struct nx842_crypto_param *p, unsigned int slen, unsigned int dlen) { if (p->iremain < slen) return -EOVERFLOW; if (p->oremain < dlen) return -ENOSPC; p->in += slen; p->iremain -= slen; p->out += dlen; p->oremain -= dlen; p->ototal += dlen; return 0; } int nx842_crypto_init(struct crypto_tfm *tfm, struct nx842_driver *driver) { struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); spin_lock_init(&ctx->lock); ctx->driver = driver; ctx->wmem = kmalloc(driver->workmem_size, GFP_KERNEL); ctx->sbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); ctx->dbounce = (u8 *)__get_free_pages(GFP_KERNEL, BOUNCE_BUFFER_ORDER); if (!ctx->wmem || !ctx->sbounce || !ctx->dbounce) { kfree(ctx->wmem); free_page((unsigned long)ctx->sbounce); free_page((unsigned long)ctx->dbounce); return -ENOMEM; } return 0; } EXPORT_SYMBOL_GPL(nx842_crypto_init); void nx842_crypto_exit(struct crypto_tfm *tfm) { struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); kfree(ctx->wmem); free_page((unsigned long)ctx->sbounce); free_page((unsigned long)ctx->dbounce); } EXPORT_SYMBOL_GPL(nx842_crypto_exit); static void check_constraints(struct nx842_constraints *c) { /* limit maximum, to always have enough bounce buffer to decompress */ if (c->maximum > BOUNCE_BUFFER_SIZE) c->maximum = BOUNCE_BUFFER_SIZE; } static int nx842_crypto_add_header(struct nx842_crypto_header *hdr, u8 *buf) { int s = NX842_CRYPTO_HEADER_SIZE(hdr->groups); /* compress should have added space for header */ if (s > be16_to_cpu(hdr->group[0].padding)) { pr_err("Internal error: no space for header\n"); return -EINVAL; } memcpy(buf, hdr, s); print_hex_dump_debug("header ", DUMP_PREFIX_OFFSET, 16, 1, buf, s, 0); return 0; } static int compress(struct nx842_crypto_ctx *ctx, struct nx842_crypto_param *p, struct nx842_crypto_header_group *g, struct nx842_constraints *c, u16 *ignore, unsigned int hdrsize) { unsigned int slen = p->iremain, dlen = p->oremain, tmplen; unsigned int adj_slen = slen; u8 *src = p->in, *dst = p->out; int ret, dskip = 0; ktime_t timeout; if (p->iremain == 0) return -EOVERFLOW; if (p->oremain == 0 || hdrsize + c->minimum > dlen) return -ENOSPC; if (slen % c->multiple) adj_slen = round_up(slen, c->multiple); if (slen < c->minimum) adj_slen = c->minimum; if (slen > c->maximum) adj_slen = slen = c->maximum; if (adj_slen > slen || (u64)src % c->alignment) { adj_slen = min(adj_slen, BOUNCE_BUFFER_SIZE); slen = min(slen, BOUNCE_BUFFER_SIZE); if (adj_slen > slen) memset(ctx->sbounce + slen, 0, adj_slen - slen); memcpy(ctx->sbounce, src, slen); src = ctx->sbounce; slen = adj_slen; pr_debug("using comp sbounce buffer, len %x\n", slen); } dst += hdrsize; dlen -= hdrsize; if ((u64)dst % c->alignment) { dskip = (int)(PTR_ALIGN(dst, c->alignment) - dst); dst += dskip; dlen -= dskip; } if (dlen % c->multiple) dlen = round_down(dlen, c->multiple); if (dlen < c->minimum) { nospc: 
dst = ctx->dbounce; dlen = min(p->oremain, BOUNCE_BUFFER_SIZE); dlen = round_down(dlen, c->multiple); dskip = 0; pr_debug("using comp dbounce buffer, len %x\n", dlen); } if (dlen > c->maximum) dlen = c->maximum; tmplen = dlen; timeout = ktime_add_ms(ktime_get(), COMP_BUSY_TIMEOUT); do { dlen = tmplen; /* reset dlen, if we're retrying */ ret = ctx->driver->compress(src, slen, dst, &dlen, ctx->wmem); /* possibly we should reduce the slen here, instead of * retrying with the dbounce buffer? */ if (ret == -ENOSPC && dst != ctx->dbounce) goto nospc; } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); if (ret) return ret; dskip += hdrsize; if (dst == ctx->dbounce) memcpy(p->out + dskip, dst, dlen); g->padding = cpu_to_be16(dskip); g->compressed_length = cpu_to_be32(dlen); g->uncompressed_length = cpu_to_be32(slen); if (p->iremain < slen) { *ignore = slen - p->iremain; slen = p->iremain; } pr_debug("compress slen %x ignore %x dlen %x padding %x\n", slen, *ignore, dlen, dskip); return update_param(p, slen, dskip + dlen); } int nx842_crypto_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); struct nx842_crypto_header *hdr = &ctx->header; struct nx842_crypto_param p; struct nx842_constraints c = *ctx->driver->constraints; unsigned int groups, hdrsize, h; int ret, n; bool add_header; u16 ignore = 0; check_constraints(&c); p.in = (u8 *)src; p.iremain = slen; p.out = dst; p.oremain = *dlen; p.ototal = 0; *dlen = 0; groups = min_t(unsigned int, NX842_CRYPTO_GROUP_MAX, DIV_ROUND_UP(p.iremain, c.maximum)); hdrsize = NX842_CRYPTO_HEADER_SIZE(groups); spin_lock_bh(&ctx->lock); /* skip adding header if the buffers meet all constraints */ add_header = (p.iremain % c.multiple || p.iremain < c.minimum || p.iremain > c.maximum || (u64)p.in % c.alignment || p.oremain % c.multiple || p.oremain < c.minimum || p.oremain > c.maximum || (u64)p.out % c.alignment); hdr->magic = cpu_to_be16(NX842_CRYPTO_MAGIC); hdr->groups = 0; hdr->ignore = 0; while (p.iremain > 0) { n = hdr->groups++; ret = -ENOSPC; if (hdr->groups > NX842_CRYPTO_GROUP_MAX) goto unlock; /* header goes before first group */ h = !n && add_header ? 
hdrsize : 0; if (ignore) pr_warn("internal error, ignore is set %x\n", ignore); ret = compress(ctx, &p, &hdr->group[n], &c, &ignore, h); if (ret) goto unlock; } if (!add_header && hdr->groups > 1) { pr_err("Internal error: No header but multiple groups\n"); ret = -EINVAL; goto unlock; } /* ignore indicates the input stream needed to be padded */ hdr->ignore = cpu_to_be16(ignore); if (ignore) pr_debug("marked %d bytes as ignore\n", ignore); if (add_header) ret = nx842_crypto_add_header(hdr, dst); if (ret) goto unlock; *dlen = p.ototal; pr_debug("compress total slen %x dlen %x\n", slen, *dlen); unlock: spin_unlock_bh(&ctx->lock); return ret; } EXPORT_SYMBOL_GPL(nx842_crypto_compress); static int decompress(struct nx842_crypto_ctx *ctx, struct nx842_crypto_param *p, struct nx842_crypto_header_group *g, struct nx842_constraints *c, u16 ignore) { unsigned int slen = be32_to_cpu(g->compressed_length); unsigned int required_len = be32_to_cpu(g->uncompressed_length); unsigned int dlen = p->oremain, tmplen; unsigned int adj_slen = slen; u8 *src = p->in, *dst = p->out; u16 padding = be16_to_cpu(g->padding); int ret, spadding = 0; ktime_t timeout; if (!slen || !required_len) return -EINVAL; if (p->iremain <= 0 || padding + slen > p->iremain) return -EOVERFLOW; if (p->oremain <= 0 || required_len - ignore > p->oremain) return -ENOSPC; src += padding; if (slen % c->multiple) adj_slen = round_up(slen, c->multiple); if (slen < c->minimum) adj_slen = c->minimum; if (slen > c->maximum) goto usesw; if (slen < adj_slen || (u64)src % c->alignment) { /* we can append padding bytes because the 842 format defines * an "end" template (see lib/842/842_decompress.c) and will * ignore any bytes following it. */ if (slen < adj_slen) memset(ctx->sbounce + slen, 0, adj_slen - slen); memcpy(ctx->sbounce, src, slen); src = ctx->sbounce; spadding = adj_slen - slen; slen = adj_slen; pr_debug("using decomp sbounce buffer, len %x\n", slen); } if (dlen % c->multiple) dlen = round_down(dlen, c->multiple); if (dlen < required_len || (u64)dst % c->alignment) { dst = ctx->dbounce; dlen = min(required_len, BOUNCE_BUFFER_SIZE); pr_debug("using decomp dbounce buffer, len %x\n", dlen); } if (dlen < c->minimum) goto usesw; if (dlen > c->maximum) dlen = c->maximum; tmplen = dlen; timeout = ktime_add_ms(ktime_get(), DECOMP_BUSY_TIMEOUT); do { dlen = tmplen; /* reset dlen, if we're retrying */ ret = ctx->driver->decompress(src, slen, dst, &dlen, ctx->wmem); } while (ret == -EBUSY && ktime_before(ktime_get(), timeout)); if (ret) { usesw: /* reset everything, sw doesn't have constraints */ src = p->in + padding; slen = be32_to_cpu(g->compressed_length); spadding = 0; dst = p->out; dlen = p->oremain; if (dlen < required_len) { /* have ignore bytes */ dst = ctx->dbounce; dlen = BOUNCE_BUFFER_SIZE; } pr_info_ratelimited("using software 842 decompression\n"); ret = sw842_decompress(src, slen, dst, &dlen); } if (ret) return ret; slen -= spadding; dlen -= ignore; if (ignore) pr_debug("ignoring last %x bytes\n", ignore); if (dst == ctx->dbounce) memcpy(p->out, dst, dlen); pr_debug("decompress slen %x padding %x dlen %x ignore %x\n", slen, padding, dlen, ignore); return update_param(p, slen + padding, dlen); } int nx842_crypto_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { struct nx842_crypto_ctx *ctx = crypto_tfm_ctx(tfm); struct nx842_crypto_header *hdr; struct nx842_crypto_param p; struct nx842_constraints c = *ctx->driver->constraints; int n, ret, hdr_len; u16 ignore = 0; 
check_constraints(&c); p.in = (u8 *)src; p.iremain = slen; p.out = dst; p.oremain = *dlen; p.ototal = 0; *dlen = 0; hdr = (struct nx842_crypto_header *)src; spin_lock_bh(&ctx->lock); /* If it doesn't start with our header magic number, assume it's a raw * 842 compressed buffer and pass it directly to the hardware driver */ if (be16_to_cpu(hdr->magic) != NX842_CRYPTO_MAGIC) { struct nx842_crypto_header_group g = { .padding = 0, .compressed_length = cpu_to_be32(p.iremain), .uncompressed_length = cpu_to_be32(p.oremain), }; ret = decompress(ctx, &p, &g, &c, 0); if (ret) goto unlock; goto success; } if (!hdr->groups) { pr_err("header has no groups\n"); ret = -EINVAL; goto unlock; } if (hdr->groups > NX842_CRYPTO_GROUP_MAX) { pr_err("header has too many groups %x, max %x\n", hdr->groups, NX842_CRYPTO_GROUP_MAX); ret = -EINVAL; goto unlock; } hdr_len = NX842_CRYPTO_HEADER_SIZE(hdr->groups); if (hdr_len > slen) { ret = -EOVERFLOW; goto unlock; } memcpy(&ctx->header, src, hdr_len); hdr = &ctx->header; for (n = 0; n < hdr->groups; n++) { /* ignore applies to last group */ if (n + 1 == hdr->groups) ignore = be16_to_cpu(hdr->ignore); ret = decompress(ctx, &p, &hdr->group[n], &c, ignore); if (ret) goto unlock; } success: *dlen = p.ototal; pr_debug("decompress total slen %x dlen %x\n", slen, *dlen); ret = 0; unlock: spin_unlock_bh(&ctx->lock); return ret; } EXPORT_SYMBOL_GPL(nx842_crypto_decompress); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("IBM PowerPC Nest (NX) 842 Hardware Compression Driver"); MODULE_AUTHOR("Dan Streetman <[email protected]>");
linux-master
drivers/crypto/nx/nx-842.c
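The nx-842 wrapper above never hands the engine an arbitrary buffer: the input is split into groups no larger than the hardware maximum, each group records its padding and compressed/uncompressed lengths, and a magic-tagged header with an overall ignore count is prepended so the decompress path can undo the layout. The standalone sketch below mirrors only that bookkeeping; the struct layout, field widths and the 64 KiB group maximum are illustrative assumptions, not the exact definitions from the driver's nx-842.h.

/* Illustrative header bookkeeping, modelled loosely on the nx-842 wrapper. */
#include <stdint.h>
#include <stdio.h>

struct demo_group {                   /* per-group record, cf. nx842_crypto_header_group */
	uint16_t padding;             /* bytes to skip before the 842 stream              */
	uint32_t compressed_length;   /* bytes of 842 data in this group                  */
	uint32_t uncompressed_length; /* bytes the group expands back into                */
};

struct demo_header {                  /* leading header, cf. nx842_crypto_header         */
	uint16_t magic;               /* lets decompress tell wrapped from raw 842 input  */
	uint16_t ignore;              /* trailing pad bytes to drop after the last group  */
	uint8_t  groups;              /* number of demo_group entries that follow         */
	struct demo_group group[];
};

/* Header size grows with the group count, like NX842_CRYPTO_HEADER_SIZE(g). */
static size_t demo_header_size(unsigned int groups)
{
	return sizeof(struct demo_header) + groups * sizeof(struct demo_group);
}

int main(void)
{
	unsigned int hw_max = 64 * 1024;                    /* assumed engine group maximum */
	unsigned int slen = 150 * 1024;                     /* input spanning three groups  */
	unsigned int groups = (slen + hw_max - 1) / hw_max; /* DIV_ROUND_UP(slen, hw_max)   */

	printf("input %u bytes -> %u groups, header %zu bytes\n",
	       slen, groups, demo_header_size(groups));
	return 0;
}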
// SPDX-License-Identifier: GPL-2.0-only /* * SHA-512 routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. * * Author: Kent Yoder <[email protected]> */ #include <crypto/internal/hash.h> #include <crypto/sha2.h> #include <linux/module.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" struct sha512_state_be { __be64 state[SHA512_DIGEST_SIZE / 8]; u64 count[2]; u8 buf[SHA512_BLOCK_SIZE]; }; static int nx_crypto_ctx_sha512_init(struct crypto_tfm *tfm) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); int err; err = nx_crypto_ctx_sha_init(tfm); if (err) return err; nx_ctx_init(nx_ctx, HCOP_FC_SHA); nx_ctx->ap = &nx_ctx->props[NX_PROPS_SHA512]; NX_CPB_SET_DIGEST_SIZE(nx_ctx->csbcpb, NX_DS_SHA512); return 0; } static int nx_sha512_init(struct shash_desc *desc) { struct sha512_state_be *sctx = shash_desc_ctx(desc); memset(sctx, 0, sizeof *sctx); sctx->state[0] = __cpu_to_be64(SHA512_H0); sctx->state[1] = __cpu_to_be64(SHA512_H1); sctx->state[2] = __cpu_to_be64(SHA512_H2); sctx->state[3] = __cpu_to_be64(SHA512_H3); sctx->state[4] = __cpu_to_be64(SHA512_H4); sctx->state[5] = __cpu_to_be64(SHA512_H5); sctx->state[6] = __cpu_to_be64(SHA512_H6); sctx->state[7] = __cpu_to_be64(SHA512_H7); sctx->count[0] = 0; return 0; } static int nx_sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct sha512_state_be *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *out_sg; u64 to_process, leftover = 0, total; unsigned long irq_flags; int rc = 0; int data_len; u32 max_sg_len; u64 buf_len = (sctx->count[0] % SHA512_BLOCK_SIZE); spin_lock_irqsave(&nx_ctx->lock, irq_flags); /* 2 cases for total data len: * 1: < SHA512_BLOCK_SIZE: copy into state, return 0 * 2: >= SHA512_BLOCK_SIZE: process X blocks, copy in leftover */ total = (sctx->count[0] % SHA512_BLOCK_SIZE) + len; if (total < SHA512_BLOCK_SIZE) { memcpy(sctx->buf + buf_len, data, len); sctx->count[0] += len; goto out; } memcpy(csbcpb->cpb.sha512.message_digest, sctx->state, SHA512_DIGEST_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_driver.of.max_sg_len/sizeof(struct nx_sg)); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); data_len = SHA512_DIGEST_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, &data_len, max_sg_len); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (data_len != SHA512_DIGEST_SIZE) { rc = -EINVAL; goto out; } do { int used_sgs = 0; struct nx_sg *in_sg = nx_ctx->in_sg; if (buf_len) { data_len = buf_len; in_sg = nx_build_sg_list(in_sg, (u8 *) sctx->buf, &data_len, max_sg_len); if (data_len != buf_len) { rc = -EINVAL; goto out; } used_sgs = in_sg - nx_ctx->in_sg; } /* to_process: SHA512_BLOCK_SIZE aligned chunk to be * processed in this iteration. This value is restricted * by sg list limits and number of sgs we already used * for leftover data. (see above) * In ideal case, we could allow NX_PAGE_SIZE * max_sg_len, * but because data may not be aligned, we need to account * for that too. 
*/ to_process = min_t(u64, total, (max_sg_len - 1 - used_sgs) * NX_PAGE_SIZE); to_process = to_process & ~(SHA512_BLOCK_SIZE - 1); data_len = to_process - buf_len; in_sg = nx_build_sg_list(in_sg, (u8 *) data, &data_len, max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); if (data_len != (to_process - buf_len)) { rc = -EINVAL; goto out; } to_process = data_len + buf_len; leftover = total - to_process; /* * we've hit the nx chip previously and we're updating * again, so copy over the partial digest. */ memcpy(csbcpb->cpb.sha512.input_partial_digest, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { rc = -EINVAL; goto out; } rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0); if (rc) goto out; atomic_inc(&(nx_ctx->stats->sha512_ops)); total -= to_process; data += to_process - buf_len; buf_len = 0; } while (leftover >= SHA512_BLOCK_SIZE); /* copy the leftover back into the state struct */ if (leftover) memcpy(sctx->buf, data, leftover); sctx->count[0] += len; memcpy(sctx->state, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int nx_sha512_final(struct shash_desc *desc, u8 *out) { struct sha512_state_be *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = (struct nx_csbcpb *)nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; u32 max_sg_len; u64 count0; unsigned long irq_flags; int rc = 0; int len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); max_sg_len = min_t(u64, nx_ctx->ap->sglen, nx_driver.of.max_sg_len/sizeof(struct nx_sg)); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); /* final is represented by continuing the operation and indicating that * this is not an intermediate operation */ if (sctx->count[0] >= SHA512_BLOCK_SIZE) { /* we've hit the nx chip previously, now we're finalizing, * so copy over the partial digest */ memcpy(csbcpb->cpb.sha512.input_partial_digest, sctx->state, SHA512_DIGEST_SIZE); NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; } else { NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION; } NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; count0 = sctx->count[0] * 8; csbcpb->cpb.sha512.message_bit_length_lo = count0; len = sctx->count[0] & (SHA512_BLOCK_SIZE - 1); in_sg = nx_build_sg_list(nx_ctx->in_sg, sctx->buf, &len, max_sg_len); if (len != (sctx->count[0] & (SHA512_BLOCK_SIZE - 1))) { rc = -EINVAL; goto out; } len = SHA512_DIGEST_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, max_sg_len); nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (!nx_ctx->op.outlen) { rc = -EINVAL; goto out; } rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0); if (rc) goto out; atomic_inc(&(nx_ctx->stats->sha512_ops)); atomic64_add(sctx->count[0], &(nx_ctx->stats->sha512_bytes)); memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int nx_sha512_export(struct shash_desc *desc, void *out) { struct sha512_state_be *sctx = shash_desc_ctx(desc); memcpy(out, sctx, sizeof(*sctx)); return 0; } static int nx_sha512_import(struct shash_desc *desc, const void *in) { struct sha512_state_be *sctx = shash_desc_ctx(desc); memcpy(sctx, in, sizeof(*sctx)); return 0; } struct shash_alg nx_shash_sha512_alg = { 
.digestsize = SHA512_DIGEST_SIZE,
	.init       = nx_sha512_init,
	.update     = nx_sha512_update,
	.final      = nx_sha512_final,
	.export     = nx_sha512_export,
	.import     = nx_sha512_import,
	.descsize   = sizeof(struct sha512_state_be),
	.statesize  = sizeof(struct sha512_state_be),
	.base       = {
		.cra_name        = "sha512",
		.cra_driver_name = "sha512-nx",
		.cra_priority    = 300,
		.cra_blocksize   = SHA512_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_sha512_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
linux-master
drivers/crypto/nx/nx-sha512.c
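nx_sha512_update() above follows the usual offload pattern: anything shorter than a block is parked in sctx->buf, only whole SHA-512 blocks are handed to the coprocessor, and the leftover waits for the next update or for final(). A minimal, software-only sketch of that buffering logic follows; process_blocks() is a stub standing in for the NX hcall and simply counts what would be offloaded.

/* Buffer-partials / process-whole-blocks sketch (not driver code). */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 128                /* SHA512_BLOCK_SIZE */

struct demo_state {
	unsigned char buf[BLOCK_SIZE];
	size_t buffered;              /* bytes currently parked in buf */
};

static size_t offloaded;              /* total bytes "sent to hardware" */

static void process_blocks(const unsigned char *data, size_t len)
{
	offloaded += len;             /* real code would issue the NX operation here */
}

static void demo_update(struct demo_state *s, const unsigned char *data, size_t len)
{
	size_t full;

	if (s->buffered + len < BLOCK_SIZE) {       /* case 1: still no full block */
		memcpy(s->buf + s->buffered, data, len);
		s->buffered += len;
		return;
	}

	if (s->buffered) {                          /* top up and flush the parked partial */
		size_t fill = BLOCK_SIZE - s->buffered;

		memcpy(s->buf + s->buffered, data, fill);
		process_blocks(s->buf, BLOCK_SIZE);
		data += fill;
		len -= fill;
		s->buffered = 0;
	}

	full = len - (len % BLOCK_SIZE);            /* block-aligned middle portion */
	if (full)
		process_blocks(data, full);

	s->buffered = len - full;                   /* leftover waits for the next call */
	memcpy(s->buf, data + full, s->buffered);
}

int main(void)
{
	struct demo_state s = { .buffered = 0 };
	unsigned char junk[300] = { 0 };

	demo_update(&s, junk, 100);   /* only buffered              */
	demo_update(&s, junk, 300);   /* flushes three full blocks  */
	printf("offloaded %zu, buffered %zu\n", offloaded, s.buffered);
	return 0;
}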
// SPDX-License-Identifier: GPL-2.0-only /* * AES XCBC routines supporting the Power 7+ Nest Accelerators driver * * Copyright (C) 2011-2012 International Business Machines Inc. * * Author: Kent Yoder <[email protected]> */ #include <crypto/internal/hash.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <linux/module.h> #include <linux/types.h> #include <linux/crypto.h> #include <asm/vio.h> #include "nx_csbcpb.h" #include "nx.h" struct xcbc_state { u8 state[AES_BLOCK_SIZE]; unsigned int count; u8 buffer[AES_BLOCK_SIZE]; }; static int nx_xcbc_set_key(struct crypto_shash *desc, const u8 *in_key, unsigned int key_len) { struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; switch (key_len) { case AES_KEYSIZE_128: nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128]; break; default: return -EINVAL; } memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len); return 0; } /* * Based on RFC 3566, for a zero-length message: * * n = 1 * K1 = E(K, 0x01010101010101010101010101010101) * K3 = E(K, 0x03030303030303030303030303030303) * E[0] = 0x00000000000000000000000000000000 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding) * E[1] = (K1, M[1] ^ E[0] ^ K3) * Tag = M[1] */ static int nx_xcbc_empty(struct shash_desc *desc, u8 *out) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; u8 keys[2][AES_BLOCK_SIZE]; u8 key[32]; int rc = 0; int len; /* Change to ECB mode */ csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB; memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE); memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE); NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; /* K1 and K3 base patterns */ memset(keys[0], 0x01, sizeof(keys[0])); memset(keys[1], 0x03, sizeof(keys[1])); len = sizeof(keys); /* Generate K1 and K3 encrypting the patterns */ in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len, nx_ctx->ap->sglen); if (len != sizeof(keys)) return -EINVAL; out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len, nx_ctx->ap->sglen); if (len != sizeof(keys)) return -EINVAL; nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0); if (rc) goto out; atomic_inc(&(nx_ctx->stats->aes_ops)); /* XOr K3 with the padding for a 0 length message */ keys[1][0] ^= 0x80; len = sizeof(keys[1]); /* Encrypt the final result */ memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE); in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len, nx_ctx->ap->sglen); if (len != sizeof(keys[1])) return -EINVAL; len = AES_BLOCK_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, nx_ctx->ap->sglen); if (len != AES_BLOCK_SIZE) return -EINVAL; nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0); if (rc) goto out; atomic_inc(&(nx_ctx->stats->aes_ops)); out: /* Restore XCBC mode */ csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE); NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT; return rc; } static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm) { struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; int err; err = nx_crypto_ctx_aes_xcbc_init(tfm); if (err) return err; nx_ctx_init(nx_ctx, HCOP_FC_AES); NX_CPB_SET_KEY_SIZE(csbcpb, 
NX_KS_AES_128); csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC; return 0; } static int nx_xcbc_init(struct shash_desc *desc) { struct xcbc_state *sctx = shash_desc_ctx(desc); memset(sctx, 0, sizeof *sctx); return 0; } static int nx_xcbc_update(struct shash_desc *desc, const u8 *data, unsigned int len) { struct xcbc_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg; struct nx_sg *out_sg; u32 to_process = 0, leftover, total; unsigned int max_sg_len; unsigned long irq_flags; int rc = 0; int data_len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); total = sctx->count + len; /* 2 cases for total data len: * 1: <= AES_BLOCK_SIZE: copy into state, return 0 * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover */ if (total <= AES_BLOCK_SIZE) { memcpy(sctx->buffer + sctx->count, data, len); sctx->count += len; goto out; } in_sg = nx_ctx->in_sg; max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg), nx_ctx->ap->sglen); max_sg_len = min_t(u64, max_sg_len, nx_ctx->ap->databytelen/NX_PAGE_SIZE); data_len = AES_BLOCK_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state, &len, nx_ctx->ap->sglen); if (data_len != AES_BLOCK_SIZE) { rc = -EINVAL; goto out; } nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); do { to_process = total - to_process; to_process = to_process & ~(AES_BLOCK_SIZE - 1); leftover = total - to_process; /* the hardware will not accept a 0 byte operation for this * algorithm and the operation MUST be finalized to be correct. * So if we happen to get an update that falls on a block sized * boundary, we must save off the last block to finalize with * later. */ if (!leftover) { to_process -= AES_BLOCK_SIZE; leftover = AES_BLOCK_SIZE; } if (sctx->count) { data_len = sctx->count; in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) sctx->buffer, &data_len, max_sg_len); if (data_len != sctx->count) { rc = -EINVAL; goto out; } } data_len = to_process - sctx->count; in_sg = nx_build_sg_list(in_sg, (u8 *) data, &data_len, max_sg_len); if (data_len != to_process - sctx->count) { rc = -EINVAL; goto out; } nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); /* we've hit the nx chip previously and we're updating again, * so copy over the partial digest */ if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { memcpy(csbcpb->cpb.aes_xcbc.cv, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); } NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE; if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) { rc = -EINVAL; goto out; } rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0); if (rc) goto out; atomic_inc(&(nx_ctx->stats->aes_ops)); /* everything after the first update is continuation */ NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION; total -= to_process; data += to_process - sctx->count; sctx->count = 0; in_sg = nx_ctx->in_sg; } while (leftover > AES_BLOCK_SIZE); /* copy the leftover back into the state struct */ memcpy(sctx->buffer, data, leftover); sctx->count = leftover; out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } static int nx_xcbc_final(struct shash_desc *desc, u8 *out) { struct xcbc_state *sctx = shash_desc_ctx(desc); struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base); struct nx_csbcpb *csbcpb = nx_ctx->csbcpb; struct nx_sg *in_sg, *out_sg; unsigned long irq_flags; int rc = 0; int len; spin_lock_irqsave(&nx_ctx->lock, irq_flags); if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { /* we've hit the nx chip previously, now we're 
finalizing, * so copy over the partial digest */ memcpy(csbcpb->cpb.aes_xcbc.cv, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); } else if (sctx->count == 0) { /* * we've never seen an update, so this is a 0 byte op. The * hardware cannot handle a 0 byte op, so just ECB to * generate the hash. */ rc = nx_xcbc_empty(desc, out); goto out; } /* final is represented by continuing the operation and indicating that * this is not an intermediate operation */ NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE; len = sctx->count; in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer, &len, nx_ctx->ap->sglen); if (len != sctx->count) { rc = -EINVAL; goto out; } len = AES_BLOCK_SIZE; out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len, nx_ctx->ap->sglen); if (len != AES_BLOCK_SIZE) { rc = -EINVAL; goto out; } nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg); if (!nx_ctx->op.outlen) { rc = -EINVAL; goto out; } rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0); if (rc) goto out; atomic_inc(&(nx_ctx->stats->aes_ops)); memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE); out: spin_unlock_irqrestore(&nx_ctx->lock, irq_flags); return rc; } struct shash_alg nx_shash_aes_xcbc_alg = { .digestsize = AES_BLOCK_SIZE, .init = nx_xcbc_init, .update = nx_xcbc_update, .final = nx_xcbc_final, .setkey = nx_xcbc_set_key, .descsize = sizeof(struct xcbc_state), .statesize = sizeof(struct xcbc_state), .base = { .cra_name = "xcbc(aes)", .cra_driver_name = "xcbc-aes-nx", .cra_priority = 300, .cra_blocksize = AES_BLOCK_SIZE, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_init = nx_crypto_ctx_aes_xcbc_init2, .cra_exit = nx_crypto_ctx_exit, } };
linux-master
drivers/crypto/nx/nx-aes-xcbc.c
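nx_xcbc_empty() above handles the one case the XCBC engine cannot: a zero-length message, which RFC 3566 defines in terms of two derived keys and a single padded block. The sketch below reproduces that derivation with the block cipher supplied as a callback; aes_block_fn is a placeholder type, not an existing kernel interface, and the real driver performs both encryptions through the ECB mode of the NX unit instead.

/* RFC 3566 zero-length XCBC-MAC, with the single-block AES-128 encryption
 * supplied by the caller (placeholder type, not a kernel API). */
#include <stdint.h>
#include <string.h>

#define AES_BLOCK 16

typedef void (*aes_block_fn)(const uint8_t key[AES_BLOCK],
			     const uint8_t in[AES_BLOCK],
			     uint8_t out[AES_BLOCK]);

void xcbc_mac_empty(aes_block_fn encrypt,
		    const uint8_t key[AES_BLOCK],
		    uint8_t tag[AES_BLOCK])
{
	uint8_t k1[AES_BLOCK], k3[AES_BLOCK], m1[AES_BLOCK];
	int i;

	/* K1 = E(K, 0x01..01), K3 = E(K, 0x03..03) */
	memset(m1, 0x01, sizeof(m1));
	encrypt(key, m1, k1);
	memset(m1, 0x03, sizeof(m1));
	encrypt(key, m1, k3);

	/* M[1] is the 10* padding of the empty message: 0x80 followed by zeros */
	memset(m1, 0, sizeof(m1));
	m1[0] = 0x80;

	/* Tag = E(K1, M[1] ^ E[0] ^ K3), where E[0] is the all-zero block */
	for (i = 0; i < AES_BLOCK; i++)
		m1[i] ^= k3[i];
	encrypt(k1, m1, tag);
}

The driver gets the same effect by temporarily switching the CPB to ECB mode, which is why nx_xcbc_empty() saves the XCBC key, runs the two encryptions, and then restores the key and XCBC mode before returning.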
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api. * * Copyright (C) 2014-2017 Axis Communications AB */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bitfield.h> #include <linux/crypto.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/fault-inject.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <crypto/aes.h> #include <crypto/gcm.h> #include <crypto/internal/aead.h> #include <crypto/internal/hash.h> #include <crypto/internal/skcipher.h> #include <crypto/scatterwalk.h> #include <crypto/sha1.h> #include <crypto/sha2.h> #include <crypto/xts.h> /* Max length of a line in all cache levels for Artpec SoCs. */ #define ARTPEC_CACHE_LINE_MAX 32 #define PDMA_OUT_CFG 0x0000 #define PDMA_OUT_BUF_CFG 0x0004 #define PDMA_OUT_CMD 0x0008 #define PDMA_OUT_DESCRQ_PUSH 0x0010 #define PDMA_OUT_DESCRQ_STAT 0x0014 #define A6_PDMA_IN_CFG 0x0028 #define A6_PDMA_IN_BUF_CFG 0x002c #define A6_PDMA_IN_CMD 0x0030 #define A6_PDMA_IN_STATQ_PUSH 0x0038 #define A6_PDMA_IN_DESCRQ_PUSH 0x0044 #define A6_PDMA_IN_DESCRQ_STAT 0x0048 #define A6_PDMA_INTR_MASK 0x0068 #define A6_PDMA_ACK_INTR 0x006c #define A6_PDMA_MASKED_INTR 0x0074 #define A7_PDMA_IN_CFG 0x002c #define A7_PDMA_IN_BUF_CFG 0x0030 #define A7_PDMA_IN_CMD 0x0034 #define A7_PDMA_IN_STATQ_PUSH 0x003c #define A7_PDMA_IN_DESCRQ_PUSH 0x0048 #define A7_PDMA_IN_DESCRQ_STAT 0x004C #define A7_PDMA_INTR_MASK 0x006c #define A7_PDMA_ACK_INTR 0x0070 #define A7_PDMA_MASKED_INTR 0x0078 #define PDMA_OUT_CFG_EN BIT(0) #define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0) #define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5) #define PDMA_OUT_CMD_START BIT(0) #define A6_PDMA_OUT_CMD_STOP BIT(3) #define A7_PDMA_OUT_CMD_STOP BIT(2) #define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0) #define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6) #define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0) #define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4) #define PDMA_IN_CFG_EN BIT(0) #define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0) #define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5) #define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10) #define PDMA_IN_CMD_START BIT(0) #define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2) #define A6_PDMA_IN_CMD_STOP BIT(3) #define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1) #define A7_PDMA_IN_CMD_STOP BIT(2) #define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0) #define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6) #define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0) #define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6) #define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0) #define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4) #define A6_PDMA_INTR_MASK_IN_DATA BIT(2) #define A6_PDMA_INTR_MASK_IN_EOP BIT(3) #define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4) #define A7_PDMA_INTR_MASK_IN_DATA BIT(3) #define A7_PDMA_INTR_MASK_IN_EOP BIT(4) #define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5) #define A6_CRY_MD_OPER GENMASK(19, 16) #define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20) #define A6_CRY_MD_HASH_HMAC_FIN BIT(23) #define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20) #define A6_CRY_MD_CIPHER_DECR BIT(22) #define A6_CRY_MD_CIPHER_TWEAK BIT(23) #define A6_CRY_MD_CIPHER_DSEQ BIT(24) #define A7_CRY_MD_OPER GENMASK(11, 8) #define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12) #define A7_CRY_MD_HASH_HMAC_FIN BIT(15) #define A7_CRY_MD_CIPHER_LEN 
GENMASK(13, 12) #define A7_CRY_MD_CIPHER_DECR BIT(14) #define A7_CRY_MD_CIPHER_TWEAK BIT(15) #define A7_CRY_MD_CIPHER_DSEQ BIT(16) /* DMA metadata constants */ #define regk_crypto_aes_cbc 0x00000002 #define regk_crypto_aes_ctr 0x00000003 #define regk_crypto_aes_ecb 0x00000001 #define regk_crypto_aes_gcm 0x00000004 #define regk_crypto_aes_xts 0x00000005 #define regk_crypto_cache 0x00000002 #define a6_regk_crypto_dlkey 0x0000000a #define a7_regk_crypto_dlkey 0x0000000e #define regk_crypto_ext 0x00000001 #define regk_crypto_hmac_sha1 0x00000007 #define regk_crypto_hmac_sha256 0x00000009 #define regk_crypto_init 0x00000000 #define regk_crypto_key_128 0x00000000 #define regk_crypto_key_192 0x00000001 #define regk_crypto_key_256 0x00000002 #define regk_crypto_null 0x00000000 #define regk_crypto_sha1 0x00000006 #define regk_crypto_sha256 0x00000008 /* DMA descriptor structures */ struct pdma_descr_ctrl { unsigned char short_descr : 1; unsigned char pad1 : 1; unsigned char eop : 1; unsigned char intr : 1; unsigned char short_len : 3; unsigned char pad2 : 1; } __packed; struct pdma_data_descr { unsigned int len : 24; unsigned int buf : 32; } __packed; struct pdma_short_descr { unsigned char data[7]; } __packed; struct pdma_descr { struct pdma_descr_ctrl ctrl; union { struct pdma_data_descr data; struct pdma_short_descr shrt; }; }; struct pdma_stat_descr { unsigned char pad1 : 1; unsigned char pad2 : 1; unsigned char eop : 1; unsigned char pad3 : 5; unsigned int len : 24; }; /* Each descriptor array can hold max 64 entries */ #define PDMA_DESCR_COUNT 64 #define MODULE_NAME "Artpec-6 CA" /* Hash modes (including HMAC variants) */ #define ARTPEC6_CRYPTO_HASH_SHA1 1 #define ARTPEC6_CRYPTO_HASH_SHA256 2 /* Crypto modes */ #define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1 #define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2 #define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3 #define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5 /* The PDMA is a DMA-engine tightly coupled with a ciphering engine. * It operates on a descriptor array with up to 64 descriptor entries. * The arrays must be 64 byte aligned in memory. * * The ciphering unit has no registers and is completely controlled by * a 4-byte metadata that is inserted at the beginning of each dma packet. * * A dma packet is a sequence of descriptors terminated by setting the .eop * field in the final descriptor of the packet. * * Multiple packets are used for providing context data, key data and * the plain/ciphertext. * * PDMA Descriptors (Array) * +------+------+------+~~+-------+------+---- * | 0 | 1 | 2 |~~| 11 EOP| 12 | .... * +--+---+--+---+----+-+~~+-------+----+-+---- * | | | | | * | | | | | * __|__ +-------++-------++-------+ +----+ * | MD | |Payload||Payload||Payload| | MD | * +-----+ +-------++-------++-------+ +----+ */ struct artpec6_crypto_bounce_buffer { struct list_head list; size_t length; struct scatterlist *sg; size_t offset; /* buf is aligned to ARTPEC_CACHE_LINE_MAX and * holds up to ARTPEC_CACHE_LINE_MAX bytes data. */ void *buf; }; struct artpec6_crypto_dma_map { dma_addr_t dma_addr; size_t size; enum dma_data_direction dir; }; struct artpec6_crypto_dma_descriptors { struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64); struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64); u32 stat[PDMA_DESCR_COUNT] __aligned(64); struct list_head bounce_buffers; /* Enough maps for all out/in buffers, and all three descr. 
arrays */ struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2]; dma_addr_t out_dma_addr; dma_addr_t in_dma_addr; dma_addr_t stat_dma_addr; size_t out_cnt; size_t in_cnt; size_t map_count; }; enum artpec6_crypto_variant { ARTPEC6_CRYPTO, ARTPEC7_CRYPTO, }; struct artpec6_crypto { void __iomem *base; spinlock_t queue_lock; struct list_head queue; /* waiting for pdma fifo space */ struct list_head pending; /* submitted to pdma fifo */ struct tasklet_struct task; struct kmem_cache *dma_cache; int pending_count; struct timer_list timer; enum artpec6_crypto_variant variant; void *pad_buffer; /* cache-aligned block padding buffer */ void *zero_buffer; }; enum artpec6_crypto_hash_flags { HASH_FLAG_INIT_CTX = 2, HASH_FLAG_UPDATE = 4, HASH_FLAG_FINALIZE = 8, HASH_FLAG_HMAC = 16, HASH_FLAG_UPDATE_KEY = 32, }; struct artpec6_crypto_req_common { struct list_head list; struct list_head complete_in_progress; struct artpec6_crypto_dma_descriptors *dma; struct crypto_async_request *req; void (*complete)(struct crypto_async_request *req); gfp_t gfp_flags; }; struct artpec6_hash_request_context { char partial_buffer[SHA256_BLOCK_SIZE]; char partial_buffer_out[SHA256_BLOCK_SIZE]; char key_buffer[SHA256_BLOCK_SIZE]; char pad_buffer[SHA256_BLOCK_SIZE + 32]; unsigned char digeststate[SHA256_DIGEST_SIZE]; size_t partial_bytes; u64 digcnt; u32 key_md; u32 hash_md; enum artpec6_crypto_hash_flags hash_flags; struct artpec6_crypto_req_common common; }; struct artpec6_hash_export_state { char partial_buffer[SHA256_BLOCK_SIZE]; unsigned char digeststate[SHA256_DIGEST_SIZE]; size_t partial_bytes; u64 digcnt; int oper; unsigned int hash_flags; }; struct artpec6_hashalg_context { char hmac_key[SHA256_BLOCK_SIZE]; size_t hmac_key_length; struct crypto_shash *child_hash; }; struct artpec6_crypto_request_context { u32 cipher_md; bool decrypt; struct artpec6_crypto_req_common common; }; struct artpec6_cryptotfm_context { unsigned char aes_key[2*AES_MAX_KEY_SIZE]; size_t key_length; u32 key_md; int crypto_type; struct crypto_sync_skcipher *fallback; }; struct artpec6_crypto_aead_hw_ctx { __be64 aad_length_bits; __be64 text_length_bits; __u8 J0[AES_BLOCK_SIZE]; }; struct artpec6_crypto_aead_req_ctx { struct artpec6_crypto_aead_hw_ctx hw_ctx; u32 cipher_md; bool decrypt; struct artpec6_crypto_req_common common; __u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned; }; /* The crypto framework makes it hard to avoid this global. 
*/ static struct device *artpec6_crypto_dev; #ifdef CONFIG_FAULT_INJECTION static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); #endif enum { ARTPEC6_CRYPTO_PREPARE_HASH_NO_START, ARTPEC6_CRYPTO_PREPARE_HASH_START, }; static int artpec6_crypto_prepare_aead(struct aead_request *areq); static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq); static int artpec6_crypto_prepare_hash(struct ahash_request *areq); static void artpec6_crypto_complete_crypto(struct crypto_async_request *req); static void artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req); static void artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req); static void artpec6_crypto_complete_aead(struct crypto_async_request *req); static void artpec6_crypto_complete_hash(struct crypto_async_request *req); static int artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common); static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common); struct artpec6_crypto_walk { struct scatterlist *sg; size_t offset; }; static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk, struct scatterlist *sg) { awalk->sg = sg; awalk->offset = 0; } static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk, size_t nbytes) { while (nbytes && awalk->sg) { size_t piece; WARN_ON(awalk->offset > awalk->sg->length); piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset); nbytes -= piece; awalk->offset += piece; if (awalk->offset == awalk->sg->length) { awalk->sg = sg_next(awalk->sg); awalk->offset = 0; } } return nbytes; } static size_t artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk) { WARN_ON(awalk->sg->length == awalk->offset); return awalk->sg->length - awalk->offset; } static dma_addr_t artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk) { return sg_phys(awalk->sg) + awalk->offset; } static void artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common) { struct artpec6_crypto_dma_descriptors *dma = common->dma; struct artpec6_crypto_bounce_buffer *b; struct artpec6_crypto_bounce_buffer *next; list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) { pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n", b, b->length, b->offset, b->buf); sg_pcopy_from_buffer(b->sg, 1, b->buf, b->length, b->offset); list_del(&b->list); kfree(b); } } static inline bool artpec6_crypto_busy(void) { struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); int fifo_count = ac->pending_count; return fifo_count > 6; } static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req) { struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); int ret = -EBUSY; spin_lock_bh(&ac->queue_lock); if (!artpec6_crypto_busy()) { list_add_tail(&req->list, &ac->pending); artpec6_crypto_start_dma(req); ret = -EINPROGRESS; } else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) { list_add_tail(&req->list, &ac->queue); } else { artpec6_crypto_common_destroy(req); } spin_unlock_bh(&ac->queue_lock); return ret; } static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common) { struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); enum artpec6_crypto_variant variant = ac->variant; void __iomem *base = ac->base; struct artpec6_crypto_dma_descriptors *dma = common->dma; u32 ind, statd, outd; /* Make descriptor content visible to the DMA before starting it. 
*/ wmb(); ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) | FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6); statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) | FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6); outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) | FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6); if (variant == ARTPEC6_CRYPTO) { writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH); writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH); writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD); } else { writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH); writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH); writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD); } writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH); writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD); ac->pending_count++; } static void artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common) { struct artpec6_crypto_dma_descriptors *dma = common->dma; dma->out_cnt = 0; dma->in_cnt = 0; dma->map_count = 0; INIT_LIST_HEAD(&dma->bounce_buffers); } static bool fault_inject_dma_descr(void) { #ifdef CONFIG_FAULT_INJECTION return should_fail(&artpec6_crypto_fail_dma_array_full, 1); #else return false; #endif } /** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a * physical address * * @addr: The physical address of the data buffer * @len: The length of the data buffer * @eop: True if this is the last buffer in the packet * * @return 0 on success or -ENOSPC if there are no more descriptors available */ static int artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common, dma_addr_t addr, size_t len, bool eop) { struct artpec6_crypto_dma_descriptors *dma = common->dma; struct pdma_descr *d; if (dma->out_cnt >= PDMA_DESCR_COUNT || fault_inject_dma_descr()) { pr_err("No free OUT DMA descriptors available!\n"); return -ENOSPC; } d = &dma->out[dma->out_cnt++]; memset(d, 0, sizeof(*d)); d->ctrl.short_descr = 0; d->ctrl.eop = eop; d->data.len = len; d->data.buf = addr; return 0; } /** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor * * @dst: The virtual address of the data * @len: The length of the data, must be between 1 to 7 bytes * @eop: True if this is the last buffer in the packet * * @return 0 on success * -ENOSPC if no more descriptors are available * -EINVAL if the data length exceeds 7 bytes */ static int artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common, void *dst, unsigned int len, bool eop) { struct artpec6_crypto_dma_descriptors *dma = common->dma; struct pdma_descr *d; if (dma->out_cnt >= PDMA_DESCR_COUNT || fault_inject_dma_descr()) { pr_err("No free OUT DMA descriptors available!\n"); return -ENOSPC; } else if (len > 7 || len < 1) { return -EINVAL; } d = &dma->out[dma->out_cnt++]; memset(d, 0, sizeof(*d)); d->ctrl.short_descr = 1; d->ctrl.short_len = len; d->ctrl.eop = eop; memcpy(d->shrt.data, dst, len); return 0; } static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common, struct page *page, size_t offset, size_t size, enum dma_data_direction dir, dma_addr_t *dma_addr_out) { struct artpec6_crypto_dma_descriptors *dma = common->dma; struct device *dev = artpec6_crypto_dev; struct artpec6_crypto_dma_map *map; dma_addr_t dma_addr; *dma_addr_out = 0; if (dma->map_count >= ARRAY_SIZE(dma->maps)) return -ENOMEM; dma_addr = dma_map_page(dev, page, offset, size, dir); if (dma_mapping_error(dev, dma_addr)) return -ENOMEM; map 
= &dma->maps[dma->map_count++]; map->size = size; map->dma_addr = dma_addr; map->dir = dir; *dma_addr_out = dma_addr; return 0; } static int artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common, void *ptr, size_t size, enum dma_data_direction dir, dma_addr_t *dma_addr_out) { struct page *page = virt_to_page(ptr); size_t offset = (uintptr_t)ptr & ~PAGE_MASK; return artpec6_crypto_dma_map_page(common, page, offset, size, dir, dma_addr_out); } static int artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common) { struct artpec6_crypto_dma_descriptors *dma = common->dma; int ret; ret = artpec6_crypto_dma_map_single(common, dma->in, sizeof(dma->in[0]) * dma->in_cnt, DMA_TO_DEVICE, &dma->in_dma_addr); if (ret) return ret; ret = artpec6_crypto_dma_map_single(common, dma->out, sizeof(dma->out[0]) * dma->out_cnt, DMA_TO_DEVICE, &dma->out_dma_addr); if (ret) return ret; /* We only read one stat descriptor */ dma->stat[dma->in_cnt - 1] = 0; /* * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor * to be written. */ return artpec6_crypto_dma_map_single(common, dma->stat, sizeof(dma->stat[0]) * dma->in_cnt, DMA_BIDIRECTIONAL, &dma->stat_dma_addr); } static void artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common) { struct artpec6_crypto_dma_descriptors *dma = common->dma; struct device *dev = artpec6_crypto_dev; int i; for (i = 0; i < dma->map_count; i++) { struct artpec6_crypto_dma_map *map = &dma->maps[i]; dma_unmap_page(dev, map->dma_addr, map->size, map->dir); } dma->map_count = 0; } /** artpec6_crypto_setup_out_descr - Setup an out descriptor * * @dst: The virtual address of the data * @len: The length of the data * @eop: True if this is the last buffer in the packet * @use_short: If this is true and the data length is 7 bytes or less then * a short descriptor will be used * * @return 0 on success * Any errors from artpec6_crypto_setup_out_descr_short() or * setup_out_descr_phys() */ static int artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common, void *dst, unsigned int len, bool eop, bool use_short) { if (use_short && len < 7) { return artpec6_crypto_setup_out_descr_short(common, dst, len, eop); } else { int ret; dma_addr_t dma_addr; ret = artpec6_crypto_dma_map_single(common, dst, len, DMA_TO_DEVICE, &dma_addr); if (ret) return ret; return artpec6_crypto_setup_out_descr_phys(common, dma_addr, len, eop); } } /** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a * physical address * * @addr: The physical address of the data buffer * @len: The length of the data buffer * @intr: True if an interrupt should be fired after HW processing of this * descriptor * */ static int artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common, dma_addr_t addr, unsigned int len, bool intr) { struct artpec6_crypto_dma_descriptors *dma = common->dma; struct pdma_descr *d; if (dma->in_cnt >= PDMA_DESCR_COUNT || fault_inject_dma_descr()) { pr_err("No free IN DMA descriptors available!\n"); return -ENOSPC; } d = &dma->in[dma->in_cnt++]; memset(d, 0, sizeof(*d)); d->ctrl.intr = intr; d->data.len = len; d->data.buf = addr; return 0; } /** artpec6_crypto_setup_in_descr - Setup an in channel descriptor * * @buffer: The virtual address to of the data buffer * @len: The length of the data buffer * @last: If this is the last data buffer in the request (i.e. 
an interrupt * is needed * * Short descriptors are not used for the in channel */ static int artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common, void *buffer, unsigned int len, bool last) { dma_addr_t dma_addr; int ret; ret = artpec6_crypto_dma_map_single(common, buffer, len, DMA_FROM_DEVICE, &dma_addr); if (ret) return ret; return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last); } static struct artpec6_crypto_bounce_buffer * artpec6_crypto_alloc_bounce(gfp_t flags) { void *base; size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) + 2 * ARTPEC_CACHE_LINE_MAX; struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags); if (!bbuf) return NULL; base = bbuf + 1; bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX); return bbuf; } static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common, struct artpec6_crypto_walk *walk, size_t size) { struct artpec6_crypto_bounce_buffer *bbuf; int ret; bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags); if (!bbuf) return -ENOMEM; bbuf->length = size; bbuf->sg = walk->sg; bbuf->offset = walk->offset; ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false); if (ret) { kfree(bbuf); return ret; } pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset); list_add_tail(&bbuf->list, &common->dma->bounce_buffers); return 0; } static int artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common, struct artpec6_crypto_walk *walk, size_t count) { size_t chunk; int ret; dma_addr_t addr; while (walk->sg && count) { chunk = min(count, artpec6_crypto_walk_chunklen(walk)); addr = artpec6_crypto_walk_chunk_phys(walk); /* When destination buffers are not aligned to the cache line * size we need bounce buffers. The DMA-API requires that the * entire line is owned by the DMA buffer and this holds also * for the case when coherent DMA is used. */ if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) { chunk = min_t(dma_addr_t, chunk, ALIGN(addr, ARTPEC_CACHE_LINE_MAX) - addr); pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk); ret = setup_bounce_buffer_in(common, walk, chunk); } else if (chunk < ARTPEC_CACHE_LINE_MAX) { pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk); ret = setup_bounce_buffer_in(common, walk, chunk); } else { dma_addr_t dma_addr; chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1); pr_debug("CHUNK %pad:%zu\n", &addr, chunk); ret = artpec6_crypto_dma_map_page(common, sg_page(walk->sg), walk->sg->offset + walk->offset, chunk, DMA_FROM_DEVICE, &dma_addr); if (ret) return ret; ret = artpec6_crypto_setup_in_descr_phys(common, dma_addr, chunk, false); } if (ret) return ret; count = count - chunk; artpec6_crypto_walk_advance(walk, chunk); } if (count) pr_err("EOL unexpected %zu bytes left\n", count); return count ? 
-EINVAL : 0; } static int artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common, struct artpec6_crypto_walk *walk, size_t count) { size_t chunk; int ret; dma_addr_t addr; while (walk->sg && count) { chunk = min(count, artpec6_crypto_walk_chunklen(walk)); addr = artpec6_crypto_walk_chunk_phys(walk); pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk); if (addr & 3) { char buf[3]; chunk = min_t(size_t, chunk, (4-(addr&3))); sg_pcopy_to_buffer(walk->sg, 1, buf, chunk, walk->offset); ret = artpec6_crypto_setup_out_descr_short(common, buf, chunk, false); } else { dma_addr_t dma_addr; ret = artpec6_crypto_dma_map_page(common, sg_page(walk->sg), walk->sg->offset + walk->offset, chunk, DMA_TO_DEVICE, &dma_addr); if (ret) return ret; ret = artpec6_crypto_setup_out_descr_phys(common, dma_addr, chunk, false); } if (ret) return ret; count = count - chunk; artpec6_crypto_walk_advance(walk, chunk); } if (count) pr_err("EOL unexpected %zu bytes left\n", count); return count ? -EINVAL : 0; } /** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor * * If the out descriptor list is non-empty, then the eop flag on the * last used out descriptor will be set. * * @return 0 on success * -EINVAL if the out descriptor is empty or has overflown */ static int artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common) { struct artpec6_crypto_dma_descriptors *dma = common->dma; struct pdma_descr *d; if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) { pr_err("%s: OUT descriptor list is %s\n", MODULE_NAME, dma->out_cnt ? "empty" : "full"); return -EINVAL; } d = &dma->out[dma->out_cnt-1]; d->ctrl.eop = 1; return 0; } /** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last * in descriptor * * See artpec6_crypto_terminate_out_descrs() for return values */ static int artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common) { struct artpec6_crypto_dma_descriptors *dma = common->dma; struct pdma_descr *d; if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) { pr_err("%s: IN descriptor list is %s\n", MODULE_NAME, dma->in_cnt ? "empty" : "full"); return -EINVAL; } d = &dma->in[dma->in_cnt-1]; d->ctrl.intr = 1; return 0; } /** create_hash_pad - Create a Secure Hash conformant pad * * @dst: The destination buffer to write the pad. Must be at least 64 bytes * @dgstlen: The total length of the hash digest in bytes * @bitcount: The total length of the digest in bits * * @return The total number of padding bytes written to @dst */ static size_t create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount) { unsigned int mod, target, diff, pad_bytes, size_bytes; __be64 bits = __cpu_to_be64(bitcount); switch (oper) { case regk_crypto_sha1: case regk_crypto_sha256: case regk_crypto_hmac_sha1: case regk_crypto_hmac_sha256: target = 448 / 8; mod = 512 / 8; size_bytes = 8; break; default: target = 896 / 8; mod = 1024 / 8; size_bytes = 16; break; } target -= 1; diff = dgstlen & (mod - 1); pad_bytes = diff > target ? 
target + mod - diff : target - diff; memset(dst + 1, 0, pad_bytes); dst[0] = 0x80; if (size_bytes == 16) { memset(dst + 1 + pad_bytes, 0, 8); memcpy(dst + 1 + pad_bytes + 8, &bits, 8); } else { memcpy(dst + 1 + pad_bytes, &bits, 8); } return pad_bytes + size_bytes + 1; } static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common, struct crypto_async_request *parent, void (*complete)(struct crypto_async_request *req), struct scatterlist *dstsg, unsigned int nbytes) { gfp_t flags; struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; common->gfp_flags = flags; common->dma = kmem_cache_alloc(ac->dma_cache, flags); if (!common->dma) return -ENOMEM; common->req = parent; common->complete = complete; return 0; } static void artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma) { struct artpec6_crypto_bounce_buffer *b; struct artpec6_crypto_bounce_buffer *next; list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) { kfree(b); } } static int artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common) { struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); artpec6_crypto_dma_unmap_all(common); artpec6_crypto_bounce_destroy(common->dma); kmem_cache_free(ac->dma_cache, common->dma); common->dma = NULL; return 0; } /* * Ciphering functions. */ static int artpec6_crypto_encrypt(struct skcipher_request *req) { struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); struct artpec6_crypto_request_context *req_ctx = NULL; void (*complete)(struct crypto_async_request *req); int ret; req_ctx = skcipher_request_ctx(req); switch (ctx->crypto_type) { case ARTPEC6_CRYPTO_CIPHER_AES_CBC: case ARTPEC6_CRYPTO_CIPHER_AES_ECB: case ARTPEC6_CRYPTO_CIPHER_AES_XTS: req_ctx->decrypt = 0; break; default: break; } switch (ctx->crypto_type) { case ARTPEC6_CRYPTO_CIPHER_AES_CBC: complete = artpec6_crypto_complete_cbc_encrypt; break; default: complete = artpec6_crypto_complete_crypto; break; } ret = artpec6_crypto_common_init(&req_ctx->common, &req->base, complete, req->dst, req->cryptlen); if (ret) return ret; ret = artpec6_crypto_prepare_crypto(req); if (ret) { artpec6_crypto_common_destroy(&req_ctx->common); return ret; } return artpec6_crypto_submit(&req_ctx->common); } static int artpec6_crypto_decrypt(struct skcipher_request *req) { int ret; struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); struct artpec6_crypto_request_context *req_ctx = NULL; void (*complete)(struct crypto_async_request *req); req_ctx = skcipher_request_ctx(req); switch (ctx->crypto_type) { case ARTPEC6_CRYPTO_CIPHER_AES_CBC: case ARTPEC6_CRYPTO_CIPHER_AES_ECB: case ARTPEC6_CRYPTO_CIPHER_AES_XTS: req_ctx->decrypt = 1; break; default: break; } switch (ctx->crypto_type) { case ARTPEC6_CRYPTO_CIPHER_AES_CBC: complete = artpec6_crypto_complete_cbc_decrypt; break; default: complete = artpec6_crypto_complete_crypto; break; } ret = artpec6_crypto_common_init(&req_ctx->common, &req->base, complete, req->dst, req->cryptlen); if (ret) return ret; ret = artpec6_crypto_prepare_crypto(req); if (ret) { artpec6_crypto_common_destroy(&req_ctx->common); return ret; } return artpec6_crypto_submit(&req_ctx->common); } static int artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); 
struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); size_t iv_len = crypto_skcipher_ivsize(cipher); unsigned int counter = be32_to_cpup((__be32 *) (req->iv + iv_len - 4)); unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / AES_BLOCK_SIZE; /* * The hardware uses only the last 32-bits as the counter while the * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that * the whole IV is a counter. So fallback if the counter is going to * overlow. */ if (counter + nblks < counter) { int ret; pr_debug("counter %x will overflow (nblks %u), falling back\n", counter, counter + nblks); ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key, ctx->key_length); if (ret) return ret; { SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); skcipher_request_set_sync_tfm(subreq, ctx->fallback); skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL); skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, req->iv); ret = encrypt ? crypto_skcipher_encrypt(subreq) : crypto_skcipher_decrypt(subreq); skcipher_request_zero(subreq); } return ret; } return encrypt ? artpec6_crypto_encrypt(req) : artpec6_crypto_decrypt(req); } static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req) { return artpec6_crypto_ctr_crypt(req, true); } static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req) { return artpec6_crypto_ctr_crypt(req, false); } /* * AEAD functions */ static int artpec6_crypto_aead_init(struct crypto_aead *tfm) { struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm); memset(tfm_ctx, 0, sizeof(*tfm_ctx)); crypto_aead_set_reqsize(tfm, sizeof(struct artpec6_crypto_aead_req_ctx)); return 0; } static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key, unsigned int len) { struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base); if (len != 16 && len != 24 && len != 32) return -EINVAL; ctx->key_length = len; memcpy(ctx->aes_key, key, len); return 0; } static int artpec6_crypto_aead_encrypt(struct aead_request *req) { int ret; struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req); req_ctx->decrypt = false; ret = artpec6_crypto_common_init(&req_ctx->common, &req->base, artpec6_crypto_complete_aead, NULL, 0); if (ret) return ret; ret = artpec6_crypto_prepare_aead(req); if (ret) { artpec6_crypto_common_destroy(&req_ctx->common); return ret; } return artpec6_crypto_submit(&req_ctx->common); } static int artpec6_crypto_aead_decrypt(struct aead_request *req) { int ret; struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req); req_ctx->decrypt = true; if (req->cryptlen < AES_BLOCK_SIZE) return -EINVAL; ret = artpec6_crypto_common_init(&req_ctx->common, &req->base, artpec6_crypto_complete_aead, NULL, 0); if (ret) return ret; ret = artpec6_crypto_prepare_aead(req); if (ret) { artpec6_crypto_common_destroy(&req_ctx->common); return ret; } return artpec6_crypto_submit(&req_ctx->common); } static int artpec6_crypto_prepare_hash(struct ahash_request *areq) { struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm); struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq); size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq)); size_t contextsize = digestsize; size_t blocksize = crypto_tfm_alg_blocksize( crypto_ahash_tfm(crypto_ahash_reqtfm(areq))); struct artpec6_crypto_req_common *common = &req_ctx->common; struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); enum artpec6_crypto_variant variant = ac->variant; u32 sel_ctx; 
bool ext_ctx = false; bool run_hw = false; int error = 0; artpec6_crypto_init_dma_operation(common); /* Upload HMAC key, must be first the first packet */ if (req_ctx->hash_flags & HASH_FLAG_HMAC) { if (variant == ARTPEC6_CRYPTO) { req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey); } else { req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey); } /* Copy and pad up the key */ memcpy(req_ctx->key_buffer, ctx->hmac_key, ctx->hmac_key_length); memset(req_ctx->key_buffer + ctx->hmac_key_length, 0, blocksize - ctx->hmac_key_length); error = artpec6_crypto_setup_out_descr(common, (void *)&req_ctx->key_md, sizeof(req_ctx->key_md), false, false); if (error) return error; error = artpec6_crypto_setup_out_descr(common, req_ctx->key_buffer, blocksize, true, false); if (error) return error; } if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) { /* Restore context */ sel_ctx = regk_crypto_ext; ext_ctx = true; } else { sel_ctx = regk_crypto_init; } if (variant == ARTPEC6_CRYPTO) { req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX; req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx); /* If this is the final round, set the final flag */ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN; } else { req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX; req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx); /* If this is the final round, set the final flag */ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN; } /* Setup up metadata descriptors */ error = artpec6_crypto_setup_out_descr(common, (void *)&req_ctx->hash_md, sizeof(req_ctx->hash_md), false, false); if (error) return error; error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); if (error) return error; if (ext_ctx) { error = artpec6_crypto_setup_out_descr(common, req_ctx->digeststate, contextsize, false, false); if (error) return error; } if (req_ctx->hash_flags & HASH_FLAG_UPDATE) { size_t done_bytes = 0; size_t total_bytes = areq->nbytes + req_ctx->partial_bytes; size_t ready_bytes = round_down(total_bytes, blocksize); struct artpec6_crypto_walk walk; run_hw = ready_bytes > 0; if (req_ctx->partial_bytes && ready_bytes) { /* We have a partial buffer and will at least some bytes * to the HW. 
Empty this partial buffer before tackling * the SG lists */ memcpy(req_ctx->partial_buffer_out, req_ctx->partial_buffer, req_ctx->partial_bytes); error = artpec6_crypto_setup_out_descr(common, req_ctx->partial_buffer_out, req_ctx->partial_bytes, false, true); if (error) return error; /* Reset partial buffer */ done_bytes += req_ctx->partial_bytes; req_ctx->partial_bytes = 0; } artpec6_crypto_walk_init(&walk, areq->src); error = artpec6_crypto_setup_sg_descrs_out(common, &walk, ready_bytes - done_bytes); if (error) return error; if (walk.sg) { size_t sg_skip = ready_bytes - done_bytes; size_t sg_rem = areq->nbytes - sg_skip; sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), req_ctx->partial_buffer + req_ctx->partial_bytes, sg_rem, sg_skip); req_ctx->partial_bytes += sg_rem; } req_ctx->digcnt += ready_bytes; req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE); } /* Finalize */ if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) { size_t hash_pad_len; u64 digest_bits; u32 oper; if (variant == ARTPEC6_CRYPTO) oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md); else oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md); /* Write out the partial buffer if present */ if (req_ctx->partial_bytes) { memcpy(req_ctx->partial_buffer_out, req_ctx->partial_buffer, req_ctx->partial_bytes); error = artpec6_crypto_setup_out_descr(common, req_ctx->partial_buffer_out, req_ctx->partial_bytes, false, true); if (error) return error; req_ctx->digcnt += req_ctx->partial_bytes; req_ctx->partial_bytes = 0; } if (req_ctx->hash_flags & HASH_FLAG_HMAC) digest_bits = 8 * (req_ctx->digcnt + blocksize); else digest_bits = 8 * req_ctx->digcnt; /* Add the hash pad */ hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer, req_ctx->digcnt, digest_bits); error = artpec6_crypto_setup_out_descr(common, req_ctx->pad_buffer, hash_pad_len, false, true); req_ctx->digcnt = 0; if (error) return error; /* Descriptor for the final result */ error = artpec6_crypto_setup_in_descr(common, areq->result, digestsize, true); if (error) return error; } else { /* This is not the final operation for this request */ if (!run_hw) return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START; /* Save the result to the context */ error = artpec6_crypto_setup_in_descr(common, req_ctx->digeststate, contextsize, false); if (error) return error; /* fall through */ } req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE); error = artpec6_crypto_terminate_in_descrs(common); if (error) return error; error = artpec6_crypto_terminate_out_descrs(common); if (error) return error; error = artpec6_crypto_dma_map_descs(common); if (error) return error; return ARTPEC6_CRYPTO_PREPARE_HASH_START; } static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm) { struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); tfm->reqsize = sizeof(struct artpec6_crypto_request_context); ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB; return 0; } static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm) { struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); ctx->fallback = crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base), 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback)) return PTR_ERR(ctx->fallback); tfm->reqsize = sizeof(struct artpec6_crypto_request_context); ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR; return 0; } static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm) { struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); tfm->reqsize = sizeof(struct artpec6_crypto_request_context); 
ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC; return 0; } static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm) { struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); tfm->reqsize = sizeof(struct artpec6_crypto_request_context); ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS; return 0; } static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm) { struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); memset(ctx, 0, sizeof(*ctx)); } static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm) { struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); crypto_free_sync_skcipher(ctx->fallback); artpec6_crypto_aes_exit(tfm); } static int artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); switch (keylen) { case 16: case 24: case 32: break; default: return -EINVAL; } memcpy(ctx->aes_key, key, keylen); ctx->key_length = keylen; return 0; } static int artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); int ret; ret = xts_verify_key(cipher, key, keylen); if (ret) return ret; switch (keylen) { case 32: case 48: case 64: break; default: return -EINVAL; } memcpy(ctx->aes_key, key, keylen); ctx->key_length = keylen; return 0; } /** artpec6_crypto_process_crypto - Prepare an async block cipher crypto request * * @req: The asynch request to process * * @return 0 if the dma job was successfully prepared * <0 on error * * This function sets up the PDMA descriptors for a block cipher request. * * The required padding is added for AES-CTR using a statically defined * buffer. 
* * The PDMA descriptor list will be as follows: * * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop> * IN: <CIPHER_MD><data_0>...[data_n]<intr> * */ static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq) { int ret; struct artpec6_crypto_walk walk; struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); struct artpec6_crypto_request_context *req_ctx = NULL; size_t iv_len = crypto_skcipher_ivsize(cipher); struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); enum artpec6_crypto_variant variant = ac->variant; struct artpec6_crypto_req_common *common; bool cipher_decr = false; size_t cipher_klen; u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */ u32 oper; req_ctx = skcipher_request_ctx(areq); common = &req_ctx->common; artpec6_crypto_init_dma_operation(common); if (variant == ARTPEC6_CRYPTO) ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey); else ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey); ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md, sizeof(ctx->key_md), false, false); if (ret) return ret; ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key, ctx->key_length, true, false); if (ret) return ret; req_ctx->cipher_md = 0; if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) cipher_klen = ctx->key_length/2; else cipher_klen = ctx->key_length; /* Metadata */ switch (cipher_klen) { case 16: cipher_len = regk_crypto_key_128; break; case 24: cipher_len = regk_crypto_key_192; break; case 32: cipher_len = regk_crypto_key_256; break; default: pr_err("%s: Invalid key length %zu!\n", MODULE_NAME, ctx->key_length); return -EINVAL; } switch (ctx->crypto_type) { case ARTPEC6_CRYPTO_CIPHER_AES_ECB: oper = regk_crypto_aes_ecb; cipher_decr = req_ctx->decrypt; break; case ARTPEC6_CRYPTO_CIPHER_AES_CBC: oper = regk_crypto_aes_cbc; cipher_decr = req_ctx->decrypt; break; case ARTPEC6_CRYPTO_CIPHER_AES_CTR: oper = regk_crypto_aes_ctr; cipher_decr = false; break; case ARTPEC6_CRYPTO_CIPHER_AES_XTS: oper = regk_crypto_aes_xts; cipher_decr = req_ctx->decrypt; if (variant == ARTPEC6_CRYPTO) req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ; else req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ; break; default: pr_err("%s: Invalid cipher mode %d!\n", MODULE_NAME, ctx->crypto_type); return -EINVAL; } if (variant == ARTPEC6_CRYPTO) { req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper); req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN, cipher_len); if (cipher_decr) req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR; } else { req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper); req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN, cipher_len); if (cipher_decr) req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR; } ret = artpec6_crypto_setup_out_descr(common, &req_ctx->cipher_md, sizeof(req_ctx->cipher_md), false, false); if (ret) return ret; ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); if (ret) return ret; if (iv_len) { ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len, false, false); if (ret) return ret; } /* Data out */ artpec6_crypto_walk_init(&walk, areq->src); ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen); if (ret) return ret; /* Data in */ artpec6_crypto_walk_init(&walk, areq->dst); ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen); if (ret) return ret; /* CTR-mode padding required by the HW. 
*/ if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR || ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) { size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) - areq->cryptlen; if (pad) { ret = artpec6_crypto_setup_out_descr(common, ac->pad_buffer, pad, false, false); if (ret) return ret; ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, pad, false); if (ret) return ret; } } ret = artpec6_crypto_terminate_out_descrs(common); if (ret) return ret; ret = artpec6_crypto_terminate_in_descrs(common); if (ret) return ret; return artpec6_crypto_dma_map_descs(common); } static int artpec6_crypto_prepare_aead(struct aead_request *areq) { size_t count; int ret; size_t input_length; struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm); struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq); struct crypto_aead *cipher = crypto_aead_reqtfm(areq); struct artpec6_crypto_req_common *common = &req_ctx->common; struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); enum artpec6_crypto_variant variant = ac->variant; u32 md_cipher_len; artpec6_crypto_init_dma_operation(common); /* Key */ if (variant == ARTPEC6_CRYPTO) { ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey); } else { ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey); } ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md, sizeof(ctx->key_md), false, false); if (ret) return ret; ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key, ctx->key_length, true, false); if (ret) return ret; req_ctx->cipher_md = 0; switch (ctx->key_length) { case 16: md_cipher_len = regk_crypto_key_128; break; case 24: md_cipher_len = regk_crypto_key_192; break; case 32: md_cipher_len = regk_crypto_key_256; break; default: return -EINVAL; } if (variant == ARTPEC6_CRYPTO) { req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, regk_crypto_aes_gcm); req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN, md_cipher_len); if (req_ctx->decrypt) req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR; } else { req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, regk_crypto_aes_gcm); req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN, md_cipher_len); if (req_ctx->decrypt) req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR; } ret = artpec6_crypto_setup_out_descr(common, (void *) &req_ctx->cipher_md, sizeof(req_ctx->cipher_md), false, false); if (ret) return ret; ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); if (ret) return ret; /* For the decryption, cryptlen includes the tag. */ input_length = areq->cryptlen; if (req_ctx->decrypt) input_length -= crypto_aead_authsize(cipher); /* Prepare the context buffer */ req_ctx->hw_ctx.aad_length_bits = __cpu_to_be64(8*areq->assoclen); req_ctx->hw_ctx.text_length_bits = __cpu_to_be64(8*input_length); memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher)); // The HW omits the initial increment of the counter field. 
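	/*
	 * GCM with a 96-bit IV defines J0 = IV || 0^31 || 1 (NIST SP 800-38D),
	 * so the trailing big-endian 32-bit counter word is seeded to 1 here
	 * by the driver rather than being filled in by the engine.
	 */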
memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4); ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx, sizeof(struct artpec6_crypto_aead_hw_ctx), false, false); if (ret) return ret; { struct artpec6_crypto_walk walk; artpec6_crypto_walk_init(&walk, areq->src); /* Associated data */ count = areq->assoclen; ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count); if (ret) return ret; if (!IS_ALIGNED(areq->assoclen, 16)) { size_t assoc_pad = 16 - (areq->assoclen % 16); /* The HW mandates zero padding here */ ret = artpec6_crypto_setup_out_descr(common, ac->zero_buffer, assoc_pad, false, false); if (ret) return ret; } /* Data to crypto */ count = input_length; ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count); if (ret) return ret; if (!IS_ALIGNED(input_length, 16)) { size_t crypto_pad = 16 - (input_length % 16); /* The HW mandates zero padding here */ ret = artpec6_crypto_setup_out_descr(common, ac->zero_buffer, crypto_pad, false, false); if (ret) return ret; } } /* Data from crypto */ { struct artpec6_crypto_walk walk; size_t output_len = areq->cryptlen; if (req_ctx->decrypt) output_len -= crypto_aead_authsize(cipher); artpec6_crypto_walk_init(&walk, areq->dst); /* skip associated data in the output */ count = artpec6_crypto_walk_advance(&walk, areq->assoclen); if (count) return -EINVAL; count = output_len; ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count); if (ret) return ret; /* Put padding between the cryptotext and the auth tag */ if (!IS_ALIGNED(output_len, 16)) { size_t crypto_pad = 16 - (output_len % 16); ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, crypto_pad, false); if (ret) return ret; } /* The authentication tag shall follow immediately after * the output ciphertext. For decryption it is put in a context * buffer for later compare against the input tag. */ if (req_ctx->decrypt) { ret = artpec6_crypto_setup_in_descr(common, req_ctx->decryption_tag, AES_BLOCK_SIZE, false); if (ret) return ret; } else { /* For encryption the requested tag size may be smaller * than the hardware's generated tag. */ size_t authsize = crypto_aead_authsize(cipher); ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, authsize); if (ret) return ret; if (authsize < AES_BLOCK_SIZE) { count = AES_BLOCK_SIZE - authsize; ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, count, false); if (ret) return ret; } } } ret = artpec6_crypto_terminate_in_descrs(common); if (ret) return ret; ret = artpec6_crypto_terminate_out_descrs(common); if (ret) return ret; return artpec6_crypto_dma_map_descs(common); } static void artpec6_crypto_process_queue(struct artpec6_crypto *ac, struct list_head *completions) { struct artpec6_crypto_req_common *req; while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) { req = list_first_entry(&ac->queue, struct artpec6_crypto_req_common, list); list_move_tail(&req->list, &ac->pending); artpec6_crypto_start_dma(req); list_add_tail(&req->complete_in_progress, completions); } /* * In some cases, the hardware can raise an in_eop_flush interrupt * before actually updating the status, so we have an timer which will * recheck the status on timeout. Since the cases are expected to be * very rare, we use a relatively large timeout value. There should be * no noticeable negative effect if we timeout spuriously. 
*/ if (ac->pending_count) mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100)); else del_timer(&ac->timer); } static void artpec6_crypto_timeout(struct timer_list *t) { struct artpec6_crypto *ac = from_timer(ac, t, timer); dev_info_ratelimited(artpec6_crypto_dev, "timeout\n"); tasklet_schedule(&ac->task); } static void artpec6_crypto_task(unsigned long data) { struct artpec6_crypto *ac = (struct artpec6_crypto *)data; struct artpec6_crypto_req_common *req; struct artpec6_crypto_req_common *n; struct list_head complete_done; struct list_head complete_in_progress; INIT_LIST_HEAD(&complete_done); INIT_LIST_HEAD(&complete_in_progress); if (list_empty(&ac->pending)) { pr_debug("Spurious IRQ\n"); return; } spin_lock(&ac->queue_lock); list_for_each_entry_safe(req, n, &ac->pending, list) { struct artpec6_crypto_dma_descriptors *dma = req->dma; u32 stat; dma_addr_t stataddr; stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1); dma_sync_single_for_cpu(artpec6_crypto_dev, stataddr, 4, DMA_BIDIRECTIONAL); stat = req->dma->stat[req->dma->in_cnt-1]; /* A non-zero final status descriptor indicates * this job has finished. */ pr_debug("Request %p status is %X\n", req, stat); if (!stat) break; /* Allow testing of timeout handling with fault injection */ #ifdef CONFIG_FAULT_INJECTION if (should_fail(&artpec6_crypto_fail_status_read, 1)) continue; #endif pr_debug("Completing request %p\n", req); list_move_tail(&req->list, &complete_done); ac->pending_count--; } artpec6_crypto_process_queue(ac, &complete_in_progress); spin_unlock(&ac->queue_lock); /* Perform the completion callbacks without holding the queue lock * to allow new request submissions from the callbacks. */ list_for_each_entry_safe(req, n, &complete_done, list) { artpec6_crypto_dma_unmap_all(req); artpec6_crypto_copy_bounce_buffers(req); artpec6_crypto_common_destroy(req); req->complete(req->req); } list_for_each_entry_safe(req, n, &complete_in_progress, complete_in_progress) { crypto_request_complete(req->req, -EINPROGRESS); } } static void artpec6_crypto_complete_crypto(struct crypto_async_request *req) { crypto_request_complete(req, 0); } static void artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req) { struct skcipher_request *cipher_req = container_of(req, struct skcipher_request, base); scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src, cipher_req->cryptlen - AES_BLOCK_SIZE, AES_BLOCK_SIZE, 0); skcipher_request_complete(cipher_req, 0); } static void artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req) { struct skcipher_request *cipher_req = container_of(req, struct skcipher_request, base); scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst, cipher_req->cryptlen - AES_BLOCK_SIZE, AES_BLOCK_SIZE, 0); skcipher_request_complete(cipher_req, 0); } static void artpec6_crypto_complete_aead(struct crypto_async_request *req) { int result = 0; /* Verify GCM hashtag. 
*/ struct aead_request *areq = container_of(req, struct aead_request, base); struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq); if (req_ctx->decrypt) { u8 input_tag[AES_BLOCK_SIZE]; unsigned int authsize = crypto_aead_authsize(aead); sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), input_tag, authsize, areq->assoclen + areq->cryptlen - authsize); if (crypto_memneq(req_ctx->decryption_tag, input_tag, authsize)) { pr_debug("***EBADMSG:\n"); print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1, input_tag, authsize, true); print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1, req_ctx->decryption_tag, authsize, true); result = -EBADMSG; } } aead_request_complete(areq, result); } static void artpec6_crypto_complete_hash(struct crypto_async_request *req) { crypto_request_complete(req, 0); } /*------------------- Hash functions -----------------------------------------*/ static int artpec6_crypto_hash_set_key(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base); size_t blocksize; int ret; if (!keylen) { pr_err("Invalid length (%d) of HMAC key\n", keylen); return -EINVAL; } memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key)); blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); if (keylen > blocksize) { tfm_ctx->hmac_key_length = blocksize; ret = crypto_shash_tfm_digest(tfm_ctx->child_hash, key, keylen, tfm_ctx->hmac_key); if (ret) return ret; } else { memcpy(tfm_ctx->hmac_key, key, keylen); tfm_ctx->hmac_key_length = keylen; } return 0; } static int artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac) { struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); enum artpec6_crypto_variant variant = ac->variant; struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); u32 oper; memset(req_ctx, 0, sizeof(*req_ctx)); req_ctx->hash_flags = HASH_FLAG_INIT_CTX; if (hmac) req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY); switch (type) { case ARTPEC6_CRYPTO_HASH_SHA1: oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1; break; case ARTPEC6_CRYPTO_HASH_SHA256: oper = hmac ? 
regk_crypto_hmac_sha256 : regk_crypto_sha256; break; default: pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type); return -EINVAL; } if (variant == ARTPEC6_CRYPTO) req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper); else req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper); return 0; } static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req) { struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); int ret; if (!req_ctx->common.dma) { ret = artpec6_crypto_common_init(&req_ctx->common, &req->base, artpec6_crypto_complete_hash, NULL, 0); if (ret) return ret; } ret = artpec6_crypto_prepare_hash(req); switch (ret) { case ARTPEC6_CRYPTO_PREPARE_HASH_START: ret = artpec6_crypto_submit(&req_ctx->common); break; case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START: ret = 0; fallthrough; default: artpec6_crypto_common_destroy(&req_ctx->common); break; } return ret; } static int artpec6_crypto_hash_final(struct ahash_request *req) { struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); req_ctx->hash_flags |= HASH_FLAG_FINALIZE; return artpec6_crypto_prepare_submit_hash(req); } static int artpec6_crypto_hash_update(struct ahash_request *req) { struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); req_ctx->hash_flags |= HASH_FLAG_UPDATE; return artpec6_crypto_prepare_submit_hash(req); } static int artpec6_crypto_sha1_init(struct ahash_request *req) { return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0); } static int artpec6_crypto_sha1_digest(struct ahash_request *req) { struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0); req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; return artpec6_crypto_prepare_submit_hash(req); } static int artpec6_crypto_sha256_init(struct ahash_request *req) { return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0); } static int artpec6_crypto_sha256_digest(struct ahash_request *req) { struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0); req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; return artpec6_crypto_prepare_submit_hash(req); } static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req) { return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1); } static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req) { struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1); req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; return artpec6_crypto_prepare_submit_hash(req); } static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm, const char *base_hash_name) { struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm); crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct artpec6_hash_request_context)); memset(tfm_ctx, 0, sizeof(*tfm_ctx)); if (base_hash_name) { struct crypto_shash *child; child = crypto_alloc_shash(base_hash_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(child)) return PTR_ERR(child); tfm_ctx->child_hash = child; } return 0; } static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm) { return artpec6_crypto_ahash_init_common(tfm, NULL); } static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm) { return artpec6_crypto_ahash_init_common(tfm, "sha256"); } static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm) { struct 
artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm); if (tfm_ctx->child_hash) crypto_free_shash(tfm_ctx->child_hash); memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key)); tfm_ctx->hmac_key_length = 0; } static int artpec6_crypto_hash_export(struct ahash_request *req, void *out) { const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req); struct artpec6_hash_export_state *state = out; struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); enum artpec6_crypto_variant variant = ac->variant; BUILD_BUG_ON(sizeof(state->partial_buffer) != sizeof(ctx->partial_buffer)); BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate)); state->digcnt = ctx->digcnt; state->partial_bytes = ctx->partial_bytes; state->hash_flags = ctx->hash_flags; if (variant == ARTPEC6_CRYPTO) state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md); else state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md); memcpy(state->partial_buffer, ctx->partial_buffer, sizeof(state->partial_buffer)); memcpy(state->digeststate, ctx->digeststate, sizeof(state->digeststate)); return 0; } static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in) { struct artpec6_hash_request_context *ctx = ahash_request_ctx(req); const struct artpec6_hash_export_state *state = in; struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); enum artpec6_crypto_variant variant = ac->variant; memset(ctx, 0, sizeof(*ctx)); ctx->digcnt = state->digcnt; ctx->partial_bytes = state->partial_bytes; ctx->hash_flags = state->hash_flags; if (variant == ARTPEC6_CRYPTO) ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper); else ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper); memcpy(ctx->partial_buffer, state->partial_buffer, sizeof(state->partial_buffer)); memcpy(ctx->digeststate, state->digeststate, sizeof(state->digeststate)); return 0; } static int init_crypto_hw(struct artpec6_crypto *ac) { enum artpec6_crypto_variant variant = ac->variant; void __iomem *base = ac->base; u32 out_descr_buf_size; u32 out_data_buf_size; u32 in_data_buf_size; u32 in_descr_buf_size; u32 in_stat_buf_size; u32 in, out; /* * The PDMA unit contains 1984 bytes of internal memory for the OUT * channels and 1024 bytes for the IN channel. This is an elastic * memory used to internally store the descriptors and data. The values * ares specified in 64 byte incremements. Trustzone buffers are not * used at this stage. 
*/ out_data_buf_size = 16; /* 1024 bytes for data */ out_descr_buf_size = 15; /* 960 bytes for descriptors */ in_data_buf_size = 8; /* 512 bytes for data */ in_descr_buf_size = 4; /* 256 bytes for descriptors */ in_stat_buf_size = 4; /* 256 bytes for stat descrs */ BUILD_BUG_ON_MSG((out_data_buf_size + out_descr_buf_size) * 64 > 1984, "Invalid OUT configuration"); BUILD_BUG_ON_MSG((in_data_buf_size + in_descr_buf_size + in_stat_buf_size) * 64 > 1024, "Invalid IN configuration"); in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) | FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) | FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size); out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) | FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size); writel_relaxed(out, base + PDMA_OUT_BUF_CFG); writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG); if (variant == ARTPEC6_CRYPTO) { writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG); writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG); writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA | A6_PDMA_INTR_MASK_IN_EOP_FLUSH, base + A6_PDMA_INTR_MASK); } else { writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG); writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG); writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA | A7_PDMA_INTR_MASK_IN_EOP_FLUSH, base + A7_PDMA_INTR_MASK); } return 0; } static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac) { enum artpec6_crypto_variant variant = ac->variant; void __iomem *base = ac->base; if (variant == ARTPEC6_CRYPTO) { writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD); writel_relaxed(0, base + A6_PDMA_IN_CFG); writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD); } else { writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD); writel_relaxed(0, base + A7_PDMA_IN_CFG); writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD); } writel_relaxed(0, base + PDMA_OUT_CFG); } static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id) { struct artpec6_crypto *ac = dev_id; enum artpec6_crypto_variant variant = ac->variant; void __iomem *base = ac->base; u32 mask_in_data, mask_in_eop_flush; u32 in_cmd_flush_stat, in_cmd_reg; u32 ack_intr_reg; u32 ack = 0; u32 intr; if (variant == ARTPEC6_CRYPTO) { intr = readl_relaxed(base + A6_PDMA_MASKED_INTR); mask_in_data = A6_PDMA_INTR_MASK_IN_DATA; mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH; in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT; in_cmd_reg = A6_PDMA_IN_CMD; ack_intr_reg = A6_PDMA_ACK_INTR; } else { intr = readl_relaxed(base + A7_PDMA_MASKED_INTR); mask_in_data = A7_PDMA_INTR_MASK_IN_DATA; mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH; in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT; in_cmd_reg = A7_PDMA_IN_CMD; ack_intr_reg = A7_PDMA_ACK_INTR; } /* We get two interrupt notifications from each job. * The in_data means all data was sent to memory and then * we request a status flush command to write the per-job * status to its status vector. This ensures that the * tasklet can detect exactly how many submitted jobs * that have finished. 
*/ if (intr & mask_in_data) ack |= mask_in_data; if (intr & mask_in_eop_flush) ack |= mask_in_eop_flush; else writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg); writel_relaxed(ack, base + ack_intr_reg); if (intr & mask_in_eop_flush) tasklet_schedule(&ac->task); return IRQ_HANDLED; } /*------------------- Algorithm definitions ----------------------------------*/ /* Hashes */ static struct ahash_alg hash_algos[] = { /* SHA-1 */ { .init = artpec6_crypto_sha1_init, .update = artpec6_crypto_hash_update, .final = artpec6_crypto_hash_final, .digest = artpec6_crypto_sha1_digest, .import = artpec6_crypto_hash_import, .export = artpec6_crypto_hash_export, .halg.digestsize = SHA1_DIGEST_SIZE, .halg.statesize = sizeof(struct artpec6_hash_export_state), .halg.base = { .cra_name = "sha1", .cra_driver_name = "artpec-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_init = artpec6_crypto_ahash_init, .cra_exit = artpec6_crypto_ahash_exit, } }, /* SHA-256 */ { .init = artpec6_crypto_sha256_init, .update = artpec6_crypto_hash_update, .final = artpec6_crypto_hash_final, .digest = artpec6_crypto_sha256_digest, .import = artpec6_crypto_hash_import, .export = artpec6_crypto_hash_export, .halg.digestsize = SHA256_DIGEST_SIZE, .halg.statesize = sizeof(struct artpec6_hash_export_state), .halg.base = { .cra_name = "sha256", .cra_driver_name = "artpec-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_init = artpec6_crypto_ahash_init, .cra_exit = artpec6_crypto_ahash_exit, } }, /* HMAC SHA-256 */ { .init = artpec6_crypto_hmac_sha256_init, .update = artpec6_crypto_hash_update, .final = artpec6_crypto_hash_final, .digest = artpec6_crypto_hmac_sha256_digest, .import = artpec6_crypto_hash_import, .export = artpec6_crypto_hash_export, .setkey = artpec6_crypto_hash_set_key, .halg.digestsize = SHA256_DIGEST_SIZE, .halg.statesize = sizeof(struct artpec6_hash_export_state), .halg.base = { .cra_name = "hmac(sha256)", .cra_driver_name = "artpec-hmac-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_hashalg_context), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_init = artpec6_crypto_ahash_init_hmac_sha256, .cra_exit = artpec6_crypto_ahash_exit, } }, }; /* Crypto */ static struct skcipher_alg crypto_algos[] = { /* AES - ECB */ { .base = { .cra_name = "ecb(aes)", .cra_driver_name = "artpec6-ecb-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, .cra_module = THIS_MODULE, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = artpec6_crypto_cipher_set_key, .encrypt = artpec6_crypto_encrypt, .decrypt = artpec6_crypto_decrypt, .init = artpec6_crypto_aes_ecb_init, .exit = artpec6_crypto_aes_exit, }, /* AES - CTR */ { .base = { .cra_name = "ctr(aes)", .cra_driver_name = "artpec6-ctr-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 
3, .cra_module = THIS_MODULE, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = artpec6_crypto_cipher_set_key, .encrypt = artpec6_crypto_ctr_encrypt, .decrypt = artpec6_crypto_ctr_decrypt, .init = artpec6_crypto_aes_ctr_init, .exit = artpec6_crypto_aes_ctr_exit, }, /* AES - CBC */ { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "artpec6-cbc-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, .cra_module = THIS_MODULE, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = artpec6_crypto_cipher_set_key, .encrypt = artpec6_crypto_encrypt, .decrypt = artpec6_crypto_decrypt, .init = artpec6_crypto_aes_cbc_init, .exit = artpec6_crypto_aes_exit }, /* AES - XTS */ { .base = { .cra_name = "xts(aes)", .cra_driver_name = "artpec6-xts-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, .cra_module = THIS_MODULE, }, .min_keysize = 2*AES_MIN_KEY_SIZE, .max_keysize = 2*AES_MAX_KEY_SIZE, .ivsize = 16, .setkey = artpec6_crypto_xts_set_key, .encrypt = artpec6_crypto_encrypt, .decrypt = artpec6_crypto_decrypt, .init = artpec6_crypto_aes_xts_init, .exit = artpec6_crypto_aes_exit, }, }; static struct aead_alg aead_algos[] = { { .init = artpec6_crypto_aead_init, .setkey = artpec6_crypto_aead_set_key, .encrypt = artpec6_crypto_aead_encrypt, .decrypt = artpec6_crypto_aead_decrypt, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, .base = { .cra_name = "gcm(aes)", .cra_driver_name = "artpec-gcm-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), .cra_alignmask = 3, .cra_module = THIS_MODULE, }, } }; #ifdef CONFIG_DEBUG_FS struct dbgfs_u32 { char *name; mode_t mode; u32 *flag; char *desc; }; static struct dentry *dbgfs_root; static void artpec6_crypto_init_debugfs(void) { dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); #ifdef CONFIG_FAULT_INJECTION fault_create_debugfs_attr("fail_status_read", dbgfs_root, &artpec6_crypto_fail_status_read); fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root, &artpec6_crypto_fail_dma_array_full); #endif } static void artpec6_crypto_free_debugfs(void) { debugfs_remove_recursive(dbgfs_root); dbgfs_root = NULL; } #endif static const struct of_device_id artpec6_crypto_of_match[] = { { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO }, { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO }, {} }; MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match); static int artpec6_crypto_probe(struct platform_device *pdev) { const struct of_device_id *match; enum artpec6_crypto_variant variant; struct artpec6_crypto *ac; struct device *dev = &pdev->dev; void __iomem *base; int irq; int err; if (artpec6_crypto_dev) return -ENODEV; match = of_match_node(artpec6_crypto_of_match, dev->of_node); if (!match) return -EINVAL; variant = (enum artpec6_crypto_variant)match->data; base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) return PTR_ERR(base); irq = platform_get_irq(pdev, 0); if (irq < 0) return -ENODEV; ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto), GFP_KERNEL); if (!ac) return -ENOMEM; 
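	/*
	 * The pad/zero buffers and the IRQ below are devm-managed; only the
	 * dma_cache, the hardware state and the algorithm registrations need
	 * the explicit unwinding done by the error labels at the end.
	 */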
platform_set_drvdata(pdev, ac); ac->variant = variant; spin_lock_init(&ac->queue_lock); INIT_LIST_HEAD(&ac->queue); INIT_LIST_HEAD(&ac->pending); timer_setup(&ac->timer, artpec6_crypto_timeout, 0); ac->base = base; ac->dma_cache = kmem_cache_create("artpec6_crypto_dma", sizeof(struct artpec6_crypto_dma_descriptors), 64, 0, NULL); if (!ac->dma_cache) return -ENOMEM; #ifdef CONFIG_DEBUG_FS artpec6_crypto_init_debugfs(); #endif tasklet_init(&ac->task, artpec6_crypto_task, (unsigned long)ac); ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX, GFP_KERNEL); if (!ac->pad_buffer) return -ENOMEM; ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX); ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX, GFP_KERNEL); if (!ac->zero_buffer) return -ENOMEM; ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX); err = init_crypto_hw(ac); if (err) goto free_cache; err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0, "artpec6-crypto", ac); if (err) goto disable_hw; artpec6_crypto_dev = &pdev->dev; err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); if (err) { dev_err(dev, "Failed to register ahashes\n"); goto disable_hw; } err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); if (err) { dev_err(dev, "Failed to register ciphers\n"); goto unregister_ahashes; } err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos)); if (err) { dev_err(dev, "Failed to register aeads\n"); goto unregister_algs; } return 0; unregister_algs: crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); unregister_ahashes: crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); disable_hw: artpec6_crypto_disable_hw(ac); free_cache: kmem_cache_destroy(ac->dma_cache); return err; } static int artpec6_crypto_remove(struct platform_device *pdev) { struct artpec6_crypto *ac = platform_get_drvdata(pdev); int irq = platform_get_irq(pdev, 0); crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos)); tasklet_disable(&ac->task); devm_free_irq(&pdev->dev, irq, ac); tasklet_kill(&ac->task); del_timer_sync(&ac->timer); artpec6_crypto_disable_hw(ac); kmem_cache_destroy(ac->dma_cache); #ifdef CONFIG_DEBUG_FS artpec6_crypto_free_debugfs(); #endif return 0; } static struct platform_driver artpec6_crypto_driver = { .probe = artpec6_crypto_probe, .remove = artpec6_crypto_remove, .driver = { .name = "artpec6-crypto", .of_match_table = artpec6_crypto_of_match, }, }; module_platform_driver(artpec6_crypto_driver); MODULE_AUTHOR("Axis Communications AB"); MODULE_DESCRIPTION("ARTPEC-6 Crypto driver"); MODULE_LICENSE("GPL");
linux-master
drivers/crypto/axis/artpec6_crypto.c
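A minimal kernel-side sketch of how the skcipher algorithms registered by the driver above could be exercised through the generic crypto API, binding to the "artpec6-cbc-aes" instance by its cra_driver_name. This is not part of the driver; the function name, the all-zero demo key/IV and the single-block buffer are illustrative only.

#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_cbc_aes_encrypt_one_block(void)
{
	static const u8 key[AES_KEYSIZE_128];	/* all-zero demo key */
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf, *iv;
	int ret;

	/* Look up the instance registered above by its cra_driver_name. */
	tfm = crypto_alloc_skcipher("artpec6-cbc-aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* One data block plus the IV, kept off the stack for DMA safety. */
	buf = kzalloc(2 * AES_BLOCK_SIZE, GFP_KERNEL);
	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!buf || !req) {
		ret = -ENOMEM;
		goto out;
	}
	iv = buf + AES_BLOCK_SIZE;

	ret = crypto_skcipher_setkey(tfm, key, sizeof(key));
	if (ret)
		goto out;

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* The driver is CRYPTO_ALG_ASYNC, so wait for the async completion. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

out:
	skcipher_request_free(req);
	kfree(buf);
	crypto_free_skcipher(tfm);
	return ret;
}

crypto_alloc_skcipher() also accepts the generic "cbc(aes)" name, in which case the cra_priority values above decide which registered implementation is selected.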
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2022 HiSilicon Limited. */ #include <linux/hisi_acc_qm.h> #include "qm_common.h" #define QM_DFX_BASE 0x0100000 #define QM_DFX_STATE1 0x0104000 #define QM_DFX_STATE2 0x01040C8 #define QM_DFX_COMMON 0x0000 #define QM_DFX_BASE_LEN 0x5A #define QM_DFX_STATE1_LEN 0x2E #define QM_DFX_STATE2_LEN 0x11 #define QM_DFX_COMMON_LEN 0xC3 #define QM_DFX_REGS_LEN 4UL #define QM_DBG_TMP_BUF_LEN 22 #define CURRENT_FUN_MASK GENMASK(5, 0) #define CURRENT_Q_MASK GENMASK(31, 16) #define QM_SQE_ADDR_MASK GENMASK(7, 0) #define QM_DFX_MB_CNT_VF 0x104010 #define QM_DFX_DB_CNT_VF 0x104020 #define QM_DFX_SQE_CNT_VF_SQN 0x104030 #define QM_DFX_CQE_CNT_VF_CQN 0x104040 #define QM_DFX_QN_SHIFT 16 #define QM_DFX_CNT_CLR_CE 0x100118 #define QM_DBG_WRITE_LEN 1024 static const char * const qm_debug_file_name[] = { [CURRENT_QM] = "current_qm", [CURRENT_Q] = "current_q", [CLEAR_ENABLE] = "clear_enable", }; struct qm_dfx_item { const char *name; u32 offset; }; struct qm_cmd_dump_item { const char *cmd; char *info_name; int (*dump_fn)(struct hisi_qm *qm, char *cmd, char *info_name); }; static struct qm_dfx_item qm_dfx_files[] = { {"err_irq", offsetof(struct qm_dfx, err_irq_cnt)}, {"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)}, {"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)}, {"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)}, {"mb_err", offsetof(struct qm_dfx, mb_err_cnt)}, }; #define CNT_CYC_REGS_NUM 10 static const struct debugfs_reg32 qm_dfx_regs[] = { /* XXX_CNT are reading clear register */ {"QM_ECC_1BIT_CNT ", 0x104000ull}, {"QM_ECC_MBIT_CNT ", 0x104008ull}, {"QM_DFX_MB_CNT ", 0x104018ull}, {"QM_DFX_DB_CNT ", 0x104028ull}, {"QM_DFX_SQE_CNT ", 0x104038ull}, {"QM_DFX_CQE_CNT ", 0x104048ull}, {"QM_DFX_SEND_SQE_TO_ACC_CNT ", 0x104050ull}, {"QM_DFX_WB_SQE_FROM_ACC_CNT ", 0x104058ull}, {"QM_DFX_ACC_FINISH_CNT ", 0x104060ull}, {"QM_DFX_CQE_ERR_CNT ", 0x1040b4ull}, {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, {"QM_ECC_1BIT_INF ", 0x104004ull}, {"QM_ECC_MBIT_INF ", 0x10400cull}, {"QM_DFX_ACC_RDY_VLD0 ", 0x1040a0ull}, {"QM_DFX_ACC_RDY_VLD1 ", 0x1040a4ull}, {"QM_DFX_AXI_RDY_VLD ", 0x1040a8ull}, {"QM_DFX_FF_ST0 ", 0x1040c8ull}, {"QM_DFX_FF_ST1 ", 0x1040ccull}, {"QM_DFX_FF_ST2 ", 0x1040d0ull}, {"QM_DFX_FF_ST3 ", 0x1040d4ull}, {"QM_DFX_FF_ST4 ", 0x1040d8ull}, {"QM_DFX_FF_ST5 ", 0x1040dcull}, {"QM_DFX_FF_ST6 ", 0x1040e0ull}, {"QM_IN_IDLE_ST ", 0x1040e4ull}, }; static const struct debugfs_reg32 qm_vf_dfx_regs[] = { {"QM_DFX_FUNS_ACTIVE_ST ", 0x200ull}, }; /* define the QM's dfx regs region and region length */ static struct dfx_diff_registers qm_diff_regs[] = { { .reg_offset = QM_DFX_BASE, .reg_len = QM_DFX_BASE_LEN, }, { .reg_offset = QM_DFX_STATE1, .reg_len = QM_DFX_STATE1_LEN, }, { .reg_offset = QM_DFX_STATE2, .reg_len = QM_DFX_STATE2_LEN, }, { .reg_offset = QM_DFX_COMMON, .reg_len = QM_DFX_COMMON_LEN, }, }; static struct hisi_qm *file_to_qm(struct debugfs_file *file) { struct qm_debug *debug = file->debug; return container_of(debug, struct hisi_qm, debug); } static ssize_t qm_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *pos) { char buf[QM_DBG_READ_LEN]; int len; len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", "Please echo help to cmd to get help information"); return simple_read_from_buffer(buffer, count, pos, buf, len); } static void dump_show(struct hisi_qm *qm, void *info, unsigned int info_size, char *info_name) { struct device *dev = &qm->pdev->dev; u8 *info_curr = info; u32 i; #define BYTE_PER_DW 4 dev_info(dev, "%s DUMP\n", 
info_name); for (i = 0; i < info_size; i += BYTE_PER_DW, info_curr += BYTE_PER_DW) { pr_info("DW%u: %02X%02X %02X%02X\n", i / BYTE_PER_DW, *(info_curr + 3), *(info_curr + 2), *(info_curr + 1), *(info_curr)); } } static int qm_sqc_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; struct qm_sqc *sqc, *sqc_curr; dma_addr_t sqc_dma; u32 qp_id; int ret; if (!s) return -EINVAL; ret = kstrtou32(s, 0, &qp_id); if (ret || qp_id >= qm->qp_num) { dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1); return -EINVAL; } sqc = hisi_qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma); if (IS_ERR(sqc)) return PTR_ERR(sqc); ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 1); if (ret) { down_read(&qm->qps_lock); if (qm->sqc) { sqc_curr = qm->sqc + qp_id; dump_show(qm, sqc_curr, sizeof(*sqc), "SOFT SQC"); } up_read(&qm->qps_lock); goto free_ctx; } dump_show(qm, sqc, sizeof(*sqc), name); free_ctx: hisi_qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma); return 0; } static int qm_cqc_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; struct qm_cqc *cqc, *cqc_curr; dma_addr_t cqc_dma; u32 qp_id; int ret; if (!s) return -EINVAL; ret = kstrtou32(s, 0, &qp_id); if (ret || qp_id >= qm->qp_num) { dev_err(dev, "Please input qp num (0-%u)", qm->qp_num - 1); return -EINVAL; } cqc = hisi_qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma); if (IS_ERR(cqc)) return PTR_ERR(cqc); ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 1); if (ret) { down_read(&qm->qps_lock); if (qm->cqc) { cqc_curr = qm->cqc + qp_id; dump_show(qm, cqc_curr, sizeof(*cqc), "SOFT CQC"); } up_read(&qm->qps_lock); goto free_ctx; } dump_show(qm, cqc, sizeof(*cqc), name); free_ctx: hisi_qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma); return 0; } static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; dma_addr_t xeqc_dma; size_t size; void *xeqc; int ret; u8 cmd; if (strsep(&s, " ")) { dev_err(dev, "Please do not input extra characters!\n"); return -EINVAL; } if (!strcmp(name, "EQC")) { cmd = QM_MB_CMD_EQC; size = sizeof(struct qm_eqc); } else { cmd = QM_MB_CMD_AEQC; size = sizeof(struct qm_aeqc); } xeqc = hisi_qm_ctx_alloc(qm, size, &xeqc_dma); if (IS_ERR(xeqc)) return PTR_ERR(xeqc); ret = hisi_qm_mb(qm, cmd, xeqc_dma, 0, 1); if (ret) goto err_free_ctx; dump_show(qm, xeqc, size, name); err_free_ctx: hisi_qm_ctx_free(qm, size, xeqc, &xeqc_dma); return ret; } static int q_dump_param_parse(struct hisi_qm *qm, char *s, u32 *e_id, u32 *q_id, u16 q_depth) { struct device *dev = &qm->pdev->dev; unsigned int qp_num = qm->qp_num; char *presult; int ret; presult = strsep(&s, " "); if (!presult) { dev_err(dev, "Please input qp number!\n"); return -EINVAL; } ret = kstrtou32(presult, 0, q_id); if (ret || *q_id >= qp_num) { dev_err(dev, "Please input qp num (0-%u)", qp_num - 1); return -EINVAL; } presult = strsep(&s, " "); if (!presult) { dev_err(dev, "Please input sqe number!\n"); return -EINVAL; } ret = kstrtou32(presult, 0, e_id); if (ret || *e_id >= q_depth) { dev_err(dev, "Please input sqe num (0-%u)", q_depth - 1); return -EINVAL; } if (strsep(&s, " ")) { dev_err(dev, "Please do not input extra characters!\n"); return -EINVAL; } return 0; } static int qm_sq_dump(struct hisi_qm *qm, char *s, char *name) { u16 sq_depth = qm->qp_array->cq_depth; void *sqe, *sqe_curr; struct hisi_qp *qp; u32 qp_id, sqe_id; int ret; ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id, sq_depth); if (ret) return ret; sqe = kzalloc(qm->sqe_size * sq_depth, GFP_KERNEL); if (!sqe) return 
-ENOMEM; qp = &qm->qp_array[qp_id]; memcpy(sqe, qp->sqe, qm->sqe_size * sq_depth); sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size); memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK, qm->debug.sqe_mask_len); dump_show(qm, sqe_curr, qm->sqe_size, name); kfree(sqe); return 0; } static int qm_cq_dump(struct hisi_qm *qm, char *s, char *name) { struct qm_cqe *cqe_curr; struct hisi_qp *qp; u32 qp_id, cqe_id; int ret; ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id, qm->qp_array->cq_depth); if (ret) return ret; qp = &qm->qp_array[qp_id]; cqe_curr = qp->cqe + cqe_id; dump_show(qm, cqe_curr, sizeof(struct qm_cqe), name); return 0; } static int qm_eq_aeq_dump(struct hisi_qm *qm, char *s, char *name) { struct device *dev = &qm->pdev->dev; u16 xeq_depth; size_t size; void *xeqe; u32 xeqe_id; int ret; if (!s) return -EINVAL; ret = kstrtou32(s, 0, &xeqe_id); if (ret) return -EINVAL; if (!strcmp(name, "EQE")) { xeq_depth = qm->eq_depth; size = sizeof(struct qm_eqe); } else { xeq_depth = qm->aeq_depth; size = sizeof(struct qm_aeqe); } if (xeqe_id >= xeq_depth) { dev_err(dev, "Please input eqe or aeqe num (0-%u)", xeq_depth - 1); return -EINVAL; } down_read(&qm->qps_lock); if (qm->eqe && !strcmp(name, "EQE")) { xeqe = qm->eqe + xeqe_id; } else if (qm->aeqe && !strcmp(name, "AEQE")) { xeqe = qm->aeqe + xeqe_id; } else { ret = -EINVAL; goto err_unlock; } dump_show(qm, xeqe, size, name); err_unlock: up_read(&qm->qps_lock); return ret; } static int qm_dbg_help(struct hisi_qm *qm, char *s) { struct device *dev = &qm->pdev->dev; if (strsep(&s, " ")) { dev_err(dev, "Please do not input extra characters!\n"); return -EINVAL; } dev_info(dev, "available commands:\n"); dev_info(dev, "sqc <num>\n"); dev_info(dev, "cqc <num>\n"); dev_info(dev, "eqc\n"); dev_info(dev, "aeqc\n"); dev_info(dev, "sq <num> <e>\n"); dev_info(dev, "cq <num> <e>\n"); dev_info(dev, "eq <e>\n"); dev_info(dev, "aeq <e>\n"); return 0; } static const struct qm_cmd_dump_item qm_cmd_dump_table[] = { { .cmd = "sqc", .info_name = "SQC", .dump_fn = qm_sqc_dump, }, { .cmd = "cqc", .info_name = "CQC", .dump_fn = qm_cqc_dump, }, { .cmd = "eqc", .info_name = "EQC", .dump_fn = qm_eqc_aeqc_dump, }, { .cmd = "aeqc", .info_name = "AEQC", .dump_fn = qm_eqc_aeqc_dump, }, { .cmd = "sq", .info_name = "SQE", .dump_fn = qm_sq_dump, }, { .cmd = "cq", .info_name = "CQE", .dump_fn = qm_cq_dump, }, { .cmd = "eq", .info_name = "EQE", .dump_fn = qm_eq_aeq_dump, }, { .cmd = "aeq", .info_name = "AEQE", .dump_fn = qm_eq_aeq_dump, }, }; static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf) { struct device *dev = &qm->pdev->dev; char *presult, *s, *s_tmp; int table_size, i, ret; s = kstrdup(cmd_buf, GFP_KERNEL); if (!s) return -ENOMEM; s_tmp = s; presult = strsep(&s, " "); if (!presult) { ret = -EINVAL; goto err_buffer_free; } if (!strcmp(presult, "help")) { ret = qm_dbg_help(qm, s); goto err_buffer_free; } table_size = ARRAY_SIZE(qm_cmd_dump_table); for (i = 0; i < table_size; i++) { if (!strcmp(presult, qm_cmd_dump_table[i].cmd)) { ret = qm_cmd_dump_table[i].dump_fn(qm, s, qm_cmd_dump_table[i].info_name); break; } } if (i == table_size) { dev_info(dev, "Please echo help\n"); ret = -EINVAL; } err_buffer_free: kfree(s_tmp); return ret; } static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer, size_t count, loff_t *pos) { struct hisi_qm *qm = filp->private_data; char *cmd_buf, *cmd_buf_tmp; int ret; if (*pos) return 0; ret = hisi_qm_get_dfx_access(qm); if (ret) return ret; /* Judge if the instance is being reset. 
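	 * Dump commands are dropped without an error while the QM is stopped,
	 * e.g. for the duration of a controller reset.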
*/ if (unlikely(atomic_read(&qm->status.flags) == QM_STOP)) { ret = 0; goto put_dfx_access; } if (count > QM_DBG_WRITE_LEN) { ret = -ENOSPC; goto put_dfx_access; } cmd_buf = memdup_user_nul(buffer, count); if (IS_ERR(cmd_buf)) { ret = PTR_ERR(cmd_buf); goto put_dfx_access; } cmd_buf_tmp = strchr(cmd_buf, '\n'); if (cmd_buf_tmp) { *cmd_buf_tmp = '\0'; count = cmd_buf_tmp - cmd_buf + 1; } ret = qm_cmd_write_dump(qm, cmd_buf); if (ret) { kfree(cmd_buf); goto put_dfx_access; } kfree(cmd_buf); ret = count; put_dfx_access: hisi_qm_put_dfx_access(qm); return ret; } static const struct file_operations qm_cmd_fops = { .owner = THIS_MODULE, .open = simple_open, .read = qm_cmd_read, .write = qm_cmd_write, }; /** * hisi_qm_regs_dump() - Dump registers's value. * @s: debugfs file handle. * @regset: accelerator registers information. * * Dump accelerator registers. */ void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset) { struct pci_dev *pdev = to_pci_dev(regset->dev); struct hisi_qm *qm = pci_get_drvdata(pdev); const struct debugfs_reg32 *regs = regset->regs; int regs_len = regset->nregs; int i, ret; u32 val; ret = hisi_qm_get_dfx_access(qm); if (ret) return; for (i = 0; i < regs_len; i++) { val = readl(regset->base + regs[i].offset); seq_printf(s, "%s= 0x%08x\n", regs[i].name, val); } hisi_qm_put_dfx_access(qm); } EXPORT_SYMBOL_GPL(hisi_qm_regs_dump); static int qm_regs_show(struct seq_file *s, void *unused) { struct hisi_qm *qm = s->private; struct debugfs_regset32 regset; if (qm->fun_type == QM_HW_PF) { regset.regs = qm_dfx_regs; regset.nregs = ARRAY_SIZE(qm_dfx_regs); } else { regset.regs = qm_vf_dfx_regs; regset.nregs = ARRAY_SIZE(qm_vf_dfx_regs); } regset.base = qm->io_base; regset.dev = &qm->pdev->dev; hisi_qm_regs_dump(s, &regset); return 0; } DEFINE_SHOW_ATTRIBUTE(qm_regs); static u32 current_q_read(struct hisi_qm *qm) { return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT; } static int current_q_write(struct hisi_qm *qm, u32 val) { u32 tmp; if (val >= qm->debug.curr_qm_qp_num) return -EINVAL; tmp = val << QM_DFX_QN_SHIFT | (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK); writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); tmp = val << QM_DFX_QN_SHIFT | (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK); writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); return 0; } static u32 clear_enable_read(struct hisi_qm *qm) { return readl(qm->io_base + QM_DFX_CNT_CLR_CE); } /* rd_clr_ctrl 1 enable read clear, otherwise 0 disable it */ static int clear_enable_write(struct hisi_qm *qm, u32 rd_clr_ctrl) { if (rd_clr_ctrl > 1) return -EINVAL; writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE); return 0; } static u32 current_qm_read(struct hisi_qm *qm) { return readl(qm->io_base + QM_DFX_MB_CNT_VF); } static int qm_get_vf_qp_num(struct hisi_qm *qm, u32 fun_num) { u32 remain_q_num, vfq_num; u32 num_vfs = qm->vfs_num; vfq_num = (qm->ctrl_qp_num - qm->qp_num) / num_vfs; if (vfq_num >= qm->max_qp_num) return qm->max_qp_num; remain_q_num = (qm->ctrl_qp_num - qm->qp_num) % num_vfs; if (vfq_num + remain_q_num <= qm->max_qp_num) return fun_num == num_vfs ? vfq_num + remain_q_num : vfq_num; /* * if vfq_num + remain_q_num > max_qp_num, the last VFs, * each with one more queue. */ return fun_num + remain_q_num > num_vfs ? 
vfq_num + 1 : vfq_num; } static int current_qm_write(struct hisi_qm *qm, u32 val) { u32 tmp; if (val > qm->vfs_num) return -EINVAL; /* According PF or VF Dev ID to calculation curr_qm_qp_num and store */ if (!val) qm->debug.curr_qm_qp_num = qm->qp_num; else qm->debug.curr_qm_qp_num = qm_get_vf_qp_num(qm, val); writel(val, qm->io_base + QM_DFX_MB_CNT_VF); writel(val, qm->io_base + QM_DFX_DB_CNT_VF); tmp = val | (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_Q_MASK); writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); tmp = val | (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_Q_MASK); writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); return 0; } static ssize_t qm_debug_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct debugfs_file *file = filp->private_data; enum qm_debug_file index = file->index; struct hisi_qm *qm = file_to_qm(file); char tbuf[QM_DBG_TMP_BUF_LEN]; u32 val; int ret; ret = hisi_qm_get_dfx_access(qm); if (ret) return ret; mutex_lock(&file->lock); switch (index) { case CURRENT_QM: val = current_qm_read(qm); break; case CURRENT_Q: val = current_q_read(qm); break; case CLEAR_ENABLE: val = clear_enable_read(qm); break; default: goto err_input; } mutex_unlock(&file->lock); hisi_qm_put_dfx_access(qm); ret = scnprintf(tbuf, QM_DBG_TMP_BUF_LEN, "%u\n", val); return simple_read_from_buffer(buf, count, pos, tbuf, ret); err_input: mutex_unlock(&file->lock); hisi_qm_put_dfx_access(qm); return -EINVAL; } static ssize_t qm_debug_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct debugfs_file *file = filp->private_data; enum qm_debug_file index = file->index; struct hisi_qm *qm = file_to_qm(file); unsigned long val; char tbuf[QM_DBG_TMP_BUF_LEN]; int len, ret; if (*pos != 0) return 0; if (count >= QM_DBG_TMP_BUF_LEN) return -ENOSPC; len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf, count); if (len < 0) return len; tbuf[len] = '\0'; if (kstrtoul(tbuf, 0, &val)) return -EFAULT; ret = hisi_qm_get_dfx_access(qm); if (ret) return ret; mutex_lock(&file->lock); switch (index) { case CURRENT_QM: ret = current_qm_write(qm, val); break; case CURRENT_Q: ret = current_q_write(qm, val); break; case CLEAR_ENABLE: ret = clear_enable_write(qm, val); break; default: ret = -EINVAL; } mutex_unlock(&file->lock); hisi_qm_put_dfx_access(qm); if (ret) return ret; return count; } static const struct file_operations qm_debug_fops = { .owner = THIS_MODULE, .open = simple_open, .read = qm_debug_read, .write = qm_debug_write, }; static void dfx_regs_uninit(struct hisi_qm *qm, struct dfx_diff_registers *dregs, int reg_len) { int i; /* Setting the pointer is NULL to prevent double free */ for (i = 0; i < reg_len; i++) { kfree(dregs[i].regs); dregs[i].regs = NULL; } kfree(dregs); } static struct dfx_diff_registers *dfx_regs_init(struct hisi_qm *qm, const struct dfx_diff_registers *cregs, u32 reg_len) { struct dfx_diff_registers *diff_regs; u32 j, base_offset; int i; diff_regs = kcalloc(reg_len, sizeof(*diff_regs), GFP_KERNEL); if (!diff_regs) return ERR_PTR(-ENOMEM); for (i = 0; i < reg_len; i++) { if (!cregs[i].reg_len) continue; diff_regs[i].reg_offset = cregs[i].reg_offset; diff_regs[i].reg_len = cregs[i].reg_len; diff_regs[i].regs = kcalloc(QM_DFX_REGS_LEN, cregs[i].reg_len, GFP_KERNEL); if (!diff_regs[i].regs) goto alloc_error; for (j = 0; j < diff_regs[i].reg_len; j++) { base_offset = diff_regs[i].reg_offset + j * QM_DFX_REGS_LEN; diff_regs[i].regs[j] = readl(qm->io_base + base_offset); } } return diff_regs; alloc_error: 
while (i > 0) { i--; kfree(diff_regs[i].regs); } kfree(diff_regs); return ERR_PTR(-ENOMEM); } static int qm_diff_regs_init(struct hisi_qm *qm, struct dfx_diff_registers *dregs, u32 reg_len) { qm->debug.qm_diff_regs = dfx_regs_init(qm, qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); if (IS_ERR(qm->debug.qm_diff_regs)) return PTR_ERR(qm->debug.qm_diff_regs); qm->debug.acc_diff_regs = dfx_regs_init(qm, dregs, reg_len); if (IS_ERR(qm->debug.acc_diff_regs)) { dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); return PTR_ERR(qm->debug.acc_diff_regs); } return 0; } static void qm_last_regs_uninit(struct hisi_qm *qm) { struct qm_debug *debug = &qm->debug; if (qm->fun_type == QM_HW_VF || !debug->qm_last_words) return; kfree(debug->qm_last_words); debug->qm_last_words = NULL; } static int qm_last_regs_init(struct hisi_qm *qm) { int dfx_regs_num = ARRAY_SIZE(qm_dfx_regs); struct qm_debug *debug = &qm->debug; int i; if (qm->fun_type == QM_HW_VF) return 0; debug->qm_last_words = kcalloc(dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); if (!debug->qm_last_words) return -ENOMEM; for (i = 0; i < dfx_regs_num; i++) { debug->qm_last_words[i] = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset); } return 0; } static void qm_diff_regs_uninit(struct hisi_qm *qm, u32 reg_len) { dfx_regs_uninit(qm, qm->debug.acc_diff_regs, reg_len); dfx_regs_uninit(qm, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); } /** * hisi_qm_regs_debugfs_init() - Allocate memory for registers. * @qm: device qm handle. * @dregs: diff registers handle. * @reg_len: diff registers region length. */ int hisi_qm_regs_debugfs_init(struct hisi_qm *qm, struct dfx_diff_registers *dregs, u32 reg_len) { int ret; if (!qm || !dregs) return -EINVAL; if (qm->fun_type != QM_HW_PF) return 0; ret = qm_last_regs_init(qm); if (ret) { dev_info(&qm->pdev->dev, "failed to init qm words memory!\n"); return ret; } ret = qm_diff_regs_init(qm, dregs, reg_len); if (ret) { qm_last_regs_uninit(qm); return ret; } return 0; } EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_init); /** * hisi_qm_regs_debugfs_uninit() - Free memory for registers. * @qm: device qm handle. * @reg_len: diff registers region length. */ void hisi_qm_regs_debugfs_uninit(struct hisi_qm *qm, u32 reg_len) { if (!qm || qm->fun_type != QM_HW_PF) return; qm_diff_regs_uninit(qm, reg_len); qm_last_regs_uninit(qm); } EXPORT_SYMBOL_GPL(hisi_qm_regs_debugfs_uninit); /** * hisi_qm_acc_diff_regs_dump() - Dump registers's value. * @qm: device qm handle. * @s: Debugfs file handle. * @dregs: diff registers handle. * @regs_len: diff registers region length. 
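 *
 * Only registers whose current value differs from the snapshot taken by
 * dfx_regs_init() are printed, in "offset = old ---> new" form.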
*/ void hisi_qm_acc_diff_regs_dump(struct hisi_qm *qm, struct seq_file *s, struct dfx_diff_registers *dregs, u32 regs_len) { u32 j, val, base_offset; int i, ret; if (!qm || !s || !dregs) return; ret = hisi_qm_get_dfx_access(qm); if (ret) return; down_read(&qm->qps_lock); for (i = 0; i < regs_len; i++) { if (!dregs[i].reg_len) continue; for (j = 0; j < dregs[i].reg_len; j++) { base_offset = dregs[i].reg_offset + j * QM_DFX_REGS_LEN; val = readl(qm->io_base + base_offset); if (val != dregs[i].regs[j]) seq_printf(s, "0x%08x = 0x%08x ---> 0x%08x\n", base_offset, dregs[i].regs[j], val); } } up_read(&qm->qps_lock); hisi_qm_put_dfx_access(qm); } EXPORT_SYMBOL_GPL(hisi_qm_acc_diff_regs_dump); void hisi_qm_show_last_dfx_regs(struct hisi_qm *qm) { struct qm_debug *debug = &qm->debug; struct pci_dev *pdev = qm->pdev; u32 val; int i; if (qm->fun_type == QM_HW_VF || !debug->qm_last_words) return; for (i = 0; i < ARRAY_SIZE(qm_dfx_regs); i++) { val = readl_relaxed(qm->io_base + qm_dfx_regs[i].offset); if (debug->qm_last_words[i] != val) pci_info(pdev, "%s \t= 0x%08x => 0x%08x\n", qm_dfx_regs[i].name, debug->qm_last_words[i], val); } } static int qm_diff_regs_show(struct seq_file *s, void *unused) { struct hisi_qm *qm = s->private; hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.qm_diff_regs, ARRAY_SIZE(qm_diff_regs)); return 0; } DEFINE_SHOW_ATTRIBUTE(qm_diff_regs); static ssize_t qm_status_read(struct file *filp, char __user *buffer, size_t count, loff_t *pos) { struct hisi_qm *qm = filp->private_data; char buf[QM_DBG_READ_LEN]; int val, len; val = atomic_read(&qm->status.flags); len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]); return simple_read_from_buffer(buffer, count, pos, buf, len); } static const struct file_operations qm_status_fops = { .owner = THIS_MODULE, .open = simple_open, .read = qm_status_read, }; static void qm_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, enum qm_debug_file index) { struct debugfs_file *file = qm->debug.files + index; debugfs_create_file(qm_debug_file_name[index], 0600, dir, file, &qm_debug_fops); file->index = index; mutex_init(&file->lock); file->debug = &qm->debug; } static int qm_debugfs_atomic64_set(void *data, u64 val) { if (val) return -EINVAL; atomic64_set((atomic64_t *)data, 0); return 0; } static int qm_debugfs_atomic64_get(void *data, u64 *val) { *val = atomic64_read((atomic64_t *)data); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get, qm_debugfs_atomic64_set, "%llu\n"); /** * hisi_qm_debug_init() - Initialize qm related debugfs files. * @qm: The qm for which we want to add debugfs files. * * Create qm related debugfs files. 
*/ void hisi_qm_debug_init(struct hisi_qm *qm) { struct dfx_diff_registers *qm_regs = qm->debug.qm_diff_regs; struct qm_dfx *dfx = &qm->debug.dfx; struct dentry *qm_d; void *data; int i; qm_d = debugfs_create_dir("qm", qm->debug.debug_root); qm->debug.qm_d = qm_d; /* only show this in PF */ if (qm->fun_type == QM_HW_PF) { qm_create_debugfs_file(qm, qm->debug.debug_root, CURRENT_QM); for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++) qm_create_debugfs_file(qm, qm->debug.qm_d, i); } if (qm_regs) debugfs_create_file("diff_regs", 0444, qm->debug.qm_d, qm, &qm_diff_regs_fops); debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops); debugfs_create_file("cmd", 0600, qm->debug.qm_d, qm, &qm_cmd_fops); debugfs_create_file("status", 0444, qm->debug.qm_d, qm, &qm_status_fops); for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) { data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset); debugfs_create_file(qm_dfx_files[i].name, 0644, qm_d, data, &qm_atomic64_ops); } if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) hisi_qm_set_algqos_init(qm); } EXPORT_SYMBOL_GPL(hisi_qm_debug_init); /** * hisi_qm_debug_regs_clear() - clear qm debug related registers. * @qm: The qm for which we want to clear its debug registers. */ void hisi_qm_debug_regs_clear(struct hisi_qm *qm) { const struct debugfs_reg32 *regs; int i; /* clear current_qm */ writel(0x0, qm->io_base + QM_DFX_MB_CNT_VF); writel(0x0, qm->io_base + QM_DFX_DB_CNT_VF); /* clear current_q */ writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN); writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN); /* * these registers are reading and clearing, so clear them after * reading them. */ writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE); regs = qm_dfx_regs; for (i = 0; i < CNT_CYC_REGS_NUM; i++) { readl(qm->io_base + regs->offset); regs++; } /* clear clear_enable */ writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE); } EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
linux-master
drivers/crypto/hisilicon/debugfs.c
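The debugfs helpers above are meant to be driven from an accelerator PF driver's own init path. The sketch below is editorial and hypothetical (the my_acc_* names and the directory layout are assumptions, not taken from the tree); only hisi_qm_debug_init(), hisi_qm_debug_regs_clear() and the qm->debug.debug_root / qm->fun_type fields they rely on come from the file above.

/* Hedged sketch: wiring the exported debugfs helpers into a PF driver. */
#include <linux/debugfs.h>
#include <linux/hisi_acc_qm.h>

static struct dentry *my_acc_debugfs_root;	/* hypothetical module-wide root */

static void my_acc_debugfs_init(struct hisi_qm *qm)
{
	/* Parent directory under which hisi_qm_debug_init() creates "qm/". */
	qm->debug.debug_root = debugfs_create_dir(dev_name(&qm->pdev->dev),
						  my_acc_debugfs_root);

	/* Populates qm/{regs,cmd,status,diff_regs,...} as defined above. */
	hisi_qm_debug_init(qm);

	/* On the PF, also reset the read-clear DFX counters to a known state. */
	if (qm->fun_type == QM_HW_PF)
		hisi_qm_debug_regs_clear(qm);
}

static void my_acc_debugfs_exit(struct hisi_qm *qm)
{
	debugfs_remove_recursive(qm->debug.debug_root);
}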
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include <linux/align.h> #include <linux/dma-mapping.h> #include <linux/hisi_acc_qm.h> #include <linux/module.h> #include <linux/slab.h> #define HISI_ACC_SGL_SGE_NR_MIN 1 #define HISI_ACC_SGL_NR_MAX 256 #define HISI_ACC_SGL_ALIGN_SIZE 64 #define HISI_ACC_MEM_BLOCK_NR 5 struct acc_hw_sge { dma_addr_t buf; void *page_ctrl; __le32 len; __le32 pad; __le32 pad0; __le32 pad1; }; /* use default sgl head size 64B */ struct hisi_acc_hw_sgl { dma_addr_t next_dma; __le16 entry_sum_in_chain; __le16 entry_sum_in_sgl; __le16 entry_length_in_sgl; __le16 pad0; __le64 pad1[5]; struct hisi_acc_hw_sgl *next; struct acc_hw_sge sge_entries[]; } __aligned(1); struct hisi_acc_sgl_pool { struct mem_block { struct hisi_acc_hw_sgl *sgl; dma_addr_t sgl_dma; size_t size; } mem_block[HISI_ACC_MEM_BLOCK_NR]; u32 sgl_num_per_block; u32 block_num; u32 count; u32 sge_nr; size_t sgl_size; }; /** * hisi_acc_create_sgl_pool() - Create a hw sgl pool. * @dev: The device which hw sgl pool belongs to. * @count: Count of hisi_acc_hw_sgl in pool. * @sge_nr: The count of sge in hw_sgl * * This function creates a hw sgl pool, after this user can get hw sgl memory * from it. */ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, u32 count, u32 sge_nr) { u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl; struct hisi_acc_sgl_pool *pool; struct mem_block *block; u32 i, j; if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX) return ERR_PTR(-EINVAL); sgl_size = ALIGN(sizeof(struct acc_hw_sge) * sge_nr + sizeof(struct hisi_acc_hw_sgl), HISI_ACC_SGL_ALIGN_SIZE); /* * the pool may allocate a block of memory of size PAGE_SIZE * 2^MAX_ORDER, * block size may exceed 2^31 on ia64, so the max of block size is 2^31 */ block_size = 1 << (PAGE_SHIFT + MAX_ORDER < 32 ? PAGE_SHIFT + MAX_ORDER : 31); sgl_num_per_block = block_size / sgl_size; block_num = count / sgl_num_per_block; remain_sgl = count % sgl_num_per_block; if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) || (remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1)) return ERR_PTR(-EINVAL); pool = kzalloc(sizeof(*pool), GFP_KERNEL); if (!pool) return ERR_PTR(-ENOMEM); block = pool->mem_block; for (i = 0; i < block_num; i++) { block[i].sgl = dma_alloc_coherent(dev, block_size, &block[i].sgl_dma, GFP_KERNEL); if (!block[i].sgl) { dev_err(dev, "Fail to allocate hw SG buffer!\n"); goto err_free_mem; } block[i].size = block_size; } if (remain_sgl > 0) { block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size, &block[i].sgl_dma, GFP_KERNEL); if (!block[i].sgl) { dev_err(dev, "Fail to allocate remained hw SG buffer!\n"); goto err_free_mem; } block[i].size = remain_sgl * sgl_size; } pool->sgl_num_per_block = sgl_num_per_block; pool->block_num = remain_sgl ? block_num + 1 : block_num; pool->count = count; pool->sgl_size = sgl_size; pool->sge_nr = sge_nr; return pool; err_free_mem: for (j = 0; j < i; j++) { dma_free_coherent(dev, block_size, block[j].sgl, block[j].sgl_dma); } kfree_sensitive(pool); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool); /** * hisi_acc_free_sgl_pool() - Free a hw sgl pool. * @dev: The device which hw sgl pool belongs to. * @pool: Pointer of pool. * * This function frees memory of a hw sgl pool. 
*/ void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool) { struct mem_block *block; int i; if (!dev || !pool) return; block = pool->mem_block; for (i = 0; i < pool->block_num; i++) dma_free_coherent(dev, block[i].size, block[i].sgl, block[i].sgl_dma); kfree(pool); } EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool); static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool, u32 index, dma_addr_t *hw_sgl_dma) { struct mem_block *block; u32 block_index, offset; if (!pool || !hw_sgl_dma || index >= pool->count) return ERR_PTR(-EINVAL); block = pool->mem_block; block_index = index / pool->sgl_num_per_block; offset = index % pool->sgl_num_per_block; *hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset; return (void *)block[block_index].sgl + pool->sgl_size * offset; } static void sg_map_to_hw_sg(struct scatterlist *sgl, struct acc_hw_sge *hw_sge) { hw_sge->buf = sg_dma_address(sgl); hw_sge->len = cpu_to_le32(sg_dma_len(sgl)); hw_sge->page_ctrl = sg_virt(sgl); } static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl) { u16 var = le16_to_cpu(hw_sgl->entry_sum_in_sgl); var++; hw_sgl->entry_sum_in_sgl = cpu_to_le16(var); } static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum) { hw_sgl->entry_sum_in_chain = cpu_to_le16(sum); } static void clear_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl) { struct acc_hw_sge *hw_sge = hw_sgl->sge_entries; int i; for (i = 0; i < le16_to_cpu(hw_sgl->entry_sum_in_sgl); i++) { hw_sge[i].page_ctrl = NULL; hw_sge[i].buf = 0; hw_sge[i].len = 0; } } /** * hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl. * @dev: The device which hw sgl belongs to. * @sgl: Scatterlist which will be mapped to hw sgl. * @pool: Pool which hw sgl memory will be allocated in. * @index: Index of hisi_acc_hw_sgl in pool. * @hw_sgl_dma: The dma address of allocated hw sgl. * * This function builds hw sgl according input sgl, user can use hw_sgl_dma * as src/dst in its BD. Only support single hw sgl currently. */ struct hisi_acc_hw_sgl * hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool, u32 index, dma_addr_t *hw_sgl_dma) { struct hisi_acc_hw_sgl *curr_hw_sgl; dma_addr_t curr_sgl_dma = 0; struct acc_hw_sge *curr_hw_sge; struct scatterlist *sg; int i, sg_n, sg_n_mapped; if (!dev || !sgl || !pool || !hw_sgl_dma) return ERR_PTR(-EINVAL); sg_n = sg_nents(sgl); sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); if (!sg_n_mapped) { dev_err(dev, "DMA mapping for SG error!\n"); return ERR_PTR(-EINVAL); } if (sg_n_mapped > pool->sge_nr) { dev_err(dev, "the number of entries in input scatterlist is bigger than SGL pool setting.\n"); return ERR_PTR(-EINVAL); } curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma); if (IS_ERR(curr_hw_sgl)) { dev_err(dev, "Get SGL error!\n"); dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL); return ERR_PTR(-ENOMEM); } curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr); curr_hw_sge = curr_hw_sgl->sge_entries; for_each_sg(sgl, sg, sg_n_mapped, i) { sg_map_to_hw_sg(sg, curr_hw_sge); inc_hw_sgl_sge(curr_hw_sgl); curr_hw_sge++; } update_hw_sgl_sum_sge(curr_hw_sgl, pool->sge_nr); *hw_sgl_dma = curr_sgl_dma; return curr_hw_sgl; } EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl); /** * hisi_acc_sg_buf_unmap() - Unmap allocated hw sgl. * @dev: The device which hw sgl belongs to. * @sgl: Related scatterlist. * @hw_sgl: Virtual address of hw sgl. * * This function unmaps allocated hw sgl. 
*/ void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, struct hisi_acc_hw_sgl *hw_sgl) { if (!dev || !sgl || !hw_sgl) return; dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL); clear_hw_sgl_sge(hw_sgl); hw_sgl->entry_sum_in_chain = 0; hw_sgl->entry_sum_in_sgl = 0; hw_sgl->entry_length_in_sgl = 0; } EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);
linux-master
drivers/crypto/hisilicon/sgl.c
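The pool API above is built around a fixed-size, per-queue pool that a driver indexes by request slot. The following is an illustrative sketch only: QDEPTH, SGE_NR, the slot-index scheme and the "fill the BD" step are assumptions made for the example, while the four hisi_acc_* calls and their signatures are taken from the file above.

/* Hedged sketch: typical life cycle of a hw SGL pool. */
#include <linux/err.h>
#include <linux/hisi_acc_qm.h>
#include <linux/scatterlist.h>

#define QDEPTH	256	/* hypothetical queue depth: one hw sgl per slot */
#define SGE_NR	64	/* hypothetical max scatterlist entries per request */

static int my_queue_init(struct device *dev, struct hisi_acc_sgl_pool **pool)
{
	/* One pool per queue; requests index it with their slot id. */
	*pool = hisi_acc_create_sgl_pool(dev, QDEPTH, SGE_NR);
	return PTR_ERR_OR_ZERO(*pool);
}

static int my_submit(struct device *dev, struct hisi_acc_sgl_pool *pool,
		     struct scatterlist *src, u32 slot,
		     struct hisi_acc_hw_sgl **hw_sgl, dma_addr_t *hw_sgl_dma)
{
	/* DMA-maps @src and copies it into the pool's hw sgl at @slot. */
	*hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, pool, slot, hw_sgl_dma);
	if (IS_ERR(*hw_sgl))
		return PTR_ERR(*hw_sgl);

	/* ... write *hw_sgl_dma into the BD and kick the queue here ... */
	return 0;
}

static void my_complete(struct device *dev, struct scatterlist *src,
			struct hisi_acc_hw_sgl *hw_sgl)
{
	/* Unmap the scatterlist and scrub the hw sgl so the slot can be reused. */
	hisi_acc_sg_buf_unmap(dev, src, hw_sgl);
}

static void my_queue_exit(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
	hisi_acc_free_sgl_pool(dev, pool);
}

Note that hisi_acc_sg_buf_map_to_hw_sgl() both DMA-maps the scatterlist and fills the hw sgl, so on any later failure it must be paired with hisi_acc_sg_buf_unmap() rather than a bare dma_unmap_sg().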
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include <asm/page.h> #include <linux/acpi.h> #include <linux/bitmap.h> #include <linux/dma-mapping.h> #include <linux/idr.h> #include <linux/io.h> #include <linux/irqreturn.h> #include <linux/log2.h> #include <linux/pm_runtime.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/uacce.h> #include <linux/uaccess.h> #include <uapi/misc/uacce/hisi_qm.h> #include <linux/hisi_acc_qm.h> #include "qm_common.h" /* eq/aeq irq enable */ #define QM_VF_AEQ_INT_SOURCE 0x0 #define QM_VF_AEQ_INT_MASK 0x4 #define QM_VF_EQ_INT_SOURCE 0x8 #define QM_VF_EQ_INT_MASK 0xc #define QM_IRQ_VECTOR_MASK GENMASK(15, 0) #define QM_IRQ_TYPE_MASK GENMASK(15, 0) #define QM_IRQ_TYPE_SHIFT 16 #define QM_ABN_IRQ_TYPE_MASK GENMASK(7, 0) /* mailbox */ #define QM_MB_PING_ALL_VFS 0xffff #define QM_MB_CMD_DATA_SHIFT 32 #define QM_MB_CMD_DATA_MASK GENMASK(31, 0) #define QM_MB_STATUS_MASK GENMASK(12, 9) /* sqc shift */ #define QM_SQ_HOP_NUM_SHIFT 0 #define QM_SQ_PAGE_SIZE_SHIFT 4 #define QM_SQ_BUF_SIZE_SHIFT 8 #define QM_SQ_SQE_SIZE_SHIFT 12 #define QM_SQ_PRIORITY_SHIFT 0 #define QM_SQ_ORDERS_SHIFT 4 #define QM_SQ_TYPE_SHIFT 8 #define QM_QC_PASID_ENABLE 0x1 #define QM_QC_PASID_ENABLE_SHIFT 7 #define QM_SQ_TYPE_MASK GENMASK(3, 0) #define QM_SQ_TAIL_IDX(sqc) ((le16_to_cpu((sqc)->w11) >> 6) & 0x1) /* cqc shift */ #define QM_CQ_HOP_NUM_SHIFT 0 #define QM_CQ_PAGE_SIZE_SHIFT 4 #define QM_CQ_BUF_SIZE_SHIFT 8 #define QM_CQ_CQE_SIZE_SHIFT 12 #define QM_CQ_PHASE_SHIFT 0 #define QM_CQ_FLAG_SHIFT 1 #define QM_CQE_PHASE(cqe) (le16_to_cpu((cqe)->w7) & 0x1) #define QM_QC_CQE_SIZE 4 #define QM_CQ_TAIL_IDX(cqc) ((le16_to_cpu((cqc)->w11) >> 6) & 0x1) /* eqc shift */ #define QM_EQE_AEQE_SIZE (2UL << 12) #define QM_EQC_PHASE_SHIFT 16 #define QM_EQE_PHASE(eqe) ((le32_to_cpu((eqe)->dw0) >> 16) & 0x1) #define QM_EQE_CQN_MASK GENMASK(15, 0) #define QM_AEQE_PHASE(aeqe) ((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1) #define QM_AEQE_TYPE_SHIFT 17 #define QM_AEQE_CQN_MASK GENMASK(15, 0) #define QM_CQ_OVERFLOW 0 #define QM_EQ_OVERFLOW 1 #define QM_CQE_ERROR 2 #define QM_XQ_DEPTH_SHIFT 16 #define QM_XQ_DEPTH_MASK GENMASK(15, 0) #define QM_DOORBELL_CMD_SQ 0 #define QM_DOORBELL_CMD_CQ 1 #define QM_DOORBELL_CMD_EQ 2 #define QM_DOORBELL_CMD_AEQ 3 #define QM_DOORBELL_BASE_V1 0x340 #define QM_DB_CMD_SHIFT_V1 16 #define QM_DB_INDEX_SHIFT_V1 32 #define QM_DB_PRIORITY_SHIFT_V1 48 #define QM_PAGE_SIZE 0x0034 #define QM_QP_DB_INTERVAL 0x10000 #define QM_DB_TIMEOUT_CFG 0x100074 #define QM_DB_TIMEOUT_SET 0x1fffff #define QM_MEM_START_INIT 0x100040 #define QM_MEM_INIT_DONE 0x100044 #define QM_VFT_CFG_RDY 0x10006c #define QM_VFT_CFG_OP_WR 0x100058 #define QM_VFT_CFG_TYPE 0x10005c #define QM_VFT_CFG 0x100060 #define QM_VFT_CFG_OP_ENABLE 0x100054 #define QM_PM_CTRL 0x100148 #define QM_IDLE_DISABLE BIT(9) #define QM_VFT_CFG_DATA_L 0x100064 #define QM_VFT_CFG_DATA_H 0x100068 #define QM_SQC_VFT_BUF_SIZE (7ULL << 8) #define QM_SQC_VFT_SQC_SIZE (5ULL << 12) #define QM_SQC_VFT_INDEX_NUMBER (1ULL << 16) #define QM_SQC_VFT_START_SQN_SHIFT 28 #define QM_SQC_VFT_VALID (1ULL << 44) #define QM_SQC_VFT_SQN_SHIFT 45 #define QM_CQC_VFT_BUF_SIZE (7ULL << 8) #define QM_CQC_VFT_SQC_SIZE (5ULL << 12) #define QM_CQC_VFT_INDEX_NUMBER (1ULL << 16) #define QM_CQC_VFT_VALID (1ULL << 28) #define QM_SQC_VFT_BASE_SHIFT_V2 28 #define QM_SQC_VFT_BASE_MASK_V2 GENMASK(15, 0) #define QM_SQC_VFT_NUM_SHIFT_V2 45 #define QM_SQC_VFT_NUM_MASK_V2 GENMASK(9, 0) #define QM_ABNORMAL_INT_SOURCE 0x100000 #define 
QM_ABNORMAL_INT_MASK 0x100004 #define QM_ABNORMAL_INT_MASK_VALUE 0x7fff #define QM_ABNORMAL_INT_STATUS 0x100008 #define QM_ABNORMAL_INT_SET 0x10000c #define QM_ABNORMAL_INF00 0x100010 #define QM_FIFO_OVERFLOW_TYPE 0xc0 #define QM_FIFO_OVERFLOW_TYPE_SHIFT 6 #define QM_FIFO_OVERFLOW_VF 0x3f #define QM_ABNORMAL_INF01 0x100014 #define QM_DB_TIMEOUT_TYPE 0xc0 #define QM_DB_TIMEOUT_TYPE_SHIFT 6 #define QM_DB_TIMEOUT_VF 0x3f #define QM_RAS_CE_ENABLE 0x1000ec #define QM_RAS_FE_ENABLE 0x1000f0 #define QM_RAS_NFE_ENABLE 0x1000f4 #define QM_RAS_CE_THRESHOLD 0x1000f8 #define QM_RAS_CE_TIMES_PER_IRQ 1 #define QM_OOO_SHUTDOWN_SEL 0x1040f8 #define QM_ECC_MBIT BIT(2) #define QM_DB_TIMEOUT BIT(10) #define QM_OF_FIFO_OF BIT(11) #define QM_RESET_WAIT_TIMEOUT 400 #define QM_PEH_VENDOR_ID 0x1000d8 #define ACC_VENDOR_ID_VALUE 0x5a5a #define QM_PEH_DFX_INFO0 0x1000fc #define QM_PEH_DFX_INFO1 0x100100 #define QM_PEH_DFX_MASK (BIT(0) | BIT(2)) #define QM_PEH_MSI_FINISH_MASK GENMASK(19, 16) #define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT 3 #define ACC_PEH_MSI_DISABLE GENMASK(31, 0) #define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN 0x1 #define ACC_MASTER_TRANS_RETURN_RW 3 #define ACC_MASTER_TRANS_RETURN 0x300150 #define ACC_MASTER_GLOBAL_CTRL 0x300000 #define ACC_AM_CFG_PORT_WR_EN 0x30001c #define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT #define ACC_AM_ROB_ECC_INT_STS 0x300104 #define ACC_ROB_ECC_ERR_MULTPL BIT(1) #define QM_MSI_CAP_ENABLE BIT(16) /* interfunction communication */ #define QM_IFC_READY_STATUS 0x100128 #define QM_IFC_INT_SET_P 0x100130 #define QM_IFC_INT_CFG 0x100134 #define QM_IFC_INT_SOURCE_P 0x100138 #define QM_IFC_INT_SOURCE_V 0x0020 #define QM_IFC_INT_MASK 0x0024 #define QM_IFC_INT_STATUS 0x0028 #define QM_IFC_INT_SET_V 0x002C #define QM_IFC_SEND_ALL_VFS GENMASK(6, 0) #define QM_IFC_INT_SOURCE_CLR GENMASK(63, 0) #define QM_IFC_INT_SOURCE_MASK BIT(0) #define QM_IFC_INT_DISABLE BIT(0) #define QM_IFC_INT_STATUS_MASK BIT(0) #define QM_IFC_INT_SET_MASK BIT(0) #define QM_WAIT_DST_ACK 10 #define QM_MAX_PF_WAIT_COUNT 10 #define QM_MAX_VF_WAIT_COUNT 40 #define QM_VF_RESET_WAIT_US 20000 #define QM_VF_RESET_WAIT_CNT 3000 #define QM_VF_RESET_WAIT_TIMEOUT_US \ (QM_VF_RESET_WAIT_US * QM_VF_RESET_WAIT_CNT) #define POLL_PERIOD 10 #define POLL_TIMEOUT 1000 #define WAIT_PERIOD_US_MAX 200 #define WAIT_PERIOD_US_MIN 100 #define MAX_WAIT_COUNTS 1000 #define QM_CACHE_WB_START 0x204 #define QM_CACHE_WB_DONE 0x208 #define QM_FUNC_CAPS_REG 0x3100 #define QM_CAPBILITY_VERSION GENMASK(7, 0) #define PCI_BAR_2 2 #define PCI_BAR_4 4 #define QMC_ALIGN(sz) ALIGN(sz, 32) #define QM_DBG_READ_LEN 256 #define QM_PCI_COMMAND_INVALID ~0 #define QM_RESET_STOP_TX_OFFSET 1 #define QM_RESET_STOP_RX_OFFSET 2 #define WAIT_PERIOD 20 #define REMOVE_WAIT_DELAY 10 #define QM_DRIVER_REMOVING 0 #define QM_RST_SCHED 1 #define QM_QOS_PARAM_NUM 2 #define QM_QOS_MAX_VAL 1000 #define QM_QOS_RATE 100 #define QM_QOS_EXPAND_RATE 1000 #define QM_SHAPER_CIR_B_MASK GENMASK(7, 0) #define QM_SHAPER_CIR_U_MASK GENMASK(10, 8) #define QM_SHAPER_CIR_S_MASK GENMASK(14, 11) #define QM_SHAPER_FACTOR_CIR_U_SHIFT 8 #define QM_SHAPER_FACTOR_CIR_S_SHIFT 11 #define QM_SHAPER_FACTOR_CBS_B_SHIFT 15 #define QM_SHAPER_FACTOR_CBS_S_SHIFT 19 #define QM_SHAPER_CBS_B 1 #define QM_SHAPER_VFT_OFFSET 6 #define QM_QOS_MIN_ERROR_RATE 5 #define QM_SHAPER_MIN_CBS_S 8 #define QM_QOS_TICK 0x300U #define QM_QOS_DIVISOR_CLK 0x1f40U #define QM_QOS_MAX_CIR_B 200 #define QM_QOS_MIN_CIR_B 100 #define QM_QOS_MAX_CIR_U 6 #define QM_AUTOSUSPEND_DELAY 3000 #define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \ 
(((hop_num) << QM_CQ_HOP_NUM_SHIFT) | \ ((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT) | \ ((buf_sz) << QM_CQ_BUF_SIZE_SHIFT) | \ ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) #define QM_MK_CQC_DW3_V2(cqe_sz, cq_depth) \ ((((u32)cq_depth) - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT)) #define QM_MK_SQC_W13(priority, orders, alg_type) \ (((priority) << QM_SQ_PRIORITY_SHIFT) | \ ((orders) << QM_SQ_ORDERS_SHIFT) | \ (((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT)) #define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \ (((hop_num) << QM_SQ_HOP_NUM_SHIFT) | \ ((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT) | \ ((buf_sz) << QM_SQ_BUF_SIZE_SHIFT) | \ ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) #define QM_MK_SQC_DW3_V2(sqe_sz, sq_depth) \ ((((u32)sq_depth) - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT)) #define INIT_QC_COMMON(qc, base, pasid) do { \ (qc)->head = 0; \ (qc)->tail = 0; \ (qc)->base_l = cpu_to_le32(lower_32_bits(base)); \ (qc)->base_h = cpu_to_le32(upper_32_bits(base)); \ (qc)->dw3 = 0; \ (qc)->w8 = 0; \ (qc)->rsvd0 = 0; \ (qc)->pasid = cpu_to_le16(pasid); \ (qc)->w11 = 0; \ (qc)->rsvd1 = 0; \ } while (0) enum vft_type { SQC_VFT = 0, CQC_VFT, SHAPER_VFT, }; enum acc_err_result { ACC_ERR_NONE, ACC_ERR_NEED_RESET, ACC_ERR_RECOVERED, }; enum qm_alg_type { ALG_TYPE_0, ALG_TYPE_1, }; enum qm_mb_cmd { QM_PF_FLR_PREPARE = 0x01, QM_PF_SRST_PREPARE, QM_PF_RESET_DONE, QM_VF_PREPARE_DONE, QM_VF_PREPARE_FAIL, QM_VF_START_DONE, QM_VF_START_FAIL, QM_PF_SET_QOS, QM_VF_GET_QOS, }; enum qm_basic_type { QM_TOTAL_QP_NUM_CAP = 0x0, QM_FUNC_MAX_QP_CAP, QM_XEQ_DEPTH_CAP, QM_QP_DEPTH_CAP, QM_EQ_IRQ_TYPE_CAP, QM_AEQ_IRQ_TYPE_CAP, QM_ABN_IRQ_TYPE_CAP, QM_PF2VF_IRQ_TYPE_CAP, QM_PF_IRQ_NUM_CAP, QM_VF_IRQ_NUM_CAP, }; static const struct hisi_qm_cap_info qm_cap_info_comm[] = { {QM_SUPPORT_DB_ISOLATION, 0x30, 0, BIT(0), 0x0, 0x0, 0x0}, {QM_SUPPORT_FUNC_QOS, 0x3100, 0, BIT(8), 0x0, 0x0, 0x1}, {QM_SUPPORT_STOP_QP, 0x3100, 0, BIT(9), 0x0, 0x0, 0x1}, {QM_SUPPORT_MB_COMMAND, 0x3100, 0, BIT(11), 0x0, 0x0, 0x1}, {QM_SUPPORT_SVA_PREFETCH, 0x3100, 0, BIT(14), 0x0, 0x0, 0x1}, }; static const struct hisi_qm_cap_info qm_cap_info_pf[] = { {QM_SUPPORT_RPM, 0x3100, 0, BIT(13), 0x0, 0x0, 0x1}, }; static const struct hisi_qm_cap_info qm_cap_info_vf[] = { {QM_SUPPORT_RPM, 0x3100, 0, BIT(12), 0x0, 0x0, 0x0}, }; static const struct hisi_qm_cap_info qm_basic_info[] = { {QM_TOTAL_QP_NUM_CAP, 0x100158, 0, GENMASK(10, 0), 0x1000, 0x400, 0x400}, {QM_FUNC_MAX_QP_CAP, 0x100158, 11, GENMASK(10, 0), 0x1000, 0x400, 0x400}, {QM_XEQ_DEPTH_CAP, 0x3104, 0, GENMASK(31, 0), 0x800, 0x4000800, 0x4000800}, {QM_QP_DEPTH_CAP, 0x3108, 0, GENMASK(31, 0), 0x4000400, 0x4000400, 0x4000400}, {QM_EQ_IRQ_TYPE_CAP, 0x310c, 0, GENMASK(31, 0), 0x10000, 0x10000, 0x10000}, {QM_AEQ_IRQ_TYPE_CAP, 0x3110, 0, GENMASK(31, 0), 0x0, 0x10001, 0x10001}, {QM_ABN_IRQ_TYPE_CAP, 0x3114, 0, GENMASK(31, 0), 0x0, 0x10003, 0x10003}, {QM_PF2VF_IRQ_TYPE_CAP, 0x3118, 0, GENMASK(31, 0), 0x0, 0x0, 0x10002}, {QM_PF_IRQ_NUM_CAP, 0x311c, 16, GENMASK(15, 0), 0x1, 0x4, 0x4}, {QM_VF_IRQ_NUM_CAP, 0x311c, 0, GENMASK(15, 0), 0x1, 0x2, 0x3}, }; struct qm_mailbox { __le16 w0; __le16 queue_num; __le32 base_l; __le32 base_h; __le32 rsvd; }; struct qm_doorbell { __le16 queue_num; __le16 cmd; __le16 index; __le16 priority; }; struct hisi_qm_resource { struct hisi_qm *qm; int distance; struct list_head list; }; /** * struct qm_hw_err - Structure describing the device errors * @list: hardware error list * @timestamp: timestamp when the error occurred */ struct qm_hw_err { struct list_head list; unsigned long long timestamp; }; 
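/*
 * Editorial worked example (not part of the original file): how the V2
 * context macros above pack one 32-bit dw3 word, as consumed later by
 * qm_sq_ctx_cfg() and qm_cq_ctx_cfg(). Assuming a hypothetical queue pair
 * with 1024-deep queues and 128-byte SQEs:
 *
 *   QM_MK_SQC_DW3_V2(128, 1024)
 *     = (1024 - 1) | (ilog2(128) << QM_SQ_SQE_SIZE_SHIFT)
 *     = 0x3ff | (7 << 12) = 0x73ff
 *
 *   QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, 1024)
 *     = (1024 - 1) | (4 << QM_CQ_CQE_SIZE_SHIFT)
 *     = 0x3ff | (4 << 12) = 0x43ff
 *
 * i.e. the low bits carry (depth - 1) and the element-size code starts at
 * bit 12, per the *_SIZE_SHIFT definitions above.
 */
static inline u32 example_mk_sqc_dw3_v2(u32 sqe_sz, u32 sq_depth)
{
	/* Illustrative only; unused by the driver. 128, 1024 -> 0x73ff. */
	return QM_MK_SQC_DW3_V2(sqe_sz, sq_depth);
}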
struct hisi_qm_hw_ops { int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number); void (*qm_db)(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority); int (*debug_init)(struct hisi_qm *qm); void (*hw_error_init)(struct hisi_qm *qm); void (*hw_error_uninit)(struct hisi_qm *qm); enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm); int (*set_msi)(struct hisi_qm *qm, bool set); }; struct hisi_qm_hw_error { u32 int_msk; const char *msg; }; static const struct hisi_qm_hw_error qm_hw_error[] = { { .int_msk = BIT(0), .msg = "qm_axi_rresp" }, { .int_msk = BIT(1), .msg = "qm_axi_bresp" }, { .int_msk = BIT(2), .msg = "qm_ecc_mbit" }, { .int_msk = BIT(3), .msg = "qm_ecc_1bit" }, { .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" }, { .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" }, { .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" }, { .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" }, { .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" }, { .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" }, { .int_msk = BIT(10), .msg = "qm_db_timeout" }, { .int_msk = BIT(11), .msg = "qm_of_fifo_of" }, { .int_msk = BIT(12), .msg = "qm_db_random_invalid" }, { .int_msk = BIT(13), .msg = "qm_mailbox_timeout" }, { .int_msk = BIT(14), .msg = "qm_flr_timeout" }, { /* sentinel */ } }; static const char * const qm_db_timeout[] = { "sq", "cq", "eq", "aeq", }; static const char * const qm_fifo_overflow[] = { "cq", "eq", "aeq", }; static const char * const qp_s[] = { "none", "init", "start", "stop", "close", }; struct qm_typical_qos_table { u32 start; u32 end; u32 val; }; /* the qos step is 100 */ static struct qm_typical_qos_table shaper_cir_s[] = { {100, 100, 4}, {200, 200, 3}, {300, 500, 2}, {600, 1000, 1}, {1100, 100000, 0}, }; static struct qm_typical_qos_table shaper_cbs_s[] = { {100, 200, 9}, {300, 500, 11}, {600, 1000, 12}, {1100, 10000, 16}, {10100, 25000, 17}, {25100, 50000, 18}, {50100, 100000, 19} }; static void qm_irqs_unregister(struct hisi_qm *qm); static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new) { enum qm_state curr = atomic_read(&qm->status.flags); bool avail = false; switch (curr) { case QM_INIT: if (new == QM_START || new == QM_CLOSE) avail = true; break; case QM_START: if (new == QM_STOP) avail = true; break; case QM_STOP: if (new == QM_CLOSE || new == QM_START) avail = true; break; default: break; } dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n", qm_s[curr], qm_s[new]); if (!avail) dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n", qm_s[curr], qm_s[new]); return avail; } static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp, enum qp_state new) { enum qm_state qm_curr = atomic_read(&qm->status.flags); enum qp_state qp_curr = 0; bool avail = false; if (qp) qp_curr = atomic_read(&qp->qp_status.flags); switch (new) { case QP_INIT: if (qm_curr == QM_START || qm_curr == QM_INIT) avail = true; break; case QP_START: if ((qm_curr == QM_START && qp_curr == QP_INIT) || (qm_curr == QM_START && qp_curr == QP_STOP)) avail = true; break; case QP_STOP: if ((qm_curr == QM_START && qp_curr == QP_START) || (qp_curr == QP_INIT)) avail = true; break; case QP_CLOSE: if ((qm_curr == QM_START && qp_curr == QP_INIT) || (qm_curr == QM_START && qp_curr == QP_STOP) || (qm_curr == QM_STOP && qp_curr == QP_STOP) || (qm_curr == QM_STOP && qp_curr == QP_INIT)) avail = true; break; default: break; } dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n", qp_s[qp_curr], qp_s[new], qm_s[qm_curr]); if (!avail) dev_warn(&qm->pdev->dev, "Can not change 
qp state from %s to %s in QM %s\n", qp_s[qp_curr], qp_s[new], qm_s[qm_curr]); return avail; } static u32 qm_get_hw_error_status(struct hisi_qm *qm) { return readl(qm->io_base + QM_ABNORMAL_INT_STATUS); } static u32 qm_get_dev_err_status(struct hisi_qm *qm) { return qm->err_ini->get_dev_hw_err_status(qm); } /* Check if the error causes the master ooo block */ static bool qm_check_dev_error(struct hisi_qm *qm) { u32 val, dev_val; if (qm->fun_type == QM_HW_VF) return false; val = qm_get_hw_error_status(qm) & qm->err_info.qm_shutdown_mask; dev_val = qm_get_dev_err_status(qm) & qm->err_info.dev_shutdown_mask; return val || dev_val; } static int qm_wait_reset_finish(struct hisi_qm *qm) { int delay = 0; /* All reset requests need to be queued for processing */ while (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { msleep(++delay); if (delay > QM_RESET_WAIT_TIMEOUT) return -EBUSY; } return 0; } static int qm_reset_prepare_ready(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); /* * PF and VF on host doesnot support resetting at the * same time on Kunpeng920. */ if (qm->ver < QM_HW_V3) return qm_wait_reset_finish(pf_qm); return qm_wait_reset_finish(qm); } static void qm_reset_bit_clear(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); if (qm->ver < QM_HW_V3) clear_bit(QM_RESETTING, &pf_qm->misc_ctl); clear_bit(QM_RESETTING, &qm->misc_ctl); } static void qm_mb_pre_init(struct qm_mailbox *mailbox, u8 cmd, u64 base, u16 queue, bool op) { mailbox->w0 = cpu_to_le16((cmd) | ((op) ? 0x1 << QM_MB_OP_SHIFT : 0) | (0x1 << QM_MB_BUSY_SHIFT)); mailbox->queue_num = cpu_to_le16(queue); mailbox->base_l = cpu_to_le32(lower_32_bits(base)); mailbox->base_h = cpu_to_le32(upper_32_bits(base)); mailbox->rsvd = 0; } /* return 0 mailbox ready, -ETIMEDOUT hardware timeout */ int hisi_qm_wait_mb_ready(struct hisi_qm *qm) { u32 val; return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE, val, !((val >> QM_MB_BUSY_SHIFT) & 0x1), POLL_PERIOD, POLL_TIMEOUT); } EXPORT_SYMBOL_GPL(hisi_qm_wait_mb_ready); /* 128 bit should be written to hardware at one time to trigger a mailbox */ static void qm_mb_write(struct hisi_qm *qm, const void *src) { void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE; #if IS_ENABLED(CONFIG_ARM64) unsigned long tmp0 = 0, tmp1 = 0; #endif if (!IS_ENABLED(CONFIG_ARM64)) { memcpy_toio(fun_base, src, 16); dma_wmb(); return; } #if IS_ENABLED(CONFIG_ARM64) asm volatile("ldp %0, %1, %3\n" "stp %0, %1, %2\n" "dmb oshst\n" : "=&r" (tmp0), "=&r" (tmp1), "+Q" (*((char __iomem *)fun_base)) : "Q" (*((char *)src)) : "memory"); #endif } static int qm_mb_nolock(struct hisi_qm *qm, struct qm_mailbox *mailbox) { int ret; u32 val; if (unlikely(hisi_qm_wait_mb_ready(qm))) { dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n"); ret = -EBUSY; goto mb_busy; } qm_mb_write(qm, mailbox); if (unlikely(hisi_qm_wait_mb_ready(qm))) { dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n"); ret = -ETIMEDOUT; goto mb_busy; } val = readl(qm->io_base + QM_MB_CMD_SEND_BASE); if (val & QM_MB_STATUS_MASK) { dev_err(&qm->pdev->dev, "QM mailbox operation failed!\n"); ret = -EIO; goto mb_busy; } return 0; mb_busy: atomic64_inc(&qm->debug.dfx.mb_err_cnt); return ret; } int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, bool op) { struct qm_mailbox mailbox; int ret; dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n", queue, cmd, (unsigned long 
long)dma_addr); qm_mb_pre_init(&mailbox, cmd, dma_addr, queue, op); mutex_lock(&qm->mailbox_lock); ret = qm_mb_nolock(qm, &mailbox); mutex_unlock(&qm->mailbox_lock); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_mb); static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) { u64 doorbell; doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) | ((u64)index << QM_DB_INDEX_SHIFT_V1) | ((u64)priority << QM_DB_PRIORITY_SHIFT_V1); writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1); } static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) { void __iomem *io_base = qm->io_base; u16 randata = 0; u64 doorbell; if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ) io_base = qm->db_io_base + (u64)qn * qm->db_interval + QM_DOORBELL_SQ_CQ_BASE_V2; else io_base += QM_DOORBELL_EQ_AEQ_BASE_V2; doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) | ((u64)randata << QM_DB_RAND_SHIFT_V2) | ((u64)index << QM_DB_INDEX_SHIFT_V2) | ((u64)priority << QM_DB_PRIORITY_SHIFT_V2); writeq(doorbell, io_base); } static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority) { dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n", qn, cmd, index); qm->ops->qm_db(qm, qn, cmd, index, priority); } static void qm_disable_clock_gate(struct hisi_qm *qm) { u32 val; /* if qm enables clock gating in Kunpeng930, qos will be inaccurate. */ if (qm->ver < QM_HW_V3) return; val = readl(qm->io_base + QM_PM_CTRL); val |= QM_IDLE_DISABLE; writel(val, qm->io_base + QM_PM_CTRL); } static int qm_dev_mem_reset(struct hisi_qm *qm) { u32 val; writel(0x1, qm->io_base + QM_MEM_START_INIT); return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val, val & BIT(0), POLL_PERIOD, POLL_TIMEOUT); } /** * hisi_qm_get_hw_info() - Get device information. * @qm: The qm which want to get information. * @info_table: Array for storing device information. * @index: Index in info_table. * @is_read: Whether read from reg, 0: not support read from reg. * * This function returns device information the caller needs. 
*/ u32 hisi_qm_get_hw_info(struct hisi_qm *qm, const struct hisi_qm_cap_info *info_table, u32 index, bool is_read) { u32 val; switch (qm->ver) { case QM_HW_V1: return info_table[index].v1_val; case QM_HW_V2: return info_table[index].v2_val; default: if (!is_read) return info_table[index].v3_val; val = readl(qm->io_base + info_table[index].offset); return (val >> info_table[index].shift) & info_table[index].mask; } } EXPORT_SYMBOL_GPL(hisi_qm_get_hw_info); static void qm_get_xqc_depth(struct hisi_qm *qm, u16 *low_bits, u16 *high_bits, enum qm_basic_type type) { u32 depth; depth = hisi_qm_get_hw_info(qm, qm_basic_info, type, qm->cap_ver); *low_bits = depth & QM_XQ_DEPTH_MASK; *high_bits = (depth >> QM_XQ_DEPTH_SHIFT) & QM_XQ_DEPTH_MASK; } static u32 qm_get_irq_num(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_PF) return hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF_IRQ_NUM_CAP, qm->cap_ver); return hisi_qm_get_hw_info(qm, qm_basic_info, QM_VF_IRQ_NUM_CAP, qm->cap_ver); } static int qm_pm_get_sync(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; int ret; if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) return 0; ret = pm_runtime_resume_and_get(dev); if (ret < 0) { dev_err(dev, "failed to get_sync(%d).\n", ret); return ret; } return 0; } static void qm_pm_put_sync(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) return; pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } static void qm_cq_head_update(struct hisi_qp *qp) { if (qp->qp_status.cq_head == qp->cq_depth - 1) { qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase; qp->qp_status.cq_head = 0; } else { qp->qp_status.cq_head++; } } static void qm_poll_req_cb(struct hisi_qp *qp) { struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; struct hisi_qm *qm = qp->qm; while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { dma_rmb(); qp->req_cb(qp, qp->sqe + qm->sqe_size * le16_to_cpu(cqe->sq_head)); qm_cq_head_update(qp); cqe = qp->cqe + qp->qp_status.cq_head; qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 0); atomic_dec(&qp->qp_status.used); } /* set c_flag */ qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->qp_status.cq_head, 1); } static int qm_get_complete_eqe_num(struct hisi_qm_poll_data *poll_data) { struct hisi_qm *qm = poll_data->qm; struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; u16 eq_depth = qm->eq_depth; int eqe_num = 0; u16 cqn; while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; poll_data->qp_finish_id[eqe_num] = cqn; eqe_num++; if (qm->status.eq_head == eq_depth - 1) { qm->status.eqc_phase = !qm->status.eqc_phase; eqe = qm->eqe; qm->status.eq_head = 0; } else { eqe++; qm->status.eq_head++; } if (eqe_num == (eq_depth >> 1) - 1) break; } qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); return eqe_num; } static void qm_work_process(struct work_struct *work) { struct hisi_qm_poll_data *poll_data = container_of(work, struct hisi_qm_poll_data, work); struct hisi_qm *qm = poll_data->qm; struct hisi_qp *qp; int eqe_num, i; /* Get qp id of completed tasks and re-enable the interrupt. 
*/ eqe_num = qm_get_complete_eqe_num(poll_data); for (i = eqe_num - 1; i >= 0; i--) { qp = &qm->qp_array[poll_data->qp_finish_id[i]]; if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP)) continue; if (qp->event_cb) { qp->event_cb(qp); continue; } if (likely(qp->req_cb)) qm_poll_req_cb(qp); } } static bool do_qm_eq_irq(struct hisi_qm *qm) { struct qm_eqe *eqe = qm->eqe + qm->status.eq_head; struct hisi_qm_poll_data *poll_data; u16 cqn; if (!readl(qm->io_base + QM_VF_EQ_INT_SOURCE)) return false; if (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) { cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK; poll_data = &qm->poll_data[cqn]; queue_work(qm->wq, &poll_data->work); return true; } return false; } static irqreturn_t qm_eq_irq(int irq, void *data) { struct hisi_qm *qm = data; bool ret; ret = do_qm_eq_irq(qm); if (ret) return IRQ_HANDLED; atomic64_inc(&qm->debug.dfx.err_irq_cnt); qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); return IRQ_NONE; } static irqreturn_t qm_mb_cmd_irq(int irq, void *data) { struct hisi_qm *qm = data; u32 val; val = readl(qm->io_base + QM_IFC_INT_STATUS); val &= QM_IFC_INT_STATUS_MASK; if (!val) return IRQ_NONE; if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) { dev_warn(&qm->pdev->dev, "Driver is down, message cannot be processed!\n"); return IRQ_HANDLED; } schedule_work(&qm->cmd_process); return IRQ_HANDLED; } static void qm_set_qp_disable(struct hisi_qp *qp, int offset) { u32 *addr; if (qp->is_in_kernel) return; addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset; *addr = 1; /* make sure setup is completed */ smp_wmb(); } static void qm_disable_qp(struct hisi_qm *qm, u32 qp_id) { struct hisi_qp *qp = &qm->qp_array[qp_id]; qm_set_qp_disable(qp, QM_RESET_STOP_TX_OFFSET); hisi_qm_stop_qp(qp); qm_set_qp_disable(qp, QM_RESET_STOP_RX_OFFSET); } static void qm_reset_function(struct hisi_qm *qm) { struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); struct device *dev = &qm->pdev->dev; int ret; if (qm_check_dev_error(pf_qm)) return; ret = qm_reset_prepare_ready(qm); if (ret) { dev_err(dev, "reset function not ready\n"); return; } ret = hisi_qm_stop(qm, QM_DOWN); if (ret) { dev_err(dev, "failed to stop qm when reset function\n"); goto clear_bit; } ret = hisi_qm_start(qm); if (ret) dev_err(dev, "failed to start qm when reset function\n"); clear_bit: qm_reset_bit_clear(qm); } static irqreturn_t qm_aeq_thread(int irq, void *data) { struct hisi_qm *qm = data; struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head; u16 aeq_depth = qm->aeq_depth; u32 type, qp_id; while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) { type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT; qp_id = le32_to_cpu(aeqe->dw0) & QM_AEQE_CQN_MASK; switch (type) { case QM_EQ_OVERFLOW: dev_err(&qm->pdev->dev, "eq overflow, reset function\n"); qm_reset_function(qm); return IRQ_HANDLED; case QM_CQ_OVERFLOW: dev_err(&qm->pdev->dev, "cq overflow, stop qp(%u)\n", qp_id); fallthrough; case QM_CQE_ERROR: qm_disable_qp(qm, qp_id); break; default: dev_err(&qm->pdev->dev, "unknown error type %u\n", type); break; } if (qm->status.aeq_head == aeq_depth - 1) { qm->status.aeqc_phase = !qm->status.aeqc_phase; aeqe = qm->aeqe; qm->status.aeq_head = 0; } else { aeqe++; qm->status.aeq_head++; } } qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); return IRQ_HANDLED; } static irqreturn_t qm_aeq_irq(int irq, void *data) { struct hisi_qm *qm = data; atomic64_inc(&qm->debug.dfx.aeq_irq_cnt); if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE)) return IRQ_NONE; return IRQ_WAKE_THREAD; } static void 
qm_init_qp_status(struct hisi_qp *qp) { struct hisi_qp_status *qp_status = &qp->qp_status; qp_status->sq_tail = 0; qp_status->cq_head = 0; qp_status->cqc_phase = true; atomic_set(&qp_status->used, 0); } static void qm_init_prefetch(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; u32 page_type = 0x0; if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; switch (PAGE_SIZE) { case SZ_4K: page_type = 0x0; break; case SZ_16K: page_type = 0x1; break; case SZ_64K: page_type = 0x2; break; default: dev_err(dev, "system page size is not support: %lu, default set to 4KB", PAGE_SIZE); } writel(page_type, qm->io_base + QM_PAGE_SIZE); } /* * acc_shaper_para_calc() Get the IR value by the qos formula, the return value * is the expected qos calculated. * the formula: * IR = X Mbps if ir = 1 means IR = 100 Mbps, if ir = 10000 means = 10Gbps * * IR_b * (2 ^ IR_u) * 8000 * IR(Mbps) = ------------------------- * Tick * (2 ^ IR_s) */ static u32 acc_shaper_para_calc(u64 cir_b, u64 cir_u, u64 cir_s) { return ((cir_b * QM_QOS_DIVISOR_CLK) * (1 << cir_u)) / (QM_QOS_TICK * (1 << cir_s)); } static u32 acc_shaper_calc_cbs_s(u32 ir) { int table_size = ARRAY_SIZE(shaper_cbs_s); int i; for (i = 0; i < table_size; i++) { if (ir >= shaper_cbs_s[i].start && ir <= shaper_cbs_s[i].end) return shaper_cbs_s[i].val; } return QM_SHAPER_MIN_CBS_S; } static u32 acc_shaper_calc_cir_s(u32 ir) { int table_size = ARRAY_SIZE(shaper_cir_s); int i; for (i = 0; i < table_size; i++) { if (ir >= shaper_cir_s[i].start && ir <= shaper_cir_s[i].end) return shaper_cir_s[i].val; } return 0; } static int qm_get_shaper_para(u32 ir, struct qm_shaper_factor *factor) { u32 cir_b, cir_u, cir_s, ir_calc; u32 error_rate; factor->cbs_s = acc_shaper_calc_cbs_s(ir); cir_s = acc_shaper_calc_cir_s(ir); for (cir_b = QM_QOS_MIN_CIR_B; cir_b <= QM_QOS_MAX_CIR_B; cir_b++) { for (cir_u = 0; cir_u <= QM_QOS_MAX_CIR_U; cir_u++) { ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; if (error_rate <= QM_QOS_MIN_ERROR_RATE) { factor->cir_b = cir_b; factor->cir_u = cir_u; factor->cir_s = cir_s; return 0; } } } return -EINVAL; } static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base, u32 number, struct qm_shaper_factor *factor) { u64 tmp = 0; if (number > 0) { switch (type) { case SQC_VFT: if (qm->ver == QM_HW_V1) { tmp = QM_SQC_VFT_BUF_SIZE | QM_SQC_VFT_SQC_SIZE | QM_SQC_VFT_INDEX_NUMBER | QM_SQC_VFT_VALID | (u64)base << QM_SQC_VFT_START_SQN_SHIFT; } else { tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT | QM_SQC_VFT_VALID | (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT; } break; case CQC_VFT: if (qm->ver == QM_HW_V1) { tmp = QM_CQC_VFT_BUF_SIZE | QM_CQC_VFT_SQC_SIZE | QM_CQC_VFT_INDEX_NUMBER | QM_CQC_VFT_VALID; } else { tmp = QM_CQC_VFT_VALID; } break; case SHAPER_VFT: if (factor) { tmp = factor->cir_b | (factor->cir_u << QM_SHAPER_FACTOR_CIR_U_SHIFT) | (factor->cir_s << QM_SHAPER_FACTOR_CIR_S_SHIFT) | (QM_SHAPER_CBS_B << QM_SHAPER_FACTOR_CBS_B_SHIFT) | (factor->cbs_s << QM_SHAPER_FACTOR_CBS_S_SHIFT); } break; } } writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L); writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H); } static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type, u32 fun_num, u32 base, u32 number) { struct qm_shaper_factor *factor = NULL; unsigned int val; int ret; if (type == SHAPER_VFT && test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) factor = &qm->factor[fun_num]; ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, val 
& BIT(0), POLL_PERIOD, POLL_TIMEOUT); if (ret) return ret; writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR); writel(type, qm->io_base + QM_VFT_CFG_TYPE); if (type == SHAPER_VFT) fun_num |= base << QM_SHAPER_VFT_OFFSET; writel(fun_num, qm->io_base + QM_VFT_CFG); qm_vft_data_cfg(qm, type, base, number, factor); writel(0x0, qm->io_base + QM_VFT_CFG_RDY); writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, val & BIT(0), POLL_PERIOD, POLL_TIMEOUT); } static int qm_shaper_init_vft(struct hisi_qm *qm, u32 fun_num) { u32 qos = qm->factor[fun_num].func_qos; int ret, i; ret = qm_get_shaper_para(qos * QM_QOS_RATE, &qm->factor[fun_num]); if (ret) { dev_err(&qm->pdev->dev, "failed to calculate shaper parameter!\n"); return ret; } writel(qm->type_rate, qm->io_base + QM_SHAPER_CFG); for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { /* The base number of queue reuse for different alg type */ ret = qm_set_vft_common(qm, SHAPER_VFT, fun_num, i, 1); if (ret) return ret; } return 0; } /* The config should be conducted after qm_dev_mem_reset() */ static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number) { int ret, i; for (i = SQC_VFT; i <= CQC_VFT; i++) { ret = qm_set_vft_common(qm, i, fun_num, base, number); if (ret) return ret; } /* init default shaper qos val */ if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { ret = qm_shaper_init_vft(qm, fun_num); if (ret) goto back_sqc_cqc; } return 0; back_sqc_cqc: for (i = SQC_VFT; i <= CQC_VFT; i++) qm_set_vft_common(qm, i, fun_num, 0, 0); return ret; } static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number) { u64 sqc_vft; int ret; ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1); if (ret) return ret; sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); *base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2); *number = (QM_SQC_VFT_NUM_MASK_V2 & (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1; return 0; } void *hisi_qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size, dma_addr_t *dma_addr) { struct device *dev = &qm->pdev->dev; void *ctx_addr; ctx_addr = kzalloc(ctx_size, GFP_KERNEL); if (!ctx_addr) return ERR_PTR(-ENOMEM); *dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE); if (dma_mapping_error(dev, *dma_addr)) { dev_err(dev, "DMA mapping error!\n"); kfree(ctx_addr); return ERR_PTR(-ENOMEM); } return ctx_addr; } void hisi_qm_ctx_free(struct hisi_qm *qm, size_t ctx_size, const void *ctx_addr, dma_addr_t *dma_addr) { struct device *dev = &qm->pdev->dev; dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE); kfree(ctx_addr); } static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) { return hisi_qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1); } static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id) { return hisi_qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1); } static void qm_hw_error_init_v1(struct hisi_qm *qm) { writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK); } static void qm_hw_error_cfg(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; qm->error_mask = err_info->nfe | err_info->ce | err_info->fe; /* clear QM hw residual error source */ writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE); /* configure error type */ writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE); writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD); writel(err_info->nfe, qm->io_base + 
QM_RAS_NFE_ENABLE); writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE); } static void qm_hw_error_init_v2(struct hisi_qm *qm) { u32 irq_unmask; qm_hw_error_cfg(qm); irq_unmask = ~qm->error_mask; irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); } static void qm_hw_error_uninit_v2(struct hisi_qm *qm) { u32 irq_mask = qm->error_mask; irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); } static void qm_hw_error_init_v3(struct hisi_qm *qm) { u32 irq_unmask; qm_hw_error_cfg(qm); /* enable close master ooo when hardware error happened */ writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL); irq_unmask = ~qm->error_mask; irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK); writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK); } static void qm_hw_error_uninit_v3(struct hisi_qm *qm) { u32 irq_mask = qm->error_mask; irq_mask |= readl(qm->io_base + QM_ABNORMAL_INT_MASK); writel(irq_mask, qm->io_base + QM_ABNORMAL_INT_MASK); /* disable close master ooo when hardware error happened */ writel(0x0, qm->io_base + QM_OOO_SHUTDOWN_SEL); } static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status) { const struct hisi_qm_hw_error *err; struct device *dev = &qm->pdev->dev; u32 reg_val, type, vf_num; int i; for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) { err = &qm_hw_error[i]; if (!(err->int_msk & error_status)) continue; dev_err(dev, "%s [error status=0x%x] found\n", err->msg, err->int_msk); if (err->int_msk & QM_DB_TIMEOUT) { reg_val = readl(qm->io_base + QM_ABNORMAL_INF01); type = (reg_val & QM_DB_TIMEOUT_TYPE) >> QM_DB_TIMEOUT_TYPE_SHIFT; vf_num = reg_val & QM_DB_TIMEOUT_VF; dev_err(dev, "qm %s doorbell timeout in function %u\n", qm_db_timeout[type], vf_num); } else if (err->int_msk & QM_OF_FIFO_OF) { reg_val = readl(qm->io_base + QM_ABNORMAL_INF00); type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >> QM_FIFO_OVERFLOW_TYPE_SHIFT; vf_num = reg_val & QM_FIFO_OVERFLOW_VF; if (type < ARRAY_SIZE(qm_fifo_overflow)) dev_err(dev, "qm %s fifo overflow in function %u\n", qm_fifo_overflow[type], vf_num); else dev_err(dev, "unknown error type\n"); } } } static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm) { u32 error_status, tmp; /* read err sts */ tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS); error_status = qm->error_mask & tmp; if (error_status) { if (error_status & QM_ECC_MBIT) qm->err_status.is_qm_ecc_mbit = true; qm_log_hw_error(qm, error_status); if (error_status & qm->err_info.qm_reset_mask) return ACC_ERR_NEED_RESET; writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE); writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE); } return ACC_ERR_RECOVERED; } static int qm_get_mb_cmd(struct hisi_qm *qm, u64 *msg, u16 fun_num) { struct qm_mailbox mailbox; int ret; qm_mb_pre_init(&mailbox, QM_MB_CMD_DST, 0, fun_num, 0); mutex_lock(&qm->mailbox_lock); ret = qm_mb_nolock(qm, &mailbox); if (ret) goto err_unlock; *msg = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) | ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32); err_unlock: mutex_unlock(&qm->mailbox_lock); return ret; } static void qm_clear_cmd_interrupt(struct hisi_qm *qm, u64 vf_mask) { u32 val; if (qm->fun_type == QM_HW_PF) writeq(vf_mask, qm->io_base + QM_IFC_INT_SOURCE_P); val = readl(qm->io_base + QM_IFC_INT_SOURCE_V); val |= QM_IFC_INT_SOURCE_MASK; writel(val, qm->io_base + QM_IFC_INT_SOURCE_V); } static void qm_handle_vf_msg(struct hisi_qm *qm, u32 vf_id) { struct device *dev = 
&qm->pdev->dev; u32 cmd; u64 msg; int ret; ret = qm_get_mb_cmd(qm, &msg, vf_id); if (ret) { dev_err(dev, "failed to get msg from VF(%u)!\n", vf_id); return; } cmd = msg & QM_MB_CMD_DATA_MASK; switch (cmd) { case QM_VF_PREPARE_FAIL: dev_err(dev, "failed to stop VF(%u)!\n", vf_id); break; case QM_VF_START_FAIL: dev_err(dev, "failed to start VF(%u)!\n", vf_id); break; case QM_VF_PREPARE_DONE: case QM_VF_START_DONE: break; default: dev_err(dev, "unsupported cmd %u sent by VF(%u)!\n", cmd, vf_id); break; } } static int qm_wait_vf_prepare_finish(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; u32 vfs_num = qm->vfs_num; int cnt = 0; int ret = 0; u64 val; u32 i; if (!qm->vfs_num || !test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) return 0; while (true) { val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); /* All VFs send command to PF, break */ if ((val & GENMASK(vfs_num, 1)) == GENMASK(vfs_num, 1)) break; if (++cnt > QM_MAX_PF_WAIT_COUNT) { ret = -EBUSY; break; } msleep(QM_WAIT_DST_ACK); } /* PF check VFs msg */ for (i = 1; i <= vfs_num; i++) { if (val & BIT(i)) qm_handle_vf_msg(qm, i); else dev_err(dev, "VF(%u) not ping PF!\n", i); } /* PF clear interrupt to ack VFs */ qm_clear_cmd_interrupt(qm, val); return ret; } static void qm_trigger_vf_interrupt(struct hisi_qm *qm, u32 fun_num) { u32 val; val = readl(qm->io_base + QM_IFC_INT_CFG); val &= ~QM_IFC_SEND_ALL_VFS; val |= fun_num; writel(val, qm->io_base + QM_IFC_INT_CFG); val = readl(qm->io_base + QM_IFC_INT_SET_P); val |= QM_IFC_INT_SET_MASK; writel(val, qm->io_base + QM_IFC_INT_SET_P); } static void qm_trigger_pf_interrupt(struct hisi_qm *qm) { u32 val; val = readl(qm->io_base + QM_IFC_INT_SET_V); val |= QM_IFC_INT_SET_MASK; writel(val, qm->io_base + QM_IFC_INT_SET_V); } static int qm_ping_single_vf(struct hisi_qm *qm, u64 cmd, u32 fun_num) { struct device *dev = &qm->pdev->dev; struct qm_mailbox mailbox; int cnt = 0; u64 val; int ret; qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, fun_num, 0); mutex_lock(&qm->mailbox_lock); ret = qm_mb_nolock(qm, &mailbox); if (ret) { dev_err(dev, "failed to send command to vf(%u)!\n", fun_num); goto err_unlock; } qm_trigger_vf_interrupt(qm, fun_num); while (true) { msleep(QM_WAIT_DST_ACK); val = readq(qm->io_base + QM_IFC_READY_STATUS); /* if VF respond, PF notifies VF successfully. */ if (!(val & BIT(fun_num))) goto err_unlock; if (++cnt > QM_MAX_PF_WAIT_COUNT) { dev_err(dev, "failed to get response from VF(%u)!\n", fun_num); ret = -ETIMEDOUT; break; } } err_unlock: mutex_unlock(&qm->mailbox_lock); return ret; } static int qm_ping_all_vfs(struct hisi_qm *qm, u64 cmd) { struct device *dev = &qm->pdev->dev; u32 vfs_num = qm->vfs_num; struct qm_mailbox mailbox; u64 val = 0; int cnt = 0; int ret; u32 i; qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, QM_MB_PING_ALL_VFS, 0); mutex_lock(&qm->mailbox_lock); /* PF sends command to all VFs by mailbox */ ret = qm_mb_nolock(qm, &mailbox); if (ret) { dev_err(dev, "failed to send command to VFs!\n"); mutex_unlock(&qm->mailbox_lock); return ret; } qm_trigger_vf_interrupt(qm, QM_IFC_SEND_ALL_VFS); while (true) { msleep(QM_WAIT_DST_ACK); val = readq(qm->io_base + QM_IFC_READY_STATUS); /* If all VFs acked, PF notifies VFs successfully. */ if (!(val & GENMASK(vfs_num, 1))) { mutex_unlock(&qm->mailbox_lock); return 0; } if (++cnt > QM_MAX_PF_WAIT_COUNT) break; } mutex_unlock(&qm->mailbox_lock); /* Check which vf respond timeout. 
*/ for (i = 1; i <= vfs_num; i++) { if (val & BIT(i)) dev_err(dev, "failed to get response from VF(%u)!\n", i); } return -ETIMEDOUT; } static int qm_ping_pf(struct hisi_qm *qm, u64 cmd) { struct qm_mailbox mailbox; int cnt = 0; u32 val; int ret; qm_mb_pre_init(&mailbox, QM_MB_CMD_SRC, cmd, 0, 0); mutex_lock(&qm->mailbox_lock); ret = qm_mb_nolock(qm, &mailbox); if (ret) { dev_err(&qm->pdev->dev, "failed to send command to PF!\n"); goto unlock; } qm_trigger_pf_interrupt(qm); /* Waiting for PF response */ while (true) { msleep(QM_WAIT_DST_ACK); val = readl(qm->io_base + QM_IFC_INT_SET_V); if (!(val & QM_IFC_INT_STATUS_MASK)) break; if (++cnt > QM_MAX_VF_WAIT_COUNT) { ret = -ETIMEDOUT; break; } } unlock: mutex_unlock(&qm->mailbox_lock); return ret; } static int qm_stop_qp(struct hisi_qp *qp) { return hisi_qm_mb(qp->qm, QM_MB_CMD_STOP_QP, 0, qp->qp_id, 0); } static int qm_set_msi(struct hisi_qm *qm, bool set) { struct pci_dev *pdev = qm->pdev; if (set) { pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, 0); } else { pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64, ACC_PEH_MSI_DISABLE); if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit) return 0; mdelay(1); if (readl(qm->io_base + QM_PEH_DFX_INFO0)) return -EFAULT; } return 0; } static void qm_wait_msi_finish(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; u32 cmd = ~0; int cnt = 0; u32 val; int ret; while (true) { pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_PENDING_64, &cmd); if (!cmd) break; if (++cnt > MAX_WAIT_COUNTS) { pci_warn(pdev, "failed to empty MSI PENDING!\n"); break; } udelay(1); } ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO0, val, !(val & QM_PEH_DFX_MASK), POLL_PERIOD, POLL_TIMEOUT); if (ret) pci_warn(pdev, "failed to empty PEH MSI!\n"); ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_DFX_INFO1, val, !(val & QM_PEH_MSI_FINISH_MASK), POLL_PERIOD, POLL_TIMEOUT); if (ret) pci_warn(pdev, "failed to finish MSI operation!\n"); } static int qm_set_msi_v3(struct hisi_qm *qm, bool set) { struct pci_dev *pdev = qm->pdev; int ret = -ETIMEDOUT; u32 cmd, i; pci_read_config_dword(pdev, pdev->msi_cap, &cmd); if (set) cmd |= QM_MSI_CAP_ENABLE; else cmd &= ~QM_MSI_CAP_ENABLE; pci_write_config_dword(pdev, pdev->msi_cap, cmd); if (set) { for (i = 0; i < MAX_WAIT_COUNTS; i++) { pci_read_config_dword(pdev, pdev->msi_cap, &cmd); if (cmd & QM_MSI_CAP_ENABLE) return 0; udelay(1); } } else { udelay(WAIT_PERIOD_US_MIN); qm_wait_msi_finish(qm); ret = 0; } return ret; } static const struct hisi_qm_hw_ops qm_hw_ops_v1 = { .qm_db = qm_db_v1, .hw_error_init = qm_hw_error_init_v1, .set_msi = qm_set_msi, }; static const struct hisi_qm_hw_ops qm_hw_ops_v2 = { .get_vft = qm_get_vft_v2, .qm_db = qm_db_v2, .hw_error_init = qm_hw_error_init_v2, .hw_error_uninit = qm_hw_error_uninit_v2, .hw_error_handle = qm_hw_error_handle_v2, .set_msi = qm_set_msi, }; static const struct hisi_qm_hw_ops qm_hw_ops_v3 = { .get_vft = qm_get_vft_v2, .qm_db = qm_db_v2, .hw_error_init = qm_hw_error_init_v3, .hw_error_uninit = qm_hw_error_uninit_v3, .hw_error_handle = qm_hw_error_handle_v2, .set_msi = qm_set_msi_v3, }; static void *qm_get_avail_sqe(struct hisi_qp *qp) { struct hisi_qp_status *qp_status = &qp->qp_status; u16 sq_tail = qp_status->sq_tail; if (unlikely(atomic_read(&qp->qp_status.used) == qp->sq_depth - 1)) return NULL; return qp->sqe + sq_tail * qp->qm->sqe_size; } static void hisi_qm_unset_hw_reset(struct hisi_qp *qp) { u64 *addr; /* Use last 64 bits of DUS to reset status. 
*/ addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET; *addr = 0; } static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type) { struct device *dev = &qm->pdev->dev; struct hisi_qp *qp; int qp_id; if (!qm_qp_avail_state(qm, NULL, QP_INIT)) return ERR_PTR(-EPERM); if (qm->qp_in_used == qm->qp_num) { dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", qm->qp_num); atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); return ERR_PTR(-EBUSY); } qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC); if (qp_id < 0) { dev_info_ratelimited(dev, "All %u queues of QM are busy!\n", qm->qp_num); atomic64_inc(&qm->debug.dfx.create_qp_err_cnt); return ERR_PTR(-EBUSY); } qp = &qm->qp_array[qp_id]; hisi_qm_unset_hw_reset(qp); memset(qp->cqe, 0, sizeof(struct qm_cqe) * qp->cq_depth); qp->event_cb = NULL; qp->req_cb = NULL; qp->qp_id = qp_id; qp->alg_type = alg_type; qp->is_in_kernel = true; qm->qp_in_used++; atomic_set(&qp->qp_status.flags, QP_INIT); return qp; } /** * hisi_qm_create_qp() - Create a queue pair from qm. * @qm: The qm we create a qp from. * @alg_type: Accelerator specific algorithm type in sqc. * * Return created qp, negative error code if failed. */ static struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type) { struct hisi_qp *qp; int ret; ret = qm_pm_get_sync(qm); if (ret) return ERR_PTR(ret); down_write(&qm->qps_lock); qp = qm_create_qp_nolock(qm, alg_type); up_write(&qm->qps_lock); if (IS_ERR(qp)) qm_pm_put_sync(qm); return qp; } /** * hisi_qm_release_qp() - Release a qp back to its qm. * @qp: The qp we want to release. * * This function releases the resource of a qp. */ static void hisi_qm_release_qp(struct hisi_qp *qp) { struct hisi_qm *qm = qp->qm; down_write(&qm->qps_lock); if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) { up_write(&qm->qps_lock); return; } qm->qp_in_used--; idr_remove(&qm->qp_idr, qp->qp_id); up_write(&qm->qps_lock); qm_pm_put_sync(qm); } static int qm_sq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; enum qm_hw_ver ver = qm->ver; struct qm_sqc *sqc; dma_addr_t sqc_dma; int ret; sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL); if (!sqc) return -ENOMEM; INIT_QC_COMMON(sqc, qp->sqe_dma, pasid); if (ver == QM_HW_V1) { sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size)); sqc->w8 = cpu_to_le16(qp->sq_depth - 1); } else { sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size, qp->sq_depth)); sqc->w8 = 0; /* rand_qc */ } sqc->cq_num = cpu_to_le16(qp_id); sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type)); if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) sqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE << QM_QC_PASID_ENABLE_SHIFT); sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc), DMA_TO_DEVICE); if (dma_mapping_error(dev, sqc_dma)) { kfree(sqc); return -ENOMEM; } ret = hisi_qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0); dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE); kfree(sqc); return ret; } static int qm_cq_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; enum qm_hw_ver ver = qm->ver; struct qm_cqc *cqc; dma_addr_t cqc_dma; int ret; cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL); if (!cqc) return -ENOMEM; INIT_QC_COMMON(cqc, qp->cqe_dma, pasid); if (ver == QM_HW_V1) { cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, QM_QC_CQE_SIZE)); cqc->w8 = cpu_to_le16(qp->cq_depth - 1); } else { cqc->dw3 = 
cpu_to_le32(QM_MK_CQC_DW3_V2(QM_QC_CQE_SIZE, qp->cq_depth)); cqc->w8 = 0; /* rand_qc */ } cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT); if (ver >= QM_HW_V3 && qm->use_sva && !qp->is_in_kernel) cqc->w11 = cpu_to_le16(QM_QC_PASID_ENABLE); cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc), DMA_TO_DEVICE); if (dma_mapping_error(dev, cqc_dma)) { kfree(cqc); return -ENOMEM; } ret = hisi_qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0); dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE); kfree(cqc); return ret; } static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid) { int ret; qm_init_qp_status(qp); ret = qm_sq_ctx_cfg(qp, qp_id, pasid); if (ret) return ret; return qm_cq_ctx_cfg(qp, qp_id, pasid); } static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg) { struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; int qp_id = qp->qp_id; u32 pasid = arg; int ret; if (!qm_qp_avail_state(qm, qp, QP_START)) return -EPERM; ret = qm_qp_ctx_cfg(qp, qp_id, pasid); if (ret) return ret; atomic_set(&qp->qp_status.flags, QP_START); dev_dbg(dev, "queue %d started\n", qp_id); return 0; } /** * hisi_qm_start_qp() - Start a qp into running. * @qp: The qp we want to start to run. * @arg: Accelerator specific argument. * * After this function, qp can receive request from user. Return 0 if * successful, negative error code if failed. */ int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg) { struct hisi_qm *qm = qp->qm; int ret; down_write(&qm->qps_lock); ret = qm_start_qp_nolock(qp, arg); up_write(&qm->qps_lock); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_start_qp); /** * qp_stop_fail_cb() - call request cb. * @qp: stopped failed qp. * * Callback function should be called whether task completed or not. */ static void qp_stop_fail_cb(struct hisi_qp *qp) { int qp_used = atomic_read(&qp->qp_status.used); u16 cur_tail = qp->qp_status.sq_tail; u16 sq_depth = qp->sq_depth; u16 cur_head = (cur_tail + sq_depth - qp_used) % sq_depth; struct hisi_qm *qm = qp->qm; u16 pos; int i; for (i = 0; i < qp_used; i++) { pos = (i + cur_head) % sq_depth; qp->req_cb(qp, qp->sqe + (u32)(qm->sqe_size * pos)); atomic_dec(&qp->qp_status.used); } } /** * qm_drain_qp() - Drain a qp. * @qp: The qp we want to drain. * * Determine whether the queue is cleared by judging the tail pointers of * sq and cq. */ static int qm_drain_qp(struct hisi_qp *qp) { size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc); struct hisi_qm *qm = qp->qm; struct device *dev = &qm->pdev->dev; struct qm_sqc *sqc; struct qm_cqc *cqc; dma_addr_t dma_addr; int ret = 0, i = 0; void *addr; /* No need to judge if master OOO is blocked. 
*/ if (qm_check_dev_error(qm)) return 0; /* Kunpeng930 supports drain qp by device */ if (test_bit(QM_SUPPORT_STOP_QP, &qm->caps)) { ret = qm_stop_qp(qp); if (ret) dev_err(dev, "Failed to stop qp(%u)!\n", qp->qp_id); return ret; } addr = hisi_qm_ctx_alloc(qm, size, &dma_addr); if (IS_ERR(addr)) { dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n"); return -ENOMEM; } while (++i) { ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id); if (ret) { dev_err_ratelimited(dev, "Failed to dump sqc!\n"); break; } sqc = addr; ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)), qp->qp_id); if (ret) { dev_err_ratelimited(dev, "Failed to dump cqc!\n"); break; } cqc = addr + sizeof(struct qm_sqc); if ((sqc->tail == cqc->tail) && (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc))) break; if (i == MAX_WAIT_COUNTS) { dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id); ret = -EBUSY; break; } usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX); } hisi_qm_ctx_free(qm, size, addr, &dma_addr); return ret; } static int qm_stop_qp_nolock(struct hisi_qp *qp) { struct device *dev = &qp->qm->pdev->dev; int ret; /* * It is allowed to stop and release qp when reset, If the qp is * stopped when reset but still want to be released then, the * is_resetting flag should be set negative so that this qp will not * be restarted after reset. */ if (atomic_read(&qp->qp_status.flags) == QP_STOP) { qp->is_resetting = false; return 0; } if (!qm_qp_avail_state(qp->qm, qp, QP_STOP)) return -EPERM; atomic_set(&qp->qp_status.flags, QP_STOP); ret = qm_drain_qp(qp); if (ret) dev_err(dev, "Failed to drain out data for stopping!\n"); flush_workqueue(qp->qm->wq); if (unlikely(qp->is_resetting && atomic_read(&qp->qp_status.used))) qp_stop_fail_cb(qp); dev_dbg(dev, "stop queue %u!", qp->qp_id); return 0; } /** * hisi_qm_stop_qp() - Stop a qp in qm. * @qp: The qp we want to stop. * * This function is reverse of hisi_qm_start_qp. Return 0 if successful. */ int hisi_qm_stop_qp(struct hisi_qp *qp) { int ret; down_write(&qp->qm->qps_lock); ret = qm_stop_qp_nolock(qp); up_write(&qp->qm->qps_lock); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_stop_qp); /** * hisi_qp_send() - Queue up a task in the hardware queue. * @qp: The qp in which to put the message. * @msg: The message. * * This function will return -EBUSY if qp is currently full, and -EAGAIN * if qp related qm is resetting. * * Note: This function may run with qm_irq_thread and ACC reset at same time. * It has no race with qm_irq_thread. However, during hisi_qp_send, ACC * reset may happen, we have no lock here considering performance. This * causes current qm_db sending fail or can not receive sended sqe. QM * sync/async receive function should handle the error sqe. ACC reset * done function should clear used sqe to 0. 
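 *
 * A minimal submission sketch (illustrative; "my_sqe" is whatever SQE the
 * accelerator driver has built, and how the caller retries on -EBUSY or
 * -EAGAIN is its own policy):
 *
 *	ret = hisi_qp_send(qp, &my_sqe);
 *	if (ret == -EBUSY || ret == -EAGAIN)
 *		return ret;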
*/ int hisi_qp_send(struct hisi_qp *qp, const void *msg) { struct hisi_qp_status *qp_status = &qp->qp_status; u16 sq_tail = qp_status->sq_tail; u16 sq_tail_next = (sq_tail + 1) % qp->sq_depth; void *sqe = qm_get_avail_sqe(qp); if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP || atomic_read(&qp->qm->status.flags) == QM_STOP || qp->is_resetting)) { dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n"); return -EAGAIN; } if (!sqe) return -EBUSY; memcpy(sqe, msg, qp->qm->sqe_size); qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0); atomic_inc(&qp->qp_status.used); qp_status->sq_tail = sq_tail_next; return 0; } EXPORT_SYMBOL_GPL(hisi_qp_send); static void hisi_qm_cache_wb(struct hisi_qm *qm) { unsigned int val; if (qm->ver == QM_HW_V1) return; writel(0x1, qm->io_base + QM_CACHE_WB_START); if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE, val, val & BIT(0), POLL_PERIOD, POLL_TIMEOUT)) dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n"); } static void qm_qp_event_notifier(struct hisi_qp *qp) { wake_up_interruptible(&qp->uacce_q->wait); } /* This function returns free number of qp in qm. */ static int hisi_qm_get_available_instances(struct uacce_device *uacce) { struct hisi_qm *qm = uacce->priv; int ret; down_read(&qm->qps_lock); ret = qm->qp_num - qm->qp_in_used; up_read(&qm->qps_lock); return ret; } static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset) { int i; for (i = 0; i < qm->qp_num; i++) qm_set_qp_disable(&qm->qp_array[i], offset); } static int hisi_qm_uacce_get_queue(struct uacce_device *uacce, unsigned long arg, struct uacce_queue *q) { struct hisi_qm *qm = uacce->priv; struct hisi_qp *qp; u8 alg_type = 0; qp = hisi_qm_create_qp(qm, alg_type); if (IS_ERR(qp)) return PTR_ERR(qp); q->priv = qp; q->uacce = uacce; qp->uacce_q = q; qp->event_cb = qm_qp_event_notifier; qp->pasid = arg; qp->is_in_kernel = false; return 0; } static void hisi_qm_uacce_put_queue(struct uacce_queue *q) { struct hisi_qp *qp = q->priv; hisi_qm_release_qp(qp); } /* map sq/cq/doorbell to user space */ static int hisi_qm_uacce_mmap(struct uacce_queue *q, struct vm_area_struct *vma, struct uacce_qfile_region *qfr) { struct hisi_qp *qp = q->priv; struct hisi_qm *qm = qp->qm; resource_size_t phys_base = qm->db_phys_base + qp->qp_id * qm->db_interval; size_t sz = vma->vm_end - vma->vm_start; struct pci_dev *pdev = qm->pdev; struct device *dev = &pdev->dev; unsigned long vm_pgoff; int ret; switch (qfr->type) { case UACCE_QFRT_MMIO: if (qm->ver == QM_HW_V1) { if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR) return -EINVAL; } else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR + QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE)) return -EINVAL; } else { if (sz > qm->db_interval) return -EINVAL; } vm_flags_set(vma, VM_IO); return remap_pfn_range(vma, vma->vm_start, phys_base >> PAGE_SHIFT, sz, pgprot_noncached(vma->vm_page_prot)); case UACCE_QFRT_DUS: if (sz != qp->qdma.size) return -EINVAL; /* * dma_mmap_coherent() requires vm_pgoff as 0 * restore vm_pfoff to initial value for mmap() */ vm_pgoff = vma->vm_pgoff; vma->vm_pgoff = 0; ret = dma_mmap_coherent(dev, vma, qp->qdma.va, qp->qdma.dma, sz); vma->vm_pgoff = vm_pgoff; return ret; default: return -EINVAL; } } static int hisi_qm_uacce_start_queue(struct uacce_queue *q) { struct hisi_qp *qp = q->priv; return hisi_qm_start_qp(qp, qp->pasid); } static void hisi_qm_uacce_stop_queue(struct uacce_queue *q) { hisi_qm_stop_qp(q->priv); } static int hisi_qm_is_q_updated(struct 
uacce_queue *q) { struct hisi_qp *qp = q->priv; struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head; int updated = 0; while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) { /* make sure to read data from memory */ dma_rmb(); qm_cq_head_update(qp); cqe = qp->cqe + qp->qp_status.cq_head; updated = 1; } return updated; } static void qm_set_sqctype(struct uacce_queue *q, u16 type) { struct hisi_qm *qm = q->uacce->priv; struct hisi_qp *qp = q->priv; down_write(&qm->qps_lock); qp->alg_type = type; up_write(&qm->qps_lock); } static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd, unsigned long arg) { struct hisi_qp *qp = q->priv; struct hisi_qp_info qp_info; struct hisi_qp_ctx qp_ctx; if (cmd == UACCE_CMD_QM_SET_QP_CTX) { if (copy_from_user(&qp_ctx, (void __user *)arg, sizeof(struct hisi_qp_ctx))) return -EFAULT; if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1) return -EINVAL; qm_set_sqctype(q, qp_ctx.qc_type); qp_ctx.id = qp->qp_id; if (copy_to_user((void __user *)arg, &qp_ctx, sizeof(struct hisi_qp_ctx))) return -EFAULT; return 0; } else if (cmd == UACCE_CMD_QM_SET_QP_INFO) { if (copy_from_user(&qp_info, (void __user *)arg, sizeof(struct hisi_qp_info))) return -EFAULT; qp_info.sqe_size = qp->qm->sqe_size; qp_info.sq_depth = qp->sq_depth; qp_info.cq_depth = qp->cq_depth; if (copy_to_user((void __user *)arg, &qp_info, sizeof(struct hisi_qp_info))) return -EFAULT; return 0; } return -EINVAL; } /** * qm_hw_err_isolate() - Try to set the isolation status of the uacce device * according to user's configuration of error threshold. * @qm: the uacce device */ static int qm_hw_err_isolate(struct hisi_qm *qm) { struct qm_hw_err *err, *tmp, *hw_err; struct qm_err_isolate *isolate; u32 count = 0; isolate = &qm->isolate_data; #define SECONDS_PER_HOUR 3600 /* All the hw errs are processed by PF driver */ if (qm->uacce->is_vf || isolate->is_isolate || !isolate->err_threshold) return 0; hw_err = kzalloc(sizeof(*hw_err), GFP_KERNEL); if (!hw_err) return -ENOMEM; /* * Time-stamp every slot AER error. Then check the AER error log when the * next device AER error occurred. if the device slot AER error count exceeds * the setting error threshold in one hour, the isolated state will be set * to true. And the AER error logs that exceed one hour will be cleared. */ mutex_lock(&isolate->isolate_lock); hw_err->timestamp = jiffies; list_for_each_entry_safe(err, tmp, &isolate->qm_hw_errs, list) { if ((hw_err->timestamp - err->timestamp) / HZ > SECONDS_PER_HOUR) { list_del(&err->list); kfree(err); } else { count++; } } list_add(&hw_err->list, &isolate->qm_hw_errs); mutex_unlock(&isolate->isolate_lock); if (count >= isolate->err_threshold) isolate->is_isolate = true; return 0; } static void qm_hw_err_destroy(struct hisi_qm *qm) { struct qm_hw_err *err, *tmp; mutex_lock(&qm->isolate_data.isolate_lock); list_for_each_entry_safe(err, tmp, &qm->isolate_data.qm_hw_errs, list) { list_del(&err->list); kfree(err); } mutex_unlock(&qm->isolate_data.isolate_lock); } static enum uacce_dev_state hisi_qm_get_isolate_state(struct uacce_device *uacce) { struct hisi_qm *qm = uacce->priv; struct hisi_qm *pf_qm; if (uacce->is_vf) pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); else pf_qm = qm; return pf_qm->isolate_data.is_isolate ? 
UACCE_DEV_ISOLATE : UACCE_DEV_NORMAL; } static int hisi_qm_isolate_threshold_write(struct uacce_device *uacce, u32 num) { struct hisi_qm *qm = uacce->priv; /* Must be set by PF */ if (uacce->is_vf) return -EPERM; if (qm->isolate_data.is_isolate) return -EPERM; qm->isolate_data.err_threshold = num; /* After the policy is updated, need to reset the hardware err list */ qm_hw_err_destroy(qm); return 0; } static u32 hisi_qm_isolate_threshold_read(struct uacce_device *uacce) { struct hisi_qm *qm = uacce->priv; struct hisi_qm *pf_qm; if (uacce->is_vf) { pf_qm = pci_get_drvdata(pci_physfn(qm->pdev)); return pf_qm->isolate_data.err_threshold; } return qm->isolate_data.err_threshold; } static const struct uacce_ops uacce_qm_ops = { .get_available_instances = hisi_qm_get_available_instances, .get_queue = hisi_qm_uacce_get_queue, .put_queue = hisi_qm_uacce_put_queue, .start_queue = hisi_qm_uacce_start_queue, .stop_queue = hisi_qm_uacce_stop_queue, .mmap = hisi_qm_uacce_mmap, .ioctl = hisi_qm_uacce_ioctl, .is_q_updated = hisi_qm_is_q_updated, .get_isolate_state = hisi_qm_get_isolate_state, .isolate_err_threshold_write = hisi_qm_isolate_threshold_write, .isolate_err_threshold_read = hisi_qm_isolate_threshold_read, }; static void qm_remove_uacce(struct hisi_qm *qm) { struct uacce_device *uacce = qm->uacce; if (qm->use_sva) { qm_hw_err_destroy(qm); uacce_remove(uacce); qm->uacce = NULL; } } static int qm_alloc_uacce(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; struct uacce_device *uacce; unsigned long mmio_page_nr; unsigned long dus_page_nr; u16 sq_depth, cq_depth; struct uacce_interface interface = { .flags = UACCE_DEV_SVA, .ops = &uacce_qm_ops, }; int ret; ret = strscpy(interface.name, dev_driver_string(&pdev->dev), sizeof(interface.name)); if (ret < 0) return -ENAMETOOLONG; uacce = uacce_alloc(&pdev->dev, &interface); if (IS_ERR(uacce)) return PTR_ERR(uacce); if (uacce->flags & UACCE_DEV_SVA) { qm->use_sva = true; } else { /* only consider sva case */ qm_remove_uacce(qm); return -EINVAL; } uacce->is_vf = pdev->is_virtfn; uacce->priv = qm; if (qm->ver == QM_HW_V1) uacce->api_ver = HISI_QM_API_VER_BASE; else if (qm->ver == QM_HW_V2) uacce->api_ver = HISI_QM_API_VER2_BASE; else uacce->api_ver = HISI_QM_API_VER3_BASE; if (qm->ver == QM_HW_V1) mmio_page_nr = QM_DOORBELL_PAGE_NR; else if (!test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) mmio_page_nr = QM_DOORBELL_PAGE_NR + QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE; else mmio_page_nr = qm->db_interval / PAGE_SIZE; qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); /* Add one more page for device or qp status */ dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth + PAGE_SIZE) >> PAGE_SHIFT; uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr; uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr; qm->uacce = uacce; INIT_LIST_HEAD(&qm->isolate_data.qm_hw_errs); mutex_init(&qm->isolate_data.isolate_lock); return 0; } /** * qm_frozen() - Try to froze QM to cut continuous queue request. If * there is user on the QM, return failure without doing anything. * @qm: The qm needed to be fronzen. * * This function frozes QM, then we can do SRIOV disabling. 
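 *
 * In a driver's remove path this is reached through
 * hisi_qm_wait_task_finish() below; a sketch of that teardown order
 * (illustrative only, "my_qm_list" assumed, accelerator-specific steps and
 * error handling omitted):
 *
 *	hisi_qm_wait_task_finish(qm, &my_qm_list);
 *	hisi_qm_sriov_disable(qm->pdev, true);
 *	hisi_qm_dev_err_uninit(qm);
 *	hisi_qm_uninit(qm);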
*/ static int qm_frozen(struct hisi_qm *qm) { if (test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl)) return 0; down_write(&qm->qps_lock); if (!qm->qp_in_used) { qm->qp_in_used = qm->qp_num; up_write(&qm->qps_lock); set_bit(QM_DRIVER_REMOVING, &qm->misc_ctl); return 0; } up_write(&qm->qps_lock); return -EBUSY; } static int qm_try_frozen_vfs(struct pci_dev *pdev, struct hisi_qm_list *qm_list) { struct hisi_qm *qm, *vf_qm; struct pci_dev *dev; int ret = 0; if (!qm_list || !pdev) return -EINVAL; /* Try to frozen all the VFs as disable SRIOV */ mutex_lock(&qm_list->lock); list_for_each_entry(qm, &qm_list->list, list) { dev = qm->pdev; if (dev == pdev) continue; if (pci_physfn(dev) == pdev) { vf_qm = pci_get_drvdata(dev); ret = qm_frozen(vf_qm); if (ret) goto frozen_fail; } } frozen_fail: mutex_unlock(&qm_list->lock); return ret; } /** * hisi_qm_wait_task_finish() - Wait until the task is finished * when removing the driver. * @qm: The qm needed to wait for the task to finish. * @qm_list: The list of all available devices. */ void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list) { while (qm_frozen(qm) || ((qm->fun_type == QM_HW_PF) && qm_try_frozen_vfs(qm->pdev, qm_list))) { msleep(WAIT_PERIOD); } while (test_bit(QM_RST_SCHED, &qm->misc_ctl) || test_bit(QM_RESETTING, &qm->misc_ctl)) msleep(WAIT_PERIOD); if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) flush_work(&qm->cmd_process); udelay(REMOVE_WAIT_DELAY); } EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish); static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num) { struct device *dev = &qm->pdev->dev; struct qm_dma *qdma; int i; for (i = num - 1; i >= 0; i--) { qdma = &qm->qp_array[i].qdma; dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma); kfree(qm->poll_data[i].qp_finish_id); } kfree(qm->poll_data); kfree(qm->qp_array); } static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id, u16 sq_depth, u16 cq_depth) { struct device *dev = &qm->pdev->dev; size_t off = qm->sqe_size * sq_depth; struct hisi_qp *qp; int ret = -ENOMEM; qm->poll_data[id].qp_finish_id = kcalloc(qm->qp_num, sizeof(u16), GFP_KERNEL); if (!qm->poll_data[id].qp_finish_id) return -ENOMEM; qp = &qm->qp_array[id]; qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma, GFP_KERNEL); if (!qp->qdma.va) goto err_free_qp_finish_id; qp->sqe = qp->qdma.va; qp->sqe_dma = qp->qdma.dma; qp->cqe = qp->qdma.va + off; qp->cqe_dma = qp->qdma.dma + off; qp->qdma.size = dma_size; qp->sq_depth = sq_depth; qp->cq_depth = cq_depth; qp->qm = qm; qp->qp_id = id; return 0; err_free_qp_finish_id: kfree(qm->poll_data[id].qp_finish_id); return ret; } static void hisi_qm_pre_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; if (qm->ver == QM_HW_V1) qm->ops = &qm_hw_ops_v1; else if (qm->ver == QM_HW_V2) qm->ops = &qm_hw_ops_v2; else qm->ops = &qm_hw_ops_v3; pci_set_drvdata(pdev, qm); mutex_init(&qm->mailbox_lock); init_rwsem(&qm->qps_lock); qm->qp_in_used = 0; qm->misc_ctl = false; if (test_bit(QM_SUPPORT_RPM, &qm->caps)) { if (!acpi_device_power_manageable(ACPI_COMPANION(&pdev->dev))) dev_info(&pdev->dev, "_PS0 and _PR0 are not defined"); } } static void qm_cmd_uninit(struct hisi_qm *qm) { u32 val; if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) return; val = readl(qm->io_base + QM_IFC_INT_MASK); val |= QM_IFC_INT_DISABLE; writel(val, qm->io_base + QM_IFC_INT_MASK); } static void qm_cmd_init(struct hisi_qm *qm) { u32 val; if (!test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) return; /* Clear communication interrupt source */ qm_clear_cmd_interrupt(qm, 
QM_IFC_INT_SOURCE_CLR); /* Enable pf to vf communication reg. */ val = readl(qm->io_base + QM_IFC_INT_MASK); val &= ~QM_IFC_INT_DISABLE; writel(val, qm->io_base + QM_IFC_INT_MASK); } static void qm_put_pci_res(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) iounmap(qm->db_io_base); iounmap(qm->io_base); pci_release_mem_regions(pdev); } static void hisi_qm_pci_uninit(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; pci_free_irq_vectors(pdev); qm_put_pci_res(qm); pci_disable_device(pdev); } static void hisi_qm_set_state(struct hisi_qm *qm, u8 state) { if (qm->ver > QM_HW_V2 && qm->fun_type == QM_HW_VF) writel(state, qm->io_base + QM_VF_STATE); } static void hisi_qm_unint_work(struct hisi_qm *qm) { destroy_workqueue(qm->wq); } static void hisi_qm_memory_uninit(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; hisi_qp_memory_uninit(qm, qm->qp_num); if (qm->qdma.va) { hisi_qm_cache_wb(qm); dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); } idr_destroy(&qm->qp_idr); if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) kfree(qm->factor); } /** * hisi_qm_uninit() - Uninitialize qm. * @qm: The qm needed uninit. * * This function uninits qm related device resources. */ void hisi_qm_uninit(struct hisi_qm *qm) { qm_cmd_uninit(qm); hisi_qm_unint_work(qm); down_write(&qm->qps_lock); if (!qm_avail_state(qm, QM_CLOSE)) { up_write(&qm->qps_lock); return; } hisi_qm_memory_uninit(qm); hisi_qm_set_state(qm, QM_NOT_READY); up_write(&qm->qps_lock); qm_irqs_unregister(qm); hisi_qm_pci_uninit(qm); if (qm->use_sva) { uacce_remove(qm->uacce); qm->uacce = NULL; } } EXPORT_SYMBOL_GPL(hisi_qm_uninit); /** * hisi_qm_get_vft() - Get vft from a qm. * @qm: The qm we want to get its vft. * @base: The base number of queue in vft. * @number: The number of queues in vft. * * We can allocate multiple queues to a qm by configuring virtual function * table. We get related configures by this function. Normally, we call this * function in VF driver to get the queue information. * * qm hw v1 does not support this interface. */ static int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number) { if (!base || !number) return -EINVAL; if (!qm->ops->get_vft) { dev_err(&qm->pdev->dev, "Don't support vft read!\n"); return -EINVAL; } return qm->ops->get_vft(qm, base, number); } /** * hisi_qm_set_vft() - Set vft to a qm. * @qm: The qm we want to set its vft. * @fun_num: The function number. * @base: The base number of queue in vft. * @number: The number of queues in vft. * * This function is alway called in PF driver, it is used to assign queues * among PF and VFs. 
* * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1) * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1) * (VF function number 0x2) */ static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base, u32 number) { u32 max_q_num = qm->ctrl_qp_num; if (base >= max_q_num || number > max_q_num || (base + number) > max_q_num) return -EINVAL; return qm_set_sqc_cqc_vft(qm, fun_num, base, number); } static void qm_init_eq_aeq_status(struct hisi_qm *qm) { struct hisi_qm_status *status = &qm->status; status->eq_head = 0; status->aeq_head = 0; status->eqc_phase = true; status->aeqc_phase = true; } static void qm_enable_eq_aeq_interrupts(struct hisi_qm *qm) { /* Clear eq/aeq interrupt source */ qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0); qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0); writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK); writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK); } static void qm_disable_eq_aeq_interrupts(struct hisi_qm *qm) { writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK); writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK); } static int qm_eq_ctx_cfg(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; struct qm_eqc *eqc; dma_addr_t eqc_dma; int ret; eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL); if (!eqc) return -ENOMEM; eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma)); eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma)); if (qm->ver == QM_HW_V1) eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE); eqc->dw6 = cpu_to_le32(((u32)qm->eq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc), DMA_TO_DEVICE); if (dma_mapping_error(dev, eqc_dma)) { kfree(eqc); return -ENOMEM; } ret = hisi_qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0); dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE); kfree(eqc); return ret; } static int qm_aeq_ctx_cfg(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; struct qm_aeqc *aeqc; dma_addr_t aeqc_dma; int ret; aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL); if (!aeqc) return -ENOMEM; aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma)); aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma)); aeqc->dw6 = cpu_to_le32(((u32)qm->aeq_depth - 1) | (1 << QM_EQC_PHASE_SHIFT)); aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc), DMA_TO_DEVICE); if (dma_mapping_error(dev, aeqc_dma)) { kfree(aeqc); return -ENOMEM; } ret = hisi_qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0); dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE); kfree(aeqc); return ret; } static int qm_eq_aeq_ctx_cfg(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; int ret; qm_init_eq_aeq_status(qm); ret = qm_eq_ctx_cfg(qm); if (ret) { dev_err(dev, "Set eqc failed!\n"); return ret; } return qm_aeq_ctx_cfg(qm); } static int __hisi_qm_start(struct hisi_qm *qm) { int ret; WARN_ON(!qm->qdma.va); if (qm->fun_type == QM_HW_PF) { ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num); if (ret) return ret; } ret = qm_eq_aeq_ctx_cfg(qm); if (ret) return ret; ret = hisi_qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0); if (ret) return ret; ret = hisi_qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0); if (ret) return ret; qm_init_prefetch(qm); qm_enable_eq_aeq_interrupts(qm); return 0; } /** * hisi_qm_start() - start qm * @qm: The qm to be started. * * This function starts a qm, then we can allocate qp from this qm. 
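 *
 * Typical probe-time ordering (an illustrative sketch; error handling is
 * trimmed and hisi_qm_dev_err_init() simply returns on VFs):
 *
 *	hisi_qm_dev_err_init(qm);
 *	ret = hisi_qm_start(qm);
 *	if (ret)
 *		return ret;
 *
 * Queue pairs can then be created, e.g. via hisi_qm_alloc_qps_node() below.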
*/ int hisi_qm_start(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; int ret = 0; down_write(&qm->qps_lock); if (!qm_avail_state(qm, QM_START)) { up_write(&qm->qps_lock); return -EPERM; } dev_dbg(dev, "qm start with %u queue pairs\n", qm->qp_num); if (!qm->qp_num) { dev_err(dev, "qp_num should not be 0\n"); ret = -EINVAL; goto err_unlock; } ret = __hisi_qm_start(qm); if (!ret) atomic_set(&qm->status.flags, QM_START); hisi_qm_set_state(qm, QM_READY); err_unlock: up_write(&qm->qps_lock); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_start); static int qm_restart(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; struct hisi_qp *qp; int ret, i; ret = hisi_qm_start(qm); if (ret < 0) return ret; down_write(&qm->qps_lock); for (i = 0; i < qm->qp_num; i++) { qp = &qm->qp_array[i]; if (atomic_read(&qp->qp_status.flags) == QP_STOP && qp->is_resetting == true) { ret = qm_start_qp_nolock(qp, 0); if (ret < 0) { dev_err(dev, "Failed to start qp%d!\n", i); up_write(&qm->qps_lock); return ret; } qp->is_resetting = false; } } up_write(&qm->qps_lock); return 0; } /* Stop started qps in reset flow */ static int qm_stop_started_qp(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; struct hisi_qp *qp; int i, ret; for (i = 0; i < qm->qp_num; i++) { qp = &qm->qp_array[i]; if (qp && atomic_read(&qp->qp_status.flags) == QP_START) { qp->is_resetting = true; ret = qm_stop_qp_nolock(qp); if (ret < 0) { dev_err(dev, "Failed to stop qp%d!\n", i); return ret; } } } return 0; } /** * qm_clear_queues() - Clear all queues memory in a qm. * @qm: The qm in which the queues will be cleared. * * This function clears all queues memory in a qm. Reset of accelerator can * use this to clear queues. */ static void qm_clear_queues(struct hisi_qm *qm) { struct hisi_qp *qp; int i; for (i = 0; i < qm->qp_num; i++) { qp = &qm->qp_array[i]; if (qp->is_in_kernel && qp->is_resetting) memset(qp->qdma.va, 0, qp->qdma.size); } memset(qm->qdma.va, 0, qm->qdma.size); } /** * hisi_qm_stop() - Stop a qm. * @qm: The qm which will be stopped. * @r: The reason to stop qm. * * This function stops qm and its qps, then qm can not accept request. * Related resources are not released at this state, we can use hisi_qm_start * to let qm start again. 
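 *
 * Because resources stay allocated across a stop, a stopped qm can simply
 * be started again (illustrative; QM_DOWN is the stop reason the shutdown
 * and FLR paths in this file use):
 *
 *	ret = hisi_qm_stop(qm, QM_DOWN);
 *	if (!ret)
 *		ret = hisi_qm_start(qm);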
*/ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r) { struct device *dev = &qm->pdev->dev; int ret = 0; down_write(&qm->qps_lock); qm->status.stop_reason = r; if (!qm_avail_state(qm, QM_STOP)) { ret = -EPERM; goto err_unlock; } if (qm->status.stop_reason == QM_SOFT_RESET || qm->status.stop_reason == QM_DOWN) { hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); ret = qm_stop_started_qp(qm); if (ret < 0) { dev_err(dev, "Failed to stop started qp!\n"); goto err_unlock; } hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); } qm_disable_eq_aeq_interrupts(qm); if (qm->fun_type == QM_HW_PF) { ret = hisi_qm_set_vft(qm, 0, 0, 0); if (ret < 0) { dev_err(dev, "Failed to set vft!\n"); ret = -EBUSY; goto err_unlock; } } qm_clear_queues(qm); atomic_set(&qm->status.flags, QM_STOP); err_unlock: up_write(&qm->qps_lock); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_stop); static void qm_hw_error_init(struct hisi_qm *qm) { if (!qm->ops->hw_error_init) { dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n"); return; } qm->ops->hw_error_init(qm); } static void qm_hw_error_uninit(struct hisi_qm *qm) { if (!qm->ops->hw_error_uninit) { dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n"); return; } qm->ops->hw_error_uninit(qm); } static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm) { if (!qm->ops->hw_error_handle) { dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n"); return ACC_ERR_NONE; } return qm->ops->hw_error_handle(qm); } /** * hisi_qm_dev_err_init() - Initialize device error configuration. * @qm: The qm for which we want to do error initialization. * * Initialize QM and device error related configuration. */ void hisi_qm_dev_err_init(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_VF) return; qm_hw_error_init(qm); if (!qm->err_ini->hw_err_enable) { dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n"); return; } qm->err_ini->hw_err_enable(qm); } EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init); /** * hisi_qm_dev_err_uninit() - Uninitialize device error configuration. * @qm: The qm for which we want to do error uninitialization. * * Uninitialize QM and device error related configuration. */ void hisi_qm_dev_err_uninit(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_VF) return; qm_hw_error_uninit(qm); if (!qm->err_ini->hw_err_disable) { dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n"); return; } qm->err_ini->hw_err_disable(qm); } EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit); /** * hisi_qm_free_qps() - free multiple queue pairs. * @qps: The queue pairs need to be freed. * @qp_num: The num of queue pairs. 
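 *
 * Pairs with hisi_qm_alloc_qps_node() below; a sketch of a single
 * kernel-mode qp's lifecycle (illustrative, error handling trimmed,
 * "my_qm_list" is the accelerator driver's own hisi_qm_list):
 *
 *	struct hisi_qp *qps[1];
 *
 *	ret = hisi_qm_alloc_qps_node(&my_qm_list, 1, 0, numa_node_id(), qps);
 *	ret = hisi_qm_start_qp(qps[0], 0);
 *	(submit work with hisi_qp_send(), see above)
 *	hisi_qm_stop_qp(qps[0]);
 *	hisi_qm_free_qps(qps, 1);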
*/ void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num) { int i; if (!qps || qp_num <= 0) return; for (i = qp_num - 1; i >= 0; i--) hisi_qm_release_qp(qps[i]); } EXPORT_SYMBOL_GPL(hisi_qm_free_qps); static void free_list(struct list_head *head) { struct hisi_qm_resource *res, *tmp; list_for_each_entry_safe(res, tmp, head, list) { list_del(&res->list); kfree(res); } } static int hisi_qm_sort_devices(int node, struct list_head *head, struct hisi_qm_list *qm_list) { struct hisi_qm_resource *res, *tmp; struct hisi_qm *qm; struct list_head *n; struct device *dev; int dev_node; list_for_each_entry(qm, &qm_list->list, list) { dev = &qm->pdev->dev; dev_node = dev_to_node(dev); if (dev_node < 0) dev_node = 0; res = kzalloc(sizeof(*res), GFP_KERNEL); if (!res) return -ENOMEM; res->qm = qm; res->distance = node_distance(dev_node, node); n = head; list_for_each_entry(tmp, head, list) { if (res->distance < tmp->distance) { n = &tmp->list; break; } } list_add_tail(&res->list, n); } return 0; } /** * hisi_qm_alloc_qps_node() - Create multiple queue pairs. * @qm_list: The list of all available devices. * @qp_num: The number of queue pairs need created. * @alg_type: The algorithm type. * @node: The numa node. * @qps: The queue pairs need created. * * This function will sort all available device according to numa distance. * Then try to create all queue pairs from one device, if all devices do * not meet the requirements will return error. */ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num, u8 alg_type, int node, struct hisi_qp **qps) { struct hisi_qm_resource *tmp; int ret = -ENODEV; LIST_HEAD(head); int i; if (!qps || !qm_list || qp_num <= 0) return -EINVAL; mutex_lock(&qm_list->lock); if (hisi_qm_sort_devices(node, &head, qm_list)) { mutex_unlock(&qm_list->lock); goto err; } list_for_each_entry(tmp, &head, list) { for (i = 0; i < qp_num; i++) { qps[i] = hisi_qm_create_qp(tmp->qm, alg_type); if (IS_ERR(qps[i])) { hisi_qm_free_qps(qps, i); break; } } if (i == qp_num) { ret = 0; break; } } mutex_unlock(&qm_list->lock); if (ret) pr_info("Failed to create qps, node[%d], alg[%u], qp[%d]!\n", node, alg_type, qp_num); err: free_list(&head); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node); static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs) { u32 remain_q_num, vfs_q_num, act_q_num, q_num, i, j; u32 max_qp_num = qm->max_qp_num; u32 q_base = qm->qp_num; int ret; if (!num_vfs) return -EINVAL; vfs_q_num = qm->ctrl_qp_num - qm->qp_num; /* If vfs_q_num is less than num_vfs, return error. */ if (vfs_q_num < num_vfs) return -EINVAL; q_num = vfs_q_num / num_vfs; remain_q_num = vfs_q_num % num_vfs; for (i = num_vfs; i > 0; i--) { /* * if q_num + remain_q_num > max_qp_num in last vf, divide the * remaining queues equally. 
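 * For example, with vfs_q_num = 10 and num_vfs = 3: q_num = 3 and
 * remain_q_num = 1, so the first iteration (VF3) gets 4 queues when that
 * still fits max_qp_num and VF2/VF1 get 3 each; otherwise the remainder is
 * handed out one extra queue per VF by the remain_q_num branch below.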
*/ if (i == num_vfs && q_num + remain_q_num <= max_qp_num) { act_q_num = q_num + remain_q_num; remain_q_num = 0; } else if (remain_q_num > 0) { act_q_num = q_num + 1; remain_q_num--; } else { act_q_num = q_num; } act_q_num = min(act_q_num, max_qp_num); ret = hisi_qm_set_vft(qm, i, q_base, act_q_num); if (ret) { for (j = num_vfs; j > i; j--) hisi_qm_set_vft(qm, j, 0, 0); return ret; } q_base += act_q_num; } return 0; } static int qm_clear_vft_config(struct hisi_qm *qm) { int ret; u32 i; for (i = 1; i <= qm->vfs_num; i++) { ret = hisi_qm_set_vft(qm, i, 0, 0); if (ret) return ret; } qm->vfs_num = 0; return 0; } static int qm_func_shaper_enable(struct hisi_qm *qm, u32 fun_index, u32 qos) { struct device *dev = &qm->pdev->dev; u32 ir = qos * QM_QOS_RATE; int ret, total_vfs, i; total_vfs = pci_sriov_get_totalvfs(qm->pdev); if (fun_index > total_vfs) return -EINVAL; qm->factor[fun_index].func_qos = qos; ret = qm_get_shaper_para(ir, &qm->factor[fun_index]); if (ret) { dev_err(dev, "failed to calculate shaper parameter!\n"); return -EINVAL; } for (i = ALG_TYPE_0; i <= ALG_TYPE_1; i++) { /* The base number of queue reuse for different alg type */ ret = qm_set_vft_common(qm, SHAPER_VFT, fun_index, i, 1); if (ret) { dev_err(dev, "type: %d, failed to set shaper vft!\n", i); return -EINVAL; } } return 0; } static u32 qm_get_shaper_vft_qos(struct hisi_qm *qm, u32 fun_index) { u64 cir_u = 0, cir_b = 0, cir_s = 0; u64 shaper_vft, ir_calc, ir; unsigned int val; u32 error_rate; int ret; ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, val & BIT(0), POLL_PERIOD, POLL_TIMEOUT); if (ret) return 0; writel(0x1, qm->io_base + QM_VFT_CFG_OP_WR); writel(SHAPER_VFT, qm->io_base + QM_VFT_CFG_TYPE); writel(fun_index, qm->io_base + QM_VFT_CFG); writel(0x0, qm->io_base + QM_VFT_CFG_RDY); writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE); ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val, val & BIT(0), POLL_PERIOD, POLL_TIMEOUT); if (ret) return 0; shaper_vft = readl(qm->io_base + QM_VFT_CFG_DATA_L) | ((u64)readl(qm->io_base + QM_VFT_CFG_DATA_H) << 32); cir_b = shaper_vft & QM_SHAPER_CIR_B_MASK; cir_u = shaper_vft & QM_SHAPER_CIR_U_MASK; cir_u = cir_u >> QM_SHAPER_FACTOR_CIR_U_SHIFT; cir_s = shaper_vft & QM_SHAPER_CIR_S_MASK; cir_s = cir_s >> QM_SHAPER_FACTOR_CIR_S_SHIFT; ir_calc = acc_shaper_para_calc(cir_b, cir_u, cir_s); ir = qm->factor[fun_index].func_qos * QM_QOS_RATE; error_rate = QM_QOS_EXPAND_RATE * (u32)abs(ir_calc - ir) / ir; if (error_rate > QM_QOS_MIN_ERROR_RATE) { pci_err(qm->pdev, "error_rate: %u, get function qos is error!\n", error_rate); return 0; } return ir; } static void qm_vf_get_qos(struct hisi_qm *qm, u32 fun_num) { struct device *dev = &qm->pdev->dev; u64 mb_cmd; u32 qos; int ret; qos = qm_get_shaper_vft_qos(qm, fun_num); if (!qos) { dev_err(dev, "function(%u) failed to get qos by PF!\n", fun_num); return; } mb_cmd = QM_PF_SET_QOS | (u64)qos << QM_MB_CMD_DATA_SHIFT; ret = qm_ping_single_vf(qm, mb_cmd, fun_num); if (ret) dev_err(dev, "failed to send cmd to VF(%u)!\n", fun_num); } static int qm_vf_read_qos(struct hisi_qm *qm) { int cnt = 0; int ret = -EINVAL; /* reset mailbox qos val */ qm->mb_qos = 0; /* vf ping pf to get function qos */ ret = qm_ping_pf(qm, QM_VF_GET_QOS); if (ret) { pci_err(qm->pdev, "failed to send cmd to PF to get qos!\n"); return ret; } while (true) { msleep(QM_WAIT_DST_ACK); if (qm->mb_qos) break; if (++cnt > QM_MAX_VF_WAIT_COUNT) { pci_err(qm->pdev, "PF ping VF timeout!\n"); return -ETIMEDOUT; } } return ret; } static ssize_t 
qm_algqos_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct hisi_qm *qm = filp->private_data; char tbuf[QM_DBG_READ_LEN]; u32 qos_val, ir; int ret; ret = hisi_qm_get_dfx_access(qm); if (ret) return ret; /* Mailbox and reset cannot be operated at the same time */ if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { pci_err(qm->pdev, "dev resetting, read alg qos failed!\n"); ret = -EAGAIN; goto err_put_dfx_access; } if (qm->fun_type == QM_HW_PF) { ir = qm_get_shaper_vft_qos(qm, 0); } else { ret = qm_vf_read_qos(qm); if (ret) goto err_get_status; ir = qm->mb_qos; } qos_val = ir / QM_QOS_RATE; ret = scnprintf(tbuf, QM_DBG_READ_LEN, "%u\n", qos_val); ret = simple_read_from_buffer(buf, count, pos, tbuf, ret); err_get_status: clear_bit(QM_RESETTING, &qm->misc_ctl); err_put_dfx_access: hisi_qm_put_dfx_access(qm); return ret; } static ssize_t qm_get_qos_value(struct hisi_qm *qm, const char *buf, unsigned long *val, unsigned int *fun_index) { const struct bus_type *bus_type = qm->pdev->dev.bus; char tbuf_bdf[QM_DBG_READ_LEN] = {0}; char val_buf[QM_DBG_READ_LEN] = {0}; struct pci_dev *pdev; struct device *dev; int ret; ret = sscanf(buf, "%s %s", tbuf_bdf, val_buf); if (ret != QM_QOS_PARAM_NUM) return -EINVAL; ret = kstrtoul(val_buf, 10, val); if (ret || *val == 0 || *val > QM_QOS_MAX_VAL) { pci_err(qm->pdev, "input qos value is error, please set 1~1000!\n"); return -EINVAL; } dev = bus_find_device_by_name(bus_type, NULL, tbuf_bdf); if (!dev) { pci_err(qm->pdev, "input pci bdf number is error!\n"); return -ENODEV; } pdev = container_of(dev, struct pci_dev, dev); *fun_index = pdev->devfn; return 0; } static ssize_t qm_algqos_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct hisi_qm *qm = filp->private_data; char tbuf[QM_DBG_READ_LEN]; unsigned int fun_index; unsigned long val; int len, ret; if (*pos != 0) return 0; if (count >= QM_DBG_READ_LEN) return -ENOSPC; len = simple_write_to_buffer(tbuf, QM_DBG_READ_LEN - 1, pos, buf, count); if (len < 0) return len; tbuf[len] = '\0'; ret = qm_get_qos_value(qm, tbuf, &val, &fun_index); if (ret) return ret; /* Mailbox and reset cannot be operated at the same time */ if (test_and_set_bit(QM_RESETTING, &qm->misc_ctl)) { pci_err(qm->pdev, "dev resetting, write alg qos failed!\n"); return -EAGAIN; } ret = qm_pm_get_sync(qm); if (ret) { ret = -EINVAL; goto err_get_status; } ret = qm_func_shaper_enable(qm, fun_index, val); if (ret) { pci_err(qm->pdev, "failed to enable function shaper!\n"); ret = -EINVAL; goto err_put_sync; } pci_info(qm->pdev, "the qos value of function%u is set to %lu.\n", fun_index, val); ret = count; err_put_sync: qm_pm_put_sync(qm); err_get_status: clear_bit(QM_RESETTING, &qm->misc_ctl); return ret; } static const struct file_operations qm_algqos_fops = { .owner = THIS_MODULE, .open = simple_open, .read = qm_algqos_read, .write = qm_algqos_write, }; /** * hisi_qm_set_algqos_init() - Initialize function qos debugfs files. * @qm: The qm for which we want to add debugfs files. * * Create function qos debugfs files, VF ping PF to get function qos. 
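 *
 * From user space the PF-side file takes "<bdf> <qos>" on write and reports
 * the current value on read, for example (BDF and debugfs location are
 * illustrative):
 *
 *	echo "0000:81:00.0 500" > .../alg_qos
 *	cat .../alg_qos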
*/ void hisi_qm_set_algqos_init(struct hisi_qm *qm) { if (qm->fun_type == QM_HW_PF) debugfs_create_file("alg_qos", 0644, qm->debug.debug_root, qm, &qm_algqos_fops); else if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) debugfs_create_file("alg_qos", 0444, qm->debug.debug_root, qm, &qm_algqos_fops); } static void hisi_qm_init_vf_qos(struct hisi_qm *qm, int total_func) { int i; for (i = 1; i <= total_func; i++) qm->factor[i].func_qos = QM_QOS_MAX_VAL; } /** * hisi_qm_sriov_enable() - enable virtual functions * @pdev: the PCIe device * @max_vfs: the number of virtual functions to enable * * Returns the number of enabled VFs. If there are VFs enabled already or * max_vfs is more than the total number of device can be enabled, returns * failure. */ int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs) { struct hisi_qm *qm = pci_get_drvdata(pdev); int pre_existing_vfs, num_vfs, total_vfs, ret; ret = qm_pm_get_sync(qm); if (ret) return ret; total_vfs = pci_sriov_get_totalvfs(pdev); pre_existing_vfs = pci_num_vf(pdev); if (pre_existing_vfs) { pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n", pre_existing_vfs); goto err_put_sync; } if (max_vfs > total_vfs) { pci_err(pdev, "%d VFs is more than total VFs %d!\n", max_vfs, total_vfs); ret = -ERANGE; goto err_put_sync; } num_vfs = max_vfs; if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) hisi_qm_init_vf_qos(qm, num_vfs); ret = qm_vf_q_assign(qm, num_vfs); if (ret) { pci_err(pdev, "Can't assign queues for VF!\n"); goto err_put_sync; } qm->vfs_num = num_vfs; ret = pci_enable_sriov(pdev, num_vfs); if (ret) { pci_err(pdev, "Can't enable VF!\n"); qm_clear_vft_config(qm); goto err_put_sync; } pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs); return num_vfs; err_put_sync: qm_pm_put_sync(qm); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable); /** * hisi_qm_sriov_disable - disable virtual functions * @pdev: the PCI device. * @is_frozen: true when all the VFs are frozen. * * Return failure if there are VFs assigned already or VF is in used. */ int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen) { struct hisi_qm *qm = pci_get_drvdata(pdev); int ret; if (pci_vfs_assigned(pdev)) { pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n"); return -EPERM; } /* While VF is in used, SRIOV cannot be disabled. */ if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) { pci_err(pdev, "Task is using its VF!\n"); return -EBUSY; } pci_disable_sriov(pdev); ret = qm_clear_vft_config(qm); if (ret) return ret; qm_pm_put_sync(qm); return 0; } EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable); /** * hisi_qm_sriov_configure - configure the number of VFs * @pdev: The PCI device * @num_vfs: The number of VFs need enabled * * Enable SR-IOV according to num_vfs, 0 means disable. 
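 *
 * Intended to be plugged straight into the accelerator's pci_driver
 * (illustrative driver name) so that "echo N > sriov_numvfs" lands here:
 *
 *	static struct pci_driver my_acc_driver = {
 *		...
 *		.sriov_configure = hisi_qm_sriov_configure,
 *	};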
*/ int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs) { if (num_vfs == 0) return hisi_qm_sriov_disable(pdev, false); else return hisi_qm_sriov_enable(pdev, num_vfs); } EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure); static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm) { u32 err_sts; if (!qm->err_ini->get_dev_hw_err_status) { dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n"); return ACC_ERR_NONE; } /* get device hardware error status */ err_sts = qm->err_ini->get_dev_hw_err_status(qm); if (err_sts) { if (err_sts & qm->err_info.ecc_2bits_mask) qm->err_status.is_dev_ecc_mbit = true; if (qm->err_ini->log_dev_hw_err) qm->err_ini->log_dev_hw_err(qm, err_sts); if (err_sts & qm->err_info.dev_reset_mask) return ACC_ERR_NEED_RESET; if (qm->err_ini->clear_dev_hw_err_status) qm->err_ini->clear_dev_hw_err_status(qm, err_sts); } return ACC_ERR_RECOVERED; } static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm) { enum acc_err_result qm_ret, dev_ret; /* log qm error */ qm_ret = qm_hw_error_handle(qm); /* log device error */ dev_ret = qm_dev_err_handle(qm); return (qm_ret == ACC_ERR_NEED_RESET || dev_ret == ACC_ERR_NEED_RESET) ? ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED; } /** * hisi_qm_dev_err_detected() - Get device and qm error status then log it. * @pdev: The PCI device which need report error. * @state: The connectivity between CPU and device. * * We register this function into PCIe AER handlers, It will report device or * qm hardware error status when error occur. */ pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev, pci_channel_state_t state) { struct hisi_qm *qm = pci_get_drvdata(pdev); enum acc_err_result ret; if (pdev->is_virtfn) return PCI_ERS_RESULT_NONE; pci_info(pdev, "PCI error detected, state(=%u)!!\n", state); if (state == pci_channel_io_perm_failure) return PCI_ERS_RESULT_DISCONNECT; ret = qm_process_dev_error(qm); if (ret == ACC_ERR_NEED_RESET) return PCI_ERS_RESULT_NEED_RESET; return PCI_ERS_RESULT_RECOVERED; } EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected); static int qm_check_req_recv(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; u32 val; if (qm->ver >= QM_HW_V3) return 0; writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID); ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, (val == ACC_VENDOR_ID_VALUE), POLL_PERIOD, POLL_TIMEOUT); if (ret) { dev_err(&pdev->dev, "Fails to read QM reg!\n"); return ret; } writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID); ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val, (val == PCI_VENDOR_ID_HUAWEI), POLL_PERIOD, POLL_TIMEOUT); if (ret) dev_err(&pdev->dev, "Fails to read QM reg in the second time!\n"); return ret; } static int qm_set_pf_mse(struct hisi_qm *qm, bool set) { struct pci_dev *pdev = qm->pdev; u16 cmd; int i; pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (set) cmd |= PCI_COMMAND_MEMORY; else cmd &= ~PCI_COMMAND_MEMORY; pci_write_config_word(pdev, PCI_COMMAND, cmd); for (i = 0; i < MAX_WAIT_COUNTS; i++) { pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1)) return 0; udelay(1); } return -ETIMEDOUT; } static int qm_set_vf_mse(struct hisi_qm *qm, bool set) { struct pci_dev *pdev = qm->pdev; u16 sriov_ctrl; int pos; int i; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); if (set) sriov_ctrl |= PCI_SRIOV_CTRL_MSE; else sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE; pci_write_config_word(pdev, pos + 
PCI_SRIOV_CTRL, sriov_ctrl); for (i = 0; i < MAX_WAIT_COUNTS; i++) { pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl); if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >> ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT) return 0; udelay(1); } return -ETIMEDOUT; } static int qm_vf_reset_prepare(struct hisi_qm *qm, enum qm_stop_reason stop_reason) { struct hisi_qm_list *qm_list = qm->qm_list; struct pci_dev *pdev = qm->pdev; struct pci_dev *virtfn; struct hisi_qm *vf_qm; int ret = 0; mutex_lock(&qm_list->lock); list_for_each_entry(vf_qm, &qm_list->list, list) { virtfn = vf_qm->pdev; if (virtfn == pdev) continue; if (pci_physfn(virtfn) == pdev) { /* save VFs PCIE BAR configuration */ pci_save_state(virtfn); ret = hisi_qm_stop(vf_qm, stop_reason); if (ret) goto stop_fail; } } stop_fail: mutex_unlock(&qm_list->lock); return ret; } static int qm_try_stop_vfs(struct hisi_qm *qm, u64 cmd, enum qm_stop_reason stop_reason) { struct pci_dev *pdev = qm->pdev; int ret; if (!qm->vfs_num) return 0; /* Kunpeng930 supports to notify VFs to stop before PF reset */ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { ret = qm_ping_all_vfs(qm, cmd); if (ret) pci_err(pdev, "failed to send cmd to all VFs before PF reset!\n"); } else { ret = qm_vf_reset_prepare(qm, stop_reason); if (ret) pci_err(pdev, "failed to prepare reset, ret = %d.\n", ret); } return ret; } static int qm_controller_reset_prepare(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; ret = qm_reset_prepare_ready(qm); if (ret) { pci_err(pdev, "Controller reset not ready!\n"); return ret; } /* PF obtains the information of VF by querying the register. */ qm_cmd_uninit(qm); /* Whether VFs stop successfully, soft reset will continue. */ ret = qm_try_stop_vfs(qm, QM_PF_SRST_PREPARE, QM_SOFT_RESET); if (ret) pci_err(pdev, "failed to stop vfs by pf in soft reset.\n"); ret = hisi_qm_stop(qm, QM_SOFT_RESET); if (ret) { pci_err(pdev, "Fails to stop QM!\n"); qm_reset_bit_clear(qm); return ret; } if (qm->use_sva) { ret = qm_hw_err_isolate(qm); if (ret) pci_err(pdev, "failed to isolate hw err!\n"); } ret = qm_wait_vf_prepare_finish(qm); if (ret) pci_err(pdev, "failed to stop by vfs in soft reset!\n"); clear_bit(QM_RST_SCHED, &qm->misc_ctl); return 0; } static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm) { u32 nfe_enb = 0; /* Kunpeng930 hardware automatically close master ooo when NFE occurs */ if (qm->ver >= QM_HW_V3) return; if (!qm->err_status.is_dev_ecc_mbit && qm->err_status.is_qm_ecc_mbit && qm->err_ini->close_axi_master_ooo) { qm->err_ini->close_axi_master_ooo(qm); } else if (qm->err_status.is_dev_ecc_mbit && !qm->err_status.is_qm_ecc_mbit && !qm->err_ini->close_axi_master_ooo) { nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE); writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE, qm->io_base + QM_RAS_NFE_ENABLE); writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET); } } static int qm_soft_reset(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; u32 val; /* Ensure all doorbells and mailboxes received by QM */ ret = qm_check_req_recv(qm); if (ret) return ret; if (qm->vfs_num) { ret = qm_set_vf_mse(qm, false); if (ret) { pci_err(pdev, "Fails to disable vf MSE bit.\n"); return ret; } } ret = qm->ops->set_msi(qm, false); if (ret) { pci_err(pdev, "Fails to disable PEH MSI bit.\n"); return ret; } qm_dev_ecc_mbit_handle(qm); /* OOO register set and check */ writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); /* If bus lock, reset chip */ ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, val, (val == 
ACC_MASTER_TRANS_RETURN_RW), POLL_PERIOD, POLL_TIMEOUT); if (ret) { pci_emerg(pdev, "Bus lock! Please reset system.\n"); return ret; } if (qm->err_ini->close_sva_prefetch) qm->err_ini->close_sva_prefetch(qm); ret = qm_set_pf_mse(qm, false); if (ret) { pci_err(pdev, "Fails to disable pf MSE bit.\n"); return ret; } /* The reset related sub-control registers are not in PCI BAR */ if (ACPI_HANDLE(&pdev->dev)) { unsigned long long value = 0; acpi_status s; s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev), qm->err_info.acpi_rst, NULL, &value); if (ACPI_FAILURE(s)) { pci_err(pdev, "NO controller reset method!\n"); return -EIO; } if (value) { pci_err(pdev, "Reset step %llu failed!\n", value); return -EIO; } } else { pci_err(pdev, "No reset method!\n"); return -EINVAL; } return 0; } static int qm_vf_reset_done(struct hisi_qm *qm) { struct hisi_qm_list *qm_list = qm->qm_list; struct pci_dev *pdev = qm->pdev; struct pci_dev *virtfn; struct hisi_qm *vf_qm; int ret = 0; mutex_lock(&qm_list->lock); list_for_each_entry(vf_qm, &qm_list->list, list) { virtfn = vf_qm->pdev; if (virtfn == pdev) continue; if (pci_physfn(virtfn) == pdev) { /* enable VFs PCIE BAR configuration */ pci_restore_state(virtfn); ret = qm_restart(vf_qm); if (ret) goto restart_fail; } } restart_fail: mutex_unlock(&qm_list->lock); return ret; } static int qm_try_start_vfs(struct hisi_qm *qm, enum qm_mb_cmd cmd) { struct pci_dev *pdev = qm->pdev; int ret; if (!qm->vfs_num) return 0; ret = qm_vf_q_assign(qm, qm->vfs_num); if (ret) { pci_err(pdev, "failed to assign VFs, ret = %d.\n", ret); return ret; } /* Kunpeng930 supports to notify VFs to start after PF reset. */ if (test_bit(QM_SUPPORT_MB_COMMAND, &qm->caps)) { ret = qm_ping_all_vfs(qm, cmd); if (ret) pci_warn(pdev, "failed to send cmd to all VFs after PF reset!\n"); } else { ret = qm_vf_reset_done(qm); if (ret) pci_warn(pdev, "failed to start vfs, ret = %d.\n", ret); } return ret; } static int qm_dev_hw_init(struct hisi_qm *qm) { return qm->err_ini->hw_init(qm); } static void qm_restart_prepare(struct hisi_qm *qm) { u32 value; if (qm->err_ini->open_sva_prefetch) qm->err_ini->open_sva_prefetch(qm); if (qm->ver >= QM_HW_V3) return; if (!qm->err_status.is_qm_ecc_mbit && !qm->err_status.is_dev_ecc_mbit) return; /* temporarily close the OOO port used for PEH to write out MSI */ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); writel(value & ~qm->err_info.msi_wr_port, qm->io_base + ACC_AM_CFG_PORT_WR_EN); /* clear dev ecc 2bit error source if having */ value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask; if (value && qm->err_ini->clear_dev_hw_err_status) qm->err_ini->clear_dev_hw_err_status(qm, value); /* clear QM ecc mbit error source */ writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE); /* clear AM Reorder Buffer ecc mbit source */ writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS); } static void qm_restart_done(struct hisi_qm *qm) { u32 value; if (qm->ver >= QM_HW_V3) goto clear_flags; if (!qm->err_status.is_qm_ecc_mbit && !qm->err_status.is_dev_ecc_mbit) return; /* open the OOO port for PEH to write out MSI */ value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN); value |= qm->err_info.msi_wr_port; writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN); clear_flags: qm->err_status.is_qm_ecc_mbit = false; qm->err_status.is_dev_ecc_mbit = false; } static int qm_controller_reset_done(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; ret = qm->ops->set_msi(qm, true); if (ret) { pci_err(pdev, "Fails to enable PEH MSI bit!\n"); return ret; } 
ret = qm_set_pf_mse(qm, true); if (ret) { pci_err(pdev, "Fails to enable pf MSE bit!\n"); return ret; } if (qm->vfs_num) { ret = qm_set_vf_mse(qm, true); if (ret) { pci_err(pdev, "Fails to enable vf MSE bit!\n"); return ret; } } ret = qm_dev_hw_init(qm); if (ret) { pci_err(pdev, "Failed to init device\n"); return ret; } qm_restart_prepare(qm); hisi_qm_dev_err_init(qm); if (qm->err_ini->open_axi_master_ooo) qm->err_ini->open_axi_master_ooo(qm); ret = qm_dev_mem_reset(qm); if (ret) { pci_err(pdev, "failed to reset device memory\n"); return ret; } ret = qm_restart(qm); if (ret) { pci_err(pdev, "Failed to start QM!\n"); return ret; } ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); if (ret) pci_err(pdev, "failed to start vfs by pf in soft reset.\n"); ret = qm_wait_vf_prepare_finish(qm); if (ret) pci_err(pdev, "failed to start by vfs in soft reset!\n"); qm_cmd_init(qm); qm_restart_done(qm); qm_reset_bit_clear(qm); return 0; } static int qm_controller_reset(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; pci_info(pdev, "Controller resetting...\n"); ret = qm_controller_reset_prepare(qm); if (ret) { hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); clear_bit(QM_RST_SCHED, &qm->misc_ctl); return ret; } hisi_qm_show_last_dfx_regs(qm); if (qm->err_ini->show_last_dfx_regs) qm->err_ini->show_last_dfx_regs(qm); ret = qm_soft_reset(qm); if (ret) goto err_reset; ret = qm_controller_reset_done(qm); if (ret) goto err_reset; pci_info(pdev, "Controller reset complete\n"); return 0; err_reset: pci_err(pdev, "Controller reset failed (%d)\n", ret); qm_reset_bit_clear(qm); /* if resetting fails, isolate the device */ if (qm->use_sva) qm->isolate_data.is_isolate = true; return ret; } /** * hisi_qm_dev_slot_reset() - slot reset * @pdev: the PCIe device * * This function offers QM relate PCIe device reset interface. Drivers which * use QM can use this function as slot_reset in its struct pci_error_handlers. */ pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev) { struct hisi_qm *qm = pci_get_drvdata(pdev); int ret; if (pdev->is_virtfn) return PCI_ERS_RESULT_RECOVERED; /* reset pcie device controller */ ret = qm_controller_reset(qm); if (ret) { pci_err(pdev, "Controller reset failed (%d)\n", ret); return PCI_ERS_RESULT_DISCONNECT; } return PCI_ERS_RESULT_RECOVERED; } EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset); void hisi_qm_reset_prepare(struct pci_dev *pdev) { struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); struct hisi_qm *qm = pci_get_drvdata(pdev); u32 delay = 0; int ret; hisi_qm_dev_err_uninit(pf_qm); /* * Check whether there is an ECC mbit error, If it occurs, need to * wait for soft reset to fix it. */ while (qm_check_dev_error(pf_qm)) { msleep(++delay); if (delay > QM_RESET_WAIT_TIMEOUT) return; } ret = qm_reset_prepare_ready(qm); if (ret) { pci_err(pdev, "FLR not ready!\n"); return; } /* PF obtains the information of VF by querying the register. 
*/ if (qm->fun_type == QM_HW_PF) qm_cmd_uninit(qm); ret = qm_try_stop_vfs(qm, QM_PF_FLR_PREPARE, QM_DOWN); if (ret) pci_err(pdev, "failed to stop vfs by pf in FLR.\n"); ret = hisi_qm_stop(qm, QM_DOWN); if (ret) { pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret); hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); return; } ret = qm_wait_vf_prepare_finish(qm); if (ret) pci_err(pdev, "failed to stop by vfs in FLR!\n"); pci_info(pdev, "FLR resetting...\n"); } EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare); static bool qm_flr_reset_complete(struct pci_dev *pdev) { struct pci_dev *pf_pdev = pci_physfn(pdev); struct hisi_qm *qm = pci_get_drvdata(pf_pdev); u32 id; pci_read_config_dword(qm->pdev, PCI_COMMAND, &id); if (id == QM_PCI_COMMAND_INVALID) { pci_err(pdev, "Device can not be used!\n"); return false; } return true; } void hisi_qm_reset_done(struct pci_dev *pdev) { struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev)); struct hisi_qm *qm = pci_get_drvdata(pdev); int ret; if (qm->fun_type == QM_HW_PF) { ret = qm_dev_hw_init(qm); if (ret) { pci_err(pdev, "Failed to init PF, ret = %d.\n", ret); goto flr_done; } } hisi_qm_dev_err_init(pf_qm); ret = qm_restart(qm); if (ret) { pci_err(pdev, "Failed to start QM, ret = %d.\n", ret); goto flr_done; } ret = qm_try_start_vfs(qm, QM_PF_RESET_DONE); if (ret) pci_err(pdev, "failed to start vfs by pf in FLR.\n"); ret = qm_wait_vf_prepare_finish(qm); if (ret) pci_err(pdev, "failed to start by vfs in FLR!\n"); flr_done: if (qm->fun_type == QM_HW_PF) qm_cmd_init(qm); if (qm_flr_reset_complete(pdev)) pci_info(pdev, "FLR reset complete\n"); qm_reset_bit_clear(qm); } EXPORT_SYMBOL_GPL(hisi_qm_reset_done); static irqreturn_t qm_abnormal_irq(int irq, void *data) { struct hisi_qm *qm = data; enum acc_err_result ret; atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt); ret = qm_process_dev_error(qm); if (ret == ACC_ERR_NEED_RESET && !test_bit(QM_DRIVER_REMOVING, &qm->misc_ctl) && !test_and_set_bit(QM_RST_SCHED, &qm->misc_ctl)) schedule_work(&qm->rst_work); return IRQ_HANDLED; } /** * hisi_qm_dev_shutdown() - Shutdown device. * @pdev: The device will be shutdown. * * This function will stop qm when OS shutdown or rebooting. 
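 *
 * Like the reset helpers above, this is meant to be wired directly into the
 * accelerator's pci_driver as its .shutdown hook, next to an error-handler
 * table such as (illustrative name):
 *
 *	static const struct pci_error_handlers my_acc_err_handler = {
 *		.error_detected	= hisi_qm_dev_err_detected,
 *		.slot_reset	= hisi_qm_dev_slot_reset,
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};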
*/ void hisi_qm_dev_shutdown(struct pci_dev *pdev) { struct hisi_qm *qm = pci_get_drvdata(pdev); int ret; ret = hisi_qm_stop(qm, QM_DOWN); if (ret) dev_err(&pdev->dev, "Fail to stop qm in shutdown!\n"); hisi_qm_cache_wb(qm); } EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown); static void hisi_qm_controller_reset(struct work_struct *rst_work) { struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work); int ret; ret = qm_pm_get_sync(qm); if (ret) { clear_bit(QM_RST_SCHED, &qm->misc_ctl); return; } /* reset pcie device controller */ ret = qm_controller_reset(qm); if (ret) dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret); qm_pm_put_sync(qm); } static void qm_pf_reset_vf_prepare(struct hisi_qm *qm, enum qm_stop_reason stop_reason) { enum qm_mb_cmd cmd = QM_VF_PREPARE_DONE; struct pci_dev *pdev = qm->pdev; int ret; ret = qm_reset_prepare_ready(qm); if (ret) { dev_err(&pdev->dev, "reset prepare not ready!\n"); atomic_set(&qm->status.flags, QM_STOP); cmd = QM_VF_PREPARE_FAIL; goto err_prepare; } ret = hisi_qm_stop(qm, stop_reason); if (ret) { dev_err(&pdev->dev, "failed to stop QM, ret = %d.\n", ret); atomic_set(&qm->status.flags, QM_STOP); cmd = QM_VF_PREPARE_FAIL; goto err_prepare; } else { goto out; } err_prepare: hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET); hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET); out: pci_save_state(pdev); ret = qm_ping_pf(qm, cmd); if (ret) dev_warn(&pdev->dev, "PF responds timeout in reset prepare!\n"); } static void qm_pf_reset_vf_done(struct hisi_qm *qm) { enum qm_mb_cmd cmd = QM_VF_START_DONE; struct pci_dev *pdev = qm->pdev; int ret; pci_restore_state(pdev); ret = hisi_qm_start(qm); if (ret) { dev_err(&pdev->dev, "failed to start QM, ret = %d.\n", ret); cmd = QM_VF_START_FAIL; } qm_cmd_init(qm); ret = qm_ping_pf(qm, cmd); if (ret) dev_warn(&pdev->dev, "PF responds timeout in reset done!\n"); qm_reset_bit_clear(qm); } static int qm_wait_pf_reset_finish(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; u32 val, cmd; u64 msg; int ret; /* Wait for reset to finish */ ret = readl_relaxed_poll_timeout(qm->io_base + QM_IFC_INT_SOURCE_V, val, val == BIT(0), QM_VF_RESET_WAIT_US, QM_VF_RESET_WAIT_TIMEOUT_US); /* hardware completion status should be available by this time */ if (ret) { dev_err(dev, "couldn't get reset done status from PF, timeout!\n"); return -ETIMEDOUT; } /* * Whether message is got successfully, * VF needs to ack PF by clearing the interrupt. */ ret = qm_get_mb_cmd(qm, &msg, 0); qm_clear_cmd_interrupt(qm, 0); if (ret) { dev_err(dev, "failed to get msg from PF in reset done!\n"); return ret; } cmd = msg & QM_MB_CMD_DATA_MASK; if (cmd != QM_PF_RESET_DONE) { dev_err(dev, "the cmd(%u) is not reset done!\n", cmd); ret = -EINVAL; } return ret; } static void qm_pf_reset_vf_process(struct hisi_qm *qm, enum qm_stop_reason stop_reason) { struct device *dev = &qm->pdev->dev; int ret; dev_info(dev, "device reset start...\n"); /* The message is obtained by querying the register during resetting */ qm_cmd_uninit(qm); qm_pf_reset_vf_prepare(qm, stop_reason); ret = qm_wait_pf_reset_finish(qm); if (ret) goto err_get_status; qm_pf_reset_vf_done(qm); dev_info(dev, "device reset done.\n"); return; err_get_status: qm_cmd_init(qm); qm_reset_bit_clear(qm); } static void qm_handle_cmd_msg(struct hisi_qm *qm, u32 fun_num) { struct device *dev = &qm->pdev->dev; u64 msg; u32 cmd; int ret; /* * Get the msg from source by sending mailbox. Whether message is got * successfully, destination needs to ack source by clearing the interrupt. 
*/ ret = qm_get_mb_cmd(qm, &msg, fun_num); qm_clear_cmd_interrupt(qm, BIT(fun_num)); if (ret) { dev_err(dev, "failed to get msg from source!\n"); return; } cmd = msg & QM_MB_CMD_DATA_MASK; switch (cmd) { case QM_PF_FLR_PREPARE: qm_pf_reset_vf_process(qm, QM_DOWN); break; case QM_PF_SRST_PREPARE: qm_pf_reset_vf_process(qm, QM_SOFT_RESET); break; case QM_VF_GET_QOS: qm_vf_get_qos(qm, fun_num); break; case QM_PF_SET_QOS: qm->mb_qos = msg >> QM_MB_CMD_DATA_SHIFT; break; default: dev_err(dev, "unsupported cmd %u sent by function(%u)!\n", cmd, fun_num); break; } } static void qm_cmd_process(struct work_struct *cmd_process) { struct hisi_qm *qm = container_of(cmd_process, struct hisi_qm, cmd_process); u32 vfs_num = qm->vfs_num; u64 val; u32 i; if (qm->fun_type == QM_HW_PF) { val = readq(qm->io_base + QM_IFC_INT_SOURCE_P); if (!val) return; for (i = 1; i <= vfs_num; i++) { if (val & BIT(i)) qm_handle_cmd_msg(qm, i); } return; } qm_handle_cmd_msg(qm, 0); } /** * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list. * @qm: The qm needs add. * @qm_list: The qm list. * * This function adds qm to qm list, and will register algorithm to * crypto when the qm list is empty. */ int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list) { struct device *dev = &qm->pdev->dev; int flag = 0; int ret = 0; mutex_lock(&qm_list->lock); if (list_empty(&qm_list->list)) flag = 1; list_add_tail(&qm->list, &qm_list->list); mutex_unlock(&qm_list->lock); if (qm->ver <= QM_HW_V2 && qm->use_sva) { dev_info(dev, "HW V2 not both use uacce sva mode and hardware crypto algs.\n"); return 0; } if (flag) { ret = qm_list->register_to_crypto(qm); if (ret) { mutex_lock(&qm_list->lock); list_del(&qm->list); mutex_unlock(&qm_list->lock); } } return ret; } EXPORT_SYMBOL_GPL(hisi_qm_alg_register); /** * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from * qm list. * @qm: The qm needs delete. * @qm_list: The qm list. * * This function deletes qm from qm list, and will unregister algorithm * from crypto when the qm list is empty. 
*/ void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list) { mutex_lock(&qm_list->lock); list_del(&qm->list); mutex_unlock(&qm_list->lock); if (qm->ver <= QM_HW_V2 && qm->use_sva) return; if (list_empty(&qm_list->list)) qm_list->unregister_from_crypto(qm); } EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister); static void qm_unregister_abnormal_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; if (qm->fun_type == QM_HW_VF) return; val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver); if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) return; irq_vector = val & QM_IRQ_VECTOR_MASK; free_irq(pci_irq_vector(pdev, irq_vector), qm); } static int qm_register_abnormal_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; int ret; if (qm->fun_type == QM_HW_VF) return 0; val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_ABN_IRQ_TYPE_CAP, qm->cap_ver); if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_ABN_IRQ_TYPE_MASK)) return 0; irq_vector = val & QM_IRQ_VECTOR_MASK; ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_abnormal_irq, 0, qm->dev_name, qm); if (ret) dev_err(&qm->pdev->dev, "failed to request abnormal irq, ret = %d", ret); return ret; } static void qm_unregister_mb_cmd_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver); if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return; irq_vector = val & QM_IRQ_VECTOR_MASK; free_irq(pci_irq_vector(pdev, irq_vector), qm); } static int qm_register_mb_cmd_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; int ret; val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_PF2VF_IRQ_TYPE_CAP, qm->cap_ver); if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return 0; irq_vector = val & QM_IRQ_VECTOR_MASK; ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_mb_cmd_irq, 0, qm->dev_name, qm); if (ret) dev_err(&pdev->dev, "failed to request function communication irq, ret = %d", ret); return ret; } static void qm_unregister_aeq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver); if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return; irq_vector = val & QM_IRQ_VECTOR_MASK; free_irq(pci_irq_vector(pdev, irq_vector), qm); } static int qm_register_aeq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; int ret; val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_AEQ_IRQ_TYPE_CAP, qm->cap_ver); if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return 0; irq_vector = val & QM_IRQ_VECTOR_MASK; ret = request_threaded_irq(pci_irq_vector(pdev, irq_vector), qm_aeq_irq, qm_aeq_thread, 0, qm->dev_name, qm); if (ret) dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); return ret; } static void qm_unregister_eq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver); if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return; irq_vector = val & QM_IRQ_VECTOR_MASK; free_irq(pci_irq_vector(pdev, irq_vector), qm); } static int qm_register_eq_irq(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; u32 irq_vector, val; int ret; val = hisi_qm_get_hw_info(qm, qm_basic_info, QM_EQ_IRQ_TYPE_CAP, qm->cap_ver); if (!((val >> QM_IRQ_TYPE_SHIFT) & QM_IRQ_TYPE_MASK)) return 0; irq_vector = val & 
QM_IRQ_VECTOR_MASK; ret = request_irq(pci_irq_vector(pdev, irq_vector), qm_eq_irq, 0, qm->dev_name, qm); if (ret) dev_err(&pdev->dev, "failed to request eq irq, ret = %d", ret); return ret; } static void qm_irqs_unregister(struct hisi_qm *qm) { qm_unregister_mb_cmd_irq(qm); qm_unregister_abnormal_irq(qm); qm_unregister_aeq_irq(qm); qm_unregister_eq_irq(qm); } static int qm_irqs_register(struct hisi_qm *qm) { int ret; ret = qm_register_eq_irq(qm); if (ret) return ret; ret = qm_register_aeq_irq(qm); if (ret) goto free_eq_irq; ret = qm_register_abnormal_irq(qm); if (ret) goto free_aeq_irq; ret = qm_register_mb_cmd_irq(qm); if (ret) goto free_abnormal_irq; return 0; free_abnormal_irq: qm_unregister_abnormal_irq(qm); free_aeq_irq: qm_unregister_aeq_irq(qm); free_eq_irq: qm_unregister_eq_irq(qm); return ret; } static int qm_get_qp_num(struct hisi_qm *qm) { bool is_db_isolation; /* VF's qp_num assigned by PF in v2, and VF can get qp_num by vft. */ if (qm->fun_type == QM_HW_VF) { if (qm->ver != QM_HW_V1) /* v2 starts to support get vft by mailbox */ return hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num); return 0; } is_db_isolation = test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); qm->ctrl_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_TOTAL_QP_NUM_CAP, true); qm->max_qp_num = hisi_qm_get_hw_info(qm, qm_basic_info, QM_FUNC_MAX_QP_CAP, is_db_isolation); /* check if qp number is valid */ if (qm->qp_num > qm->max_qp_num) { dev_err(&qm->pdev->dev, "qp num(%u) is more than max qp num(%u)!\n", qm->qp_num, qm->max_qp_num); return -EINVAL; } return 0; } static void qm_get_hw_caps(struct hisi_qm *qm) { const struct hisi_qm_cap_info *cap_info = qm->fun_type == QM_HW_PF ? qm_cap_info_pf : qm_cap_info_vf; u32 size = qm->fun_type == QM_HW_PF ? ARRAY_SIZE(qm_cap_info_pf) : ARRAY_SIZE(qm_cap_info_vf); u32 val, i; /* Doorbell isolate register is a independent register. 
*/ val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, QM_SUPPORT_DB_ISOLATION, true); if (val) set_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps); if (qm->ver >= QM_HW_V3) { val = readl(qm->io_base + QM_FUNC_CAPS_REG); qm->cap_ver = val & QM_CAPBILITY_VERSION; } /* Get PF/VF common capbility */ for (i = 1; i < ARRAY_SIZE(qm_cap_info_comm); i++) { val = hisi_qm_get_hw_info(qm, qm_cap_info_comm, i, qm->cap_ver); if (val) set_bit(qm_cap_info_comm[i].type, &qm->caps); } /* Get PF/VF different capbility */ for (i = 0; i < size; i++) { val = hisi_qm_get_hw_info(qm, cap_info, i, qm->cap_ver); if (val) set_bit(cap_info[i].type, &qm->caps); } } static int qm_get_pci_res(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; struct device *dev = &pdev->dev; int ret; ret = pci_request_mem_regions(pdev, qm->dev_name); if (ret < 0) { dev_err(dev, "Failed to request mem regions!\n"); return ret; } qm->phys_base = pci_resource_start(pdev, PCI_BAR_2); qm->io_base = ioremap(qm->phys_base, pci_resource_len(pdev, PCI_BAR_2)); if (!qm->io_base) { ret = -EIO; goto err_request_mem_regions; } qm_get_hw_caps(qm); if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) { qm->db_interval = QM_QP_DB_INTERVAL; qm->db_phys_base = pci_resource_start(pdev, PCI_BAR_4); qm->db_io_base = ioremap(qm->db_phys_base, pci_resource_len(pdev, PCI_BAR_4)); if (!qm->db_io_base) { ret = -EIO; goto err_ioremap; } } else { qm->db_phys_base = qm->phys_base; qm->db_io_base = qm->io_base; qm->db_interval = 0; } ret = qm_get_qp_num(qm); if (ret) goto err_db_ioremap; return 0; err_db_ioremap: if (test_bit(QM_SUPPORT_DB_ISOLATION, &qm->caps)) iounmap(qm->db_io_base); err_ioremap: iounmap(qm->io_base); err_request_mem_regions: pci_release_mem_regions(pdev); return ret; } static int hisi_qm_pci_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; struct device *dev = &pdev->dev; unsigned int num_vec; int ret; ret = pci_enable_device_mem(pdev); if (ret < 0) { dev_err(dev, "Failed to enable device mem!\n"); return ret; } ret = qm_get_pci_res(qm); if (ret) goto err_disable_pcidev; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret < 0) goto err_get_pci_res; pci_set_master(pdev); num_vec = qm_get_irq_num(qm); ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI); if (ret < 0) { dev_err(dev, "Failed to enable MSI vectors!\n"); goto err_get_pci_res; } return 0; err_get_pci_res: qm_put_pci_res(qm); err_disable_pcidev: pci_disable_device(pdev); return ret; } static int hisi_qm_init_work(struct hisi_qm *qm) { int i; for (i = 0; i < qm->qp_num; i++) INIT_WORK(&qm->poll_data[i].work, qm_work_process); if (qm->fun_type == QM_HW_PF) INIT_WORK(&qm->rst_work, hisi_qm_controller_reset); if (qm->ver > QM_HW_V2) INIT_WORK(&qm->cmd_process, qm_cmd_process); qm->wq = alloc_workqueue("%s", WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, num_online_cpus(), pci_name(qm->pdev)); if (!qm->wq) { pci_err(qm->pdev, "failed to alloc workqueue!\n"); return -ENOMEM; } return 0; } static int hisi_qp_alloc_memory(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; u16 sq_depth, cq_depth; size_t qp_dma_size; int i, ret; qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL); if (!qm->qp_array) return -ENOMEM; qm->poll_data = kcalloc(qm->qp_num, sizeof(struct hisi_qm_poll_data), GFP_KERNEL); if (!qm->poll_data) { kfree(qm->qp_array); return -ENOMEM; } qm_get_xqc_depth(qm, &sq_depth, &cq_depth, QM_QP_DEPTH_CAP); /* one more page for device or qp statuses */ qp_dma_size = qm->sqe_size * sq_depth + sizeof(struct qm_cqe) * cq_depth; qp_dma_size = 
PAGE_ALIGN(qp_dma_size) + PAGE_SIZE; for (i = 0; i < qm->qp_num; i++) { qm->poll_data[i].qm = qm; ret = hisi_qp_memory_init(qm, qp_dma_size, i, sq_depth, cq_depth); if (ret) goto err_init_qp_mem; dev_dbg(dev, "allocate qp dma buf size=%zx)\n", qp_dma_size); } return 0; err_init_qp_mem: hisi_qp_memory_uninit(qm, i); return ret; } static int hisi_qm_memory_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; int ret, total_func; size_t off = 0; if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) { total_func = pci_sriov_get_totalvfs(qm->pdev) + 1; qm->factor = kcalloc(total_func, sizeof(struct qm_shaper_factor), GFP_KERNEL); if (!qm->factor) return -ENOMEM; /* Only the PF value needs to be initialized */ qm->factor[0].func_qos = QM_QOS_MAX_VAL; } #define QM_INIT_BUF(qm, type, num) do { \ (qm)->type = ((qm)->qdma.va + (off)); \ (qm)->type##_dma = (qm)->qdma.dma + (off); \ off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \ } while (0) idr_init(&qm->qp_idr); qm_get_xqc_depth(qm, &qm->eq_depth, &qm->aeq_depth, QM_XEQ_DEPTH_CAP); qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * qm->eq_depth) + QMC_ALIGN(sizeof(struct qm_aeqe) * qm->aeq_depth) + QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) + QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num); qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma, GFP_ATOMIC); dev_dbg(dev, "allocate qm dma buf size=%zx)\n", qm->qdma.size); if (!qm->qdma.va) { ret = -ENOMEM; goto err_destroy_idr; } QM_INIT_BUF(qm, eqe, qm->eq_depth); QM_INIT_BUF(qm, aeqe, qm->aeq_depth); QM_INIT_BUF(qm, sqc, qm->qp_num); QM_INIT_BUF(qm, cqc, qm->qp_num); ret = hisi_qp_alloc_memory(qm); if (ret) goto err_alloc_qp_array; return 0; err_alloc_qp_array: dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma); err_destroy_idr: idr_destroy(&qm->qp_idr); if (test_bit(QM_SUPPORT_FUNC_QOS, &qm->caps)) kfree(qm->factor); return ret; } /** * hisi_qm_init() - Initialize configures about qm. * @qm: The qm needing init. * * This function init qm, then we can call hisi_qm_start to put qm into work. */ int hisi_qm_init(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; struct device *dev = &pdev->dev; int ret; hisi_qm_pre_init(qm); ret = hisi_qm_pci_init(qm); if (ret) return ret; ret = qm_irqs_register(qm); if (ret) goto err_pci_init; if (qm->fun_type == QM_HW_PF) { /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */ writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); qm_disable_clock_gate(qm); ret = qm_dev_mem_reset(qm); if (ret) { dev_err(dev, "failed to reset device memory\n"); goto err_irq_register; } } if (qm->mode == UACCE_MODE_SVA) { ret = qm_alloc_uacce(qm); if (ret < 0) dev_warn(dev, "fail to alloc uacce (%d)\n", ret); } ret = hisi_qm_memory_init(qm); if (ret) goto err_alloc_uacce; ret = hisi_qm_init_work(qm); if (ret) goto err_free_qm_memory; qm_cmd_init(qm); atomic_set(&qm->status.flags, QM_INIT); return 0; err_free_qm_memory: hisi_qm_memory_uninit(qm); err_alloc_uacce: qm_remove_uacce(qm); err_irq_register: qm_irqs_unregister(qm); err_pci_init: hisi_qm_pci_uninit(qm); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_init); /** * hisi_qm_get_dfx_access() - Try to get dfx access. * @qm: pointer to accelerator device. * * Try to get dfx access, then user can get message. * * If device is in suspended, return failure, otherwise * bump up the runtime PM usage counter. 
*/ int hisi_qm_get_dfx_access(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; if (pm_runtime_suspended(dev)) { dev_info(dev, "can not read/write - device in suspended.\n"); return -EAGAIN; } return qm_pm_get_sync(qm); } EXPORT_SYMBOL_GPL(hisi_qm_get_dfx_access); /** * hisi_qm_put_dfx_access() - Put dfx access. * @qm: pointer to accelerator device. * * Put dfx access, drop runtime PM usage counter. */ void hisi_qm_put_dfx_access(struct hisi_qm *qm) { qm_pm_put_sync(qm); } EXPORT_SYMBOL_GPL(hisi_qm_put_dfx_access); /** * hisi_qm_pm_init() - Initialize qm runtime PM. * @qm: pointer to accelerator device. * * Function that initialize qm runtime PM. */ void hisi_qm_pm_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) return; pm_runtime_set_autosuspend_delay(dev, QM_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(dev); pm_runtime_put_noidle(dev); } EXPORT_SYMBOL_GPL(hisi_qm_pm_init); /** * hisi_qm_pm_uninit() - Uninitialize qm runtime PM. * @qm: pointer to accelerator device. * * Function that uninitialize qm runtime PM. */ void hisi_qm_pm_uninit(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; if (!test_bit(QM_SUPPORT_RPM, &qm->caps)) return; pm_runtime_get_noresume(dev); pm_runtime_dont_use_autosuspend(dev); } EXPORT_SYMBOL_GPL(hisi_qm_pm_uninit); static int qm_prepare_for_suspend(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; u32 val; ret = qm->ops->set_msi(qm, false); if (ret) { pci_err(pdev, "failed to disable MSI before suspending!\n"); return ret; } /* shutdown OOO register */ writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN, qm->io_base + ACC_MASTER_GLOBAL_CTRL); ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN, val, (val == ACC_MASTER_TRANS_RETURN_RW), POLL_PERIOD, POLL_TIMEOUT); if (ret) { pci_emerg(pdev, "Bus lock! Please reset system.\n"); return ret; } ret = qm_set_pf_mse(qm, false); if (ret) pci_err(pdev, "failed to disable MSE before suspending!\n"); return ret; } static int qm_rebuild_for_resume(struct hisi_qm *qm) { struct pci_dev *pdev = qm->pdev; int ret; ret = qm_set_pf_mse(qm, true); if (ret) { pci_err(pdev, "failed to enable MSE after resuming!\n"); return ret; } ret = qm->ops->set_msi(qm, true); if (ret) { pci_err(pdev, "failed to enable MSI after resuming!\n"); return ret; } ret = qm_dev_hw_init(qm); if (ret) { pci_err(pdev, "failed to init device after resuming\n"); return ret; } qm_cmd_init(qm); hisi_qm_dev_err_init(qm); /* Set the doorbell timeout to QM_DB_TIMEOUT_CFG ns. */ writel(QM_DB_TIMEOUT_SET, qm->io_base + QM_DB_TIMEOUT_CFG); qm_disable_clock_gate(qm); ret = qm_dev_mem_reset(qm); if (ret) pci_err(pdev, "failed to reset device memory\n"); return ret; } /** * hisi_qm_suspend() - Runtime suspend of given device. * @dev: device to suspend. * * Function that suspend the device. */ int hisi_qm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct hisi_qm *qm = pci_get_drvdata(pdev); int ret; pci_info(pdev, "entering suspended state\n"); ret = hisi_qm_stop(qm, QM_NORMAL); if (ret) { pci_err(pdev, "failed to stop qm(%d)\n", ret); return ret; } ret = qm_prepare_for_suspend(qm); if (ret) pci_err(pdev, "failed to prepare suspended(%d)\n", ret); return ret; } EXPORT_SYMBOL_GPL(hisi_qm_suspend); /** * hisi_qm_resume() - Runtime resume of given device. * @dev: device to resume. * * Function that resume the device. 
*/ int hisi_qm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct hisi_qm *qm = pci_get_drvdata(pdev); int ret; pci_info(pdev, "resuming from suspend state\n"); ret = qm_rebuild_for_resume(qm); if (ret) { pci_err(pdev, "failed to rebuild resume(%d)\n", ret); return ret; } ret = hisi_qm_start(qm); if (ret) { if (qm_check_dev_error(qm)) { pci_info(pdev, "failed to start qm due to device error, device will be reset!\n"); return 0; } pci_err(pdev, "failed to start qm(%d)!\n", ret); } return ret; } EXPORT_SYMBOL_GPL(hisi_qm_resume); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zhou Wang <[email protected]>"); MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
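/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * The exported helpers above (hisi_qm_suspend()/hisi_qm_resume(),
 * hisi_qm_reset_prepare()/hisi_qm_reset_done() and hisi_qm_dev_shutdown())
 * are intended to be wired into a PCI driver by the individual accelerator
 * drivers built on this QM layer. A minimal sketch, assuming a hypothetical
 * "foo" accelerator (foo_dev_ids, foo_probe and foo_remove are placeholders,
 * not symbols from this file), could look like:
 *
 *	static const struct dev_pm_ops foo_pm_ops = {
 *		SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL)
 *	};
 *
 *	static const struct pci_error_handlers foo_err_handler = {
 *		.reset_prepare	= hisi_qm_reset_prepare,
 *		.reset_done	= hisi_qm_reset_done,
 *	};
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_dev_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *		.shutdown	= hisi_qm_dev_shutdown,
 *		.err_handler	= &foo_err_handler,
 *		.driver.pm	= &foo_pm_ops,
 *	};
 *
 * Only the hisi_qm_* hooks come from this file; everything else in the
 * sketch is an assumption about how a consuming driver would be laid out.
 */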
linux-master
drivers/crypto/hisilicon/qm.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include <crypto/akcipher.h> #include <crypto/curve25519.h> #include <crypto/dh.h> #include <crypto/ecc_curve.h> #include <crypto/ecdh.h> #include <crypto/rng.h> #include <crypto/internal/akcipher.h> #include <crypto/internal/kpp.h> #include <crypto/internal/rsa.h> #include <crypto/kpp.h> #include <crypto/scatterwalk.h> #include <linux/dma-mapping.h> #include <linux/fips.h> #include <linux/module.h> #include <linux/time.h> #include "hpre.h" struct hpre_ctx; #define HPRE_CRYPTO_ALG_PRI 1000 #define HPRE_ALIGN_SZ 64 #define HPRE_BITS_2_BYTES_SHIFT 3 #define HPRE_RSA_512BITS_KSZ 64 #define HPRE_RSA_1536BITS_KSZ 192 #define HPRE_CRT_PRMS 5 #define HPRE_CRT_Q 2 #define HPRE_CRT_P 3 #define HPRE_CRT_INV 4 #define HPRE_DH_G_FLAG 0x02 #define HPRE_TRY_SEND_TIMES 100 #define HPRE_INVLD_REQ_ID (-1) #define HPRE_SQE_ALG_BITS 5 #define HPRE_SQE_DONE_SHIFT 30 #define HPRE_DH_MAX_P_SZ 512 #define HPRE_DFX_SEC_TO_US 1000000 #define HPRE_DFX_US_TO_NS 1000 /* due to nist p521 */ #define HPRE_ECC_MAX_KSZ 66 /* size in bytes of the n prime */ #define HPRE_ECC_NIST_P192_N_SIZE 24 #define HPRE_ECC_NIST_P256_N_SIZE 32 #define HPRE_ECC_NIST_P384_N_SIZE 48 /* size in bytes */ #define HPRE_ECC_HW256_KSZ_B 32 #define HPRE_ECC_HW384_KSZ_B 48 /* capability register mask of driver */ #define HPRE_DRV_RSA_MASK_CAP BIT(0) #define HPRE_DRV_DH_MASK_CAP BIT(1) #define HPRE_DRV_ECDH_MASK_CAP BIT(2) #define HPRE_DRV_X25519_MASK_CAP BIT(5) typedef void (*hpre_cb)(struct hpre_ctx *ctx, void *sqe); struct hpre_rsa_ctx { /* low address: e--->n */ char *pubkey; dma_addr_t dma_pubkey; /* low address: d--->n */ char *prikey; dma_addr_t dma_prikey; /* low address: dq->dp->q->p->qinv */ char *crt_prikey; dma_addr_t dma_crt_prikey; struct crypto_akcipher *soft_tfm; }; struct hpre_dh_ctx { /* * If base is g we compute the public key * ya = g^xa mod p; [RFC2631 sec 2.1.1] * else if base if the counterpart public key we * compute the shared secret * ZZ = yb^xa mod p; [RFC2631 sec 2.1.1] * low address: d--->n, please refer to Hisilicon HPRE UM */ char *xa_p; dma_addr_t dma_xa_p; char *g; /* m */ dma_addr_t dma_g; }; struct hpre_ecdh_ctx { /* low address: p->a->k->b */ unsigned char *p; dma_addr_t dma_p; /* low address: x->y */ unsigned char *g; dma_addr_t dma_g; }; struct hpre_curve25519_ctx { /* low address: p->a->k */ unsigned char *p; dma_addr_t dma_p; /* gx coordinate */ unsigned char *g; dma_addr_t dma_g; }; struct hpre_ctx { struct hisi_qp *qp; struct device *dev; struct hpre_asym_request **req_list; struct hpre *hpre; spinlock_t req_lock; unsigned int key_sz; bool crt_g2_mode; struct idr req_idr; union { struct hpre_rsa_ctx rsa; struct hpre_dh_ctx dh; struct hpre_ecdh_ctx ecdh; struct hpre_curve25519_ctx curve25519; }; /* for ecc algorithms */ unsigned int curve_id; }; struct hpre_asym_request { char *src; char *dst; struct hpre_sqe req; struct hpre_ctx *ctx; union { struct akcipher_request *rsa; struct kpp_request *dh; struct kpp_request *ecdh; struct kpp_request *curve25519; } areq; int err; int req_id; hpre_cb cb; struct timespec64 req_time; }; static inline unsigned int hpre_align_sz(void) { return ((crypto_dma_align() - 1) | (HPRE_ALIGN_SZ - 1)) + 1; } static inline unsigned int hpre_align_pd(void) { return (hpre_align_sz() - 1) & ~(crypto_tfm_ctx_alignment() - 1); } static int hpre_alloc_req_id(struct hpre_ctx *ctx) { unsigned long flags; int id; spin_lock_irqsave(&ctx->req_lock, flags); id = idr_alloc(&ctx->req_idr, NULL, 0, ctx->qp->sq_depth, 
GFP_ATOMIC); spin_unlock_irqrestore(&ctx->req_lock, flags); return id; } static void hpre_free_req_id(struct hpre_ctx *ctx, int req_id) { unsigned long flags; spin_lock_irqsave(&ctx->req_lock, flags); idr_remove(&ctx->req_idr, req_id); spin_unlock_irqrestore(&ctx->req_lock, flags); } static int hpre_add_req_to_ctx(struct hpre_asym_request *hpre_req) { struct hpre_ctx *ctx; struct hpre_dfx *dfx; int id; ctx = hpre_req->ctx; id = hpre_alloc_req_id(ctx); if (unlikely(id < 0)) return -EINVAL; ctx->req_list[id] = hpre_req; hpre_req->req_id = id; dfx = ctx->hpre->debug.dfx; if (atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value)) ktime_get_ts64(&hpre_req->req_time); return id; } static void hpre_rm_req_from_ctx(struct hpre_asym_request *hpre_req) { struct hpre_ctx *ctx = hpre_req->ctx; int id = hpre_req->req_id; if (hpre_req->req_id >= 0) { hpre_req->req_id = HPRE_INVLD_REQ_ID; ctx->req_list[id] = NULL; hpre_free_req_id(ctx, id); } } static struct hisi_qp *hpre_get_qp_and_start(u8 type) { struct hisi_qp *qp; int ret; qp = hpre_create_qp(type); if (!qp) { pr_err("Can not create hpre qp!\n"); return ERR_PTR(-ENODEV); } ret = hisi_qm_start_qp(qp, 0); if (ret < 0) { hisi_qm_free_qps(&qp, 1); pci_err(qp->qm->pdev, "Can not start qp!\n"); return ERR_PTR(-EINVAL); } return qp; } static int hpre_get_data_dma_addr(struct hpre_asym_request *hpre_req, struct scatterlist *data, unsigned int len, int is_src, dma_addr_t *tmp) { struct device *dev = hpre_req->ctx->dev; enum dma_data_direction dma_dir; if (is_src) { hpre_req->src = NULL; dma_dir = DMA_TO_DEVICE; } else { hpre_req->dst = NULL; dma_dir = DMA_FROM_DEVICE; } *tmp = dma_map_single(dev, sg_virt(data), len, dma_dir); if (unlikely(dma_mapping_error(dev, *tmp))) { dev_err(dev, "dma map data err!\n"); return -ENOMEM; } return 0; } static int hpre_prepare_dma_buf(struct hpre_asym_request *hpre_req, struct scatterlist *data, unsigned int len, int is_src, dma_addr_t *tmp) { struct hpre_ctx *ctx = hpre_req->ctx; struct device *dev = ctx->dev; void *ptr; int shift; shift = ctx->key_sz - len; if (unlikely(shift < 0)) return -EINVAL; ptr = dma_alloc_coherent(dev, ctx->key_sz, tmp, GFP_ATOMIC); if (unlikely(!ptr)) return -ENOMEM; if (is_src) { scatterwalk_map_and_copy(ptr + shift, data, 0, len, 0); hpre_req->src = ptr; } else { hpre_req->dst = ptr; } return 0; } static int hpre_hw_data_init(struct hpre_asym_request *hpre_req, struct scatterlist *data, unsigned int len, int is_src, int is_dh) { struct hpre_sqe *msg = &hpre_req->req; struct hpre_ctx *ctx = hpre_req->ctx; dma_addr_t tmp = 0; int ret; /* when the data is dh's source, we should format it */ if ((sg_is_last(data) && len == ctx->key_sz) && ((is_dh && !is_src) || !is_dh)) ret = hpre_get_data_dma_addr(hpre_req, data, len, is_src, &tmp); else ret = hpre_prepare_dma_buf(hpre_req, data, len, is_src, &tmp); if (unlikely(ret)) return ret; if (is_src) msg->in = cpu_to_le64(tmp); else msg->out = cpu_to_le64(tmp); return 0; } static void hpre_hw_data_clr_all(struct hpre_ctx *ctx, struct hpre_asym_request *req, struct scatterlist *dst, struct scatterlist *src) { struct device *dev = ctx->dev; struct hpre_sqe *sqe = &req->req; dma_addr_t tmp; tmp = le64_to_cpu(sqe->in); if (unlikely(dma_mapping_error(dev, tmp))) return; if (src) { if (req->src) dma_free_coherent(dev, ctx->key_sz, req->src, tmp); else dma_unmap_single(dev, tmp, ctx->key_sz, DMA_TO_DEVICE); } tmp = le64_to_cpu(sqe->out); if (unlikely(dma_mapping_error(dev, tmp))) return; if (req->dst) { if (dst) scatterwalk_map_and_copy(req->dst, dst, 0, ctx->key_sz, 
1); dma_free_coherent(dev, ctx->key_sz, req->dst, tmp); } else { dma_unmap_single(dev, tmp, ctx->key_sz, DMA_FROM_DEVICE); } } static int hpre_alg_res_post_hf(struct hpre_ctx *ctx, struct hpre_sqe *sqe, void **kreq) { struct hpre_asym_request *req; unsigned int err, done, alg; int id; #define HPRE_NO_HW_ERR 0 #define HPRE_HW_TASK_DONE 3 #define HREE_HW_ERR_MASK GENMASK(10, 0) #define HREE_SQE_DONE_MASK GENMASK(1, 0) #define HREE_ALG_TYPE_MASK GENMASK(4, 0) id = (int)le16_to_cpu(sqe->tag); req = ctx->req_list[id]; hpre_rm_req_from_ctx(req); *kreq = req; err = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_ALG_BITS) & HREE_HW_ERR_MASK; done = (le32_to_cpu(sqe->dw0) >> HPRE_SQE_DONE_SHIFT) & HREE_SQE_DONE_MASK; if (likely(err == HPRE_NO_HW_ERR && done == HPRE_HW_TASK_DONE)) return 0; alg = le32_to_cpu(sqe->dw0) & HREE_ALG_TYPE_MASK; dev_err_ratelimited(ctx->dev, "alg[0x%x] error: done[0x%x], etype[0x%x]\n", alg, done, err); return -EINVAL; } static int hpre_ctx_set(struct hpre_ctx *ctx, struct hisi_qp *qp, int qlen) { struct hpre *hpre; if (!ctx || !qp || qlen < 0) return -EINVAL; spin_lock_init(&ctx->req_lock); ctx->qp = qp; ctx->dev = &qp->qm->pdev->dev; hpre = container_of(ctx->qp->qm, struct hpre, qm); ctx->hpre = hpre; ctx->req_list = kcalloc(qlen, sizeof(void *), GFP_KERNEL); if (!ctx->req_list) return -ENOMEM; ctx->key_sz = 0; ctx->crt_g2_mode = false; idr_init(&ctx->req_idr); return 0; } static void hpre_ctx_clear(struct hpre_ctx *ctx, bool is_clear_all) { if (is_clear_all) { idr_destroy(&ctx->req_idr); kfree(ctx->req_list); hisi_qm_free_qps(&ctx->qp, 1); } ctx->crt_g2_mode = false; ctx->key_sz = 0; } static bool hpre_is_bd_timeout(struct hpre_asym_request *req, u64 overtime_thrhld) { struct timespec64 reply_time; u64 time_use_us; ktime_get_ts64(&reply_time); time_use_us = (reply_time.tv_sec - req->req_time.tv_sec) * HPRE_DFX_SEC_TO_US + (reply_time.tv_nsec - req->req_time.tv_nsec) / HPRE_DFX_US_TO_NS; if (time_use_us <= overtime_thrhld) return false; return true; } static void hpre_dh_cb(struct hpre_ctx *ctx, void *resp) { struct hpre_dfx *dfx = ctx->hpre->debug.dfx; struct hpre_asym_request *req; struct kpp_request *areq; u64 overtime_thrhld; int ret; ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); areq = req->areq.dh; areq->dst_len = ctx->key_sz; overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); kpp_request_complete(areq, ret); atomic64_inc(&dfx[HPRE_RECV_CNT].value); } static void hpre_rsa_cb(struct hpre_ctx *ctx, void *resp) { struct hpre_dfx *dfx = ctx->hpre->debug.dfx; struct hpre_asym_request *req; struct akcipher_request *areq; u64 overtime_thrhld; int ret; ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); areq = req->areq.rsa; areq->dst_len = ctx->key_sz; hpre_hw_data_clr_all(ctx, req, areq->dst, areq->src); akcipher_request_complete(areq, ret); atomic64_inc(&dfx[HPRE_RECV_CNT].value); } static void hpre_alg_cb(struct hisi_qp *qp, void *resp) { struct hpre_ctx *ctx = qp->qp_ctx; struct hpre_dfx *dfx = ctx->hpre->debug.dfx; struct hpre_sqe *sqe = resp; struct hpre_asym_request *req = ctx->req_list[le16_to_cpu(sqe->tag)]; if (unlikely(!req)) { atomic64_inc(&dfx[HPRE_INVALID_REQ_CNT].value); return; } req->cb(ctx, resp); } 
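/*
 * Editor's note: descriptive summary added for clarity, not original code.
 * The completion path above hinges on the 16-bit tag carried in the SQE:
 * the submit side stores the request in ctx->req_list[] under that tag and
 * the hardware echoes the tag back in the response, so hpre_alg_cb() can
 * recover the original hpre_asym_request and dispatch to the
 * algorithm-specific callback. Using only names defined in this file, the
 * round trip is roughly:
 *
 *	// submit side (hpre_msg_request_set() + hpre_send())
 *	req_id = hpre_add_req_to_ctx(h_req);	// idr slot, indexes req_list
 *	msg->tag = cpu_to_le16((u16)req_id);
 *	hisi_qp_send(ctx->qp, msg);
 *
 *	// completion side (hpre_alg_cb(), installed as qp->req_cb)
 *	req = ctx->req_list[le16_to_cpu(sqe->tag)];
 *	req->cb(ctx, resp);			// hpre_rsa_cb()/hpre_dh_cb()/...
 *
 * This is a restatement of the surrounding code, not new behaviour.
 */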
static void hpre_stop_qp_and_put(struct hisi_qp *qp) { hisi_qm_stop_qp(qp); hisi_qm_free_qps(&qp, 1); } static int hpre_ctx_init(struct hpre_ctx *ctx, u8 type) { struct hisi_qp *qp; int ret; qp = hpre_get_qp_and_start(type); if (IS_ERR(qp)) return PTR_ERR(qp); qp->qp_ctx = ctx; qp->req_cb = hpre_alg_cb; ret = hpre_ctx_set(ctx, qp, qp->sq_depth); if (ret) hpre_stop_qp_and_put(qp); return ret; } static int hpre_msg_request_set(struct hpre_ctx *ctx, void *req, bool is_rsa) { struct hpre_asym_request *h_req; struct hpre_sqe *msg; int req_id; void *tmp; if (is_rsa) { struct akcipher_request *akreq = req; if (akreq->dst_len < ctx->key_sz) { akreq->dst_len = ctx->key_sz; return -EOVERFLOW; } tmp = akcipher_request_ctx(akreq); h_req = PTR_ALIGN(tmp, hpre_align_sz()); h_req->cb = hpre_rsa_cb; h_req->areq.rsa = akreq; msg = &h_req->req; memset(msg, 0, sizeof(*msg)); } else { struct kpp_request *kreq = req; if (kreq->dst_len < ctx->key_sz) { kreq->dst_len = ctx->key_sz; return -EOVERFLOW; } tmp = kpp_request_ctx(kreq); h_req = PTR_ALIGN(tmp, hpre_align_sz()); h_req->cb = hpre_dh_cb; h_req->areq.dh = kreq; msg = &h_req->req; memset(msg, 0, sizeof(*msg)); msg->key = cpu_to_le64(ctx->dh.dma_xa_p); } msg->in = cpu_to_le64(DMA_MAPPING_ERROR); msg->out = cpu_to_le64(DMA_MAPPING_ERROR); msg->dw0 |= cpu_to_le32(0x1 << HPRE_SQE_DONE_SHIFT); msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; h_req->ctx = ctx; req_id = hpre_add_req_to_ctx(h_req); if (req_id < 0) return -EBUSY; msg->tag = cpu_to_le16((u16)req_id); return 0; } static int hpre_send(struct hpre_ctx *ctx, struct hpre_sqe *msg) { struct hpre_dfx *dfx = ctx->hpre->debug.dfx; int ctr = 0; int ret; do { atomic64_inc(&dfx[HPRE_SEND_CNT].value); ret = hisi_qp_send(ctx->qp, msg); if (ret != -EBUSY) break; atomic64_inc(&dfx[HPRE_SEND_BUSY_CNT].value); } while (ctr++ < HPRE_TRY_SEND_TIMES); if (likely(!ret)) return ret; if (ret != -EBUSY) atomic64_inc(&dfx[HPRE_SEND_FAIL_CNT].value); return ret; } static int hpre_dh_compute_value(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); void *tmp = kpp_request_ctx(req); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz()); struct hpre_sqe *msg = &hpre_req->req; int ret; ret = hpre_msg_request_set(ctx, req, false); if (unlikely(ret)) return ret; if (req->src) { ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 1); if (unlikely(ret)) goto clear_all; } else { msg->in = cpu_to_le64(ctx->dh.dma_g); } ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 1); if (unlikely(ret)) goto clear_all; if (ctx->crt_g2_mode && !req->src) msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH_G2); else msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_DH); /* success */ ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; clear_all: hpre_rm_req_from_ctx(hpre_req); hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); return ret; } static int hpre_is_dh_params_length_valid(unsigned int key_sz) { #define _HPRE_DH_GRP1 768 #define _HPRE_DH_GRP2 1024 #define _HPRE_DH_GRP5 1536 #define _HPRE_DH_GRP14 2048 #define _HPRE_DH_GRP15 3072 #define _HPRE_DH_GRP16 4096 switch (key_sz) { case _HPRE_DH_GRP1: case _HPRE_DH_GRP2: case _HPRE_DH_GRP5: case _HPRE_DH_GRP14: case _HPRE_DH_GRP15: case _HPRE_DH_GRP16: return 0; default: return -EINVAL; } } static int hpre_dh_set_params(struct hpre_ctx *ctx, struct dh *params) { struct device *dev = ctx->dev; unsigned int sz; if (params->p_size > HPRE_DH_MAX_P_SZ) return 
-EINVAL; if (hpre_is_dh_params_length_valid(params->p_size << HPRE_BITS_2_BYTES_SHIFT)) return -EINVAL; sz = ctx->key_sz = params->p_size; ctx->dh.xa_p = dma_alloc_coherent(dev, sz << 1, &ctx->dh.dma_xa_p, GFP_KERNEL); if (!ctx->dh.xa_p) return -ENOMEM; memcpy(ctx->dh.xa_p + sz, params->p, sz); /* If g equals 2 don't copy it */ if (params->g_size == 1 && *(char *)params->g == HPRE_DH_G_FLAG) { ctx->crt_g2_mode = true; return 0; } ctx->dh.g = dma_alloc_coherent(dev, sz, &ctx->dh.dma_g, GFP_KERNEL); if (!ctx->dh.g) { dma_free_coherent(dev, sz << 1, ctx->dh.xa_p, ctx->dh.dma_xa_p); ctx->dh.xa_p = NULL; return -ENOMEM; } memcpy(ctx->dh.g + (sz - params->g_size), params->g, params->g_size); return 0; } static void hpre_dh_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) { struct device *dev = ctx->dev; unsigned int sz = ctx->key_sz; if (is_clear_all) hisi_qm_stop_qp(ctx->qp); if (ctx->dh.g) { dma_free_coherent(dev, sz, ctx->dh.g, ctx->dh.dma_g); ctx->dh.g = NULL; } if (ctx->dh.xa_p) { memzero_explicit(ctx->dh.xa_p, sz); dma_free_coherent(dev, sz << 1, ctx->dh.xa_p, ctx->dh.dma_xa_p); ctx->dh.xa_p = NULL; } hpre_ctx_clear(ctx, is_clear_all); } static int hpre_dh_set_secret(struct crypto_kpp *tfm, const void *buf, unsigned int len) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); struct dh params; int ret; if (crypto_dh_decode_key(buf, len, &params) < 0) return -EINVAL; /* Free old secret if any */ hpre_dh_clear_ctx(ctx, false); ret = hpre_dh_set_params(ctx, &params); if (ret < 0) goto err_clear_ctx; memcpy(ctx->dh.xa_p + (ctx->key_sz - params.key_size), params.key, params.key_size); return 0; err_clear_ctx: hpre_dh_clear_ctx(ctx, false); return ret; } static unsigned int hpre_dh_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); return ctx->key_sz; } static int hpre_dh_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); return hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); } static void hpre_dh_exit_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); hpre_dh_clear_ctx(ctx, true); } static void hpre_rsa_drop_leading_zeros(const char **ptr, size_t *len) { while (!**ptr && *len) { (*ptr)++; (*len)--; } } static bool hpre_rsa_key_size_is_support(unsigned int len) { unsigned int bits = len << HPRE_BITS_2_BYTES_SHIFT; #define _RSA_1024BITS_KEY_WDTH 1024 #define _RSA_2048BITS_KEY_WDTH 2048 #define _RSA_3072BITS_KEY_WDTH 3072 #define _RSA_4096BITS_KEY_WDTH 4096 switch (bits) { case _RSA_1024BITS_KEY_WDTH: case _RSA_2048BITS_KEY_WDTH: case _RSA_3072BITS_KEY_WDTH: case _RSA_4096BITS_KEY_WDTH: return true; default: return false; } } static int hpre_rsa_enc(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); void *tmp = akcipher_request_ctx(req); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz()); struct hpre_sqe *msg = &hpre_req->req; int ret; /* For 512 and 1536 bits key size, use soft tfm instead */ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); ret = crypto_akcipher_encrypt(req); akcipher_request_set_tfm(req, tfm); return ret; } if (unlikely(!ctx->rsa.pubkey)) return -EINVAL; ret = hpre_msg_request_set(ctx, req, true); if (unlikely(ret)) return ret; msg->dw0 |= cpu_to_le32(HPRE_ALG_NC_NCRT); msg->key = cpu_to_le64(ctx->rsa.dma_pubkey); ret = hpre_hw_data_init(hpre_req, req->src, 
req->src_len, 1, 0); if (unlikely(ret)) goto clear_all; ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0); if (unlikely(ret)) goto clear_all; /* success */ ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; clear_all: hpre_rm_req_from_ctx(hpre_req); hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); return ret; } static int hpre_rsa_dec(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); void *tmp = akcipher_request_ctx(req); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz()); struct hpre_sqe *msg = &hpre_req->req; int ret; /* For 512 and 1536 bits key size, use soft tfm instead */ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || ctx->key_sz == HPRE_RSA_1536BITS_KSZ) { akcipher_request_set_tfm(req, ctx->rsa.soft_tfm); ret = crypto_akcipher_decrypt(req); akcipher_request_set_tfm(req, tfm); return ret; } if (unlikely(!ctx->rsa.prikey)) return -EINVAL; ret = hpre_msg_request_set(ctx, req, true); if (unlikely(ret)) return ret; if (ctx->crt_g2_mode) { msg->key = cpu_to_le64(ctx->rsa.dma_crt_prikey); msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_NC_CRT); } else { msg->key = cpu_to_le64(ctx->rsa.dma_prikey); msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_NC_NCRT); } ret = hpre_hw_data_init(hpre_req, req->src, req->src_len, 1, 0); if (unlikely(ret)) goto clear_all; ret = hpre_hw_data_init(hpre_req, req->dst, req->dst_len, 0, 0); if (unlikely(ret)) goto clear_all; /* success */ ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; clear_all: hpre_rm_req_from_ctx(hpre_req); hpre_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); return ret; } static int hpre_rsa_set_n(struct hpre_ctx *ctx, const char *value, size_t vlen, bool private) { const char *ptr = value; hpre_rsa_drop_leading_zeros(&ptr, &vlen); ctx->key_sz = vlen; /* if invalid key size provided, we use software tfm */ if (!hpre_rsa_key_size_is_support(ctx->key_sz)) return 0; ctx->rsa.pubkey = dma_alloc_coherent(ctx->dev, vlen << 1, &ctx->rsa.dma_pubkey, GFP_KERNEL); if (!ctx->rsa.pubkey) return -ENOMEM; if (private) { ctx->rsa.prikey = dma_alloc_coherent(ctx->dev, vlen << 1, &ctx->rsa.dma_prikey, GFP_KERNEL); if (!ctx->rsa.prikey) { dma_free_coherent(ctx->dev, vlen << 1, ctx->rsa.pubkey, ctx->rsa.dma_pubkey); ctx->rsa.pubkey = NULL; return -ENOMEM; } memcpy(ctx->rsa.prikey + vlen, ptr, vlen); } memcpy(ctx->rsa.pubkey + vlen, ptr, vlen); /* Using hardware HPRE to do RSA */ return 1; } static int hpre_rsa_set_e(struct hpre_ctx *ctx, const char *value, size_t vlen) { const char *ptr = value; hpre_rsa_drop_leading_zeros(&ptr, &vlen); if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) return -EINVAL; memcpy(ctx->rsa.pubkey + ctx->key_sz - vlen, ptr, vlen); return 0; } static int hpre_rsa_set_d(struct hpre_ctx *ctx, const char *value, size_t vlen) { const char *ptr = value; hpre_rsa_drop_leading_zeros(&ptr, &vlen); if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) return -EINVAL; memcpy(ctx->rsa.prikey + ctx->key_sz - vlen, ptr, vlen); return 0; } static int hpre_crt_para_get(char *para, size_t para_sz, const char *raw, size_t raw_sz) { const char *ptr = raw; size_t len = raw_sz; hpre_rsa_drop_leading_zeros(&ptr, &len); if (!len || len > para_sz) return -EINVAL; memcpy(para + para_sz - len, ptr, len); return 0; } static int hpre_rsa_setkey_crt(struct hpre_ctx *ctx, struct rsa_key *rsa_key) { unsigned int hlf_ksz = ctx->key_sz >> 1; struct device *dev = ctx->dev; u64 offset; int ret; 
ctx->rsa.crt_prikey = dma_alloc_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, &ctx->rsa.dma_crt_prikey, GFP_KERNEL); if (!ctx->rsa.crt_prikey) return -ENOMEM; ret = hpre_crt_para_get(ctx->rsa.crt_prikey, hlf_ksz, rsa_key->dq, rsa_key->dq_sz); if (ret) goto free_key; offset = hlf_ksz; ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz, rsa_key->dp, rsa_key->dp_sz); if (ret) goto free_key; offset = hlf_ksz * HPRE_CRT_Q; ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz, rsa_key->q, rsa_key->q_sz); if (ret) goto free_key; offset = hlf_ksz * HPRE_CRT_P; ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz, rsa_key->p, rsa_key->p_sz); if (ret) goto free_key; offset = hlf_ksz * HPRE_CRT_INV; ret = hpre_crt_para_get(ctx->rsa.crt_prikey + offset, hlf_ksz, rsa_key->qinv, rsa_key->qinv_sz); if (ret) goto free_key; ctx->crt_g2_mode = true; return 0; free_key: offset = hlf_ksz * HPRE_CRT_PRMS; memzero_explicit(ctx->rsa.crt_prikey, offset); dma_free_coherent(dev, hlf_ksz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey); ctx->rsa.crt_prikey = NULL; ctx->crt_g2_mode = false; return ret; } /* If it is clear all, all the resources of the QP will be cleaned. */ static void hpre_rsa_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all) { unsigned int half_key_sz = ctx->key_sz >> 1; struct device *dev = ctx->dev; if (is_clear_all) hisi_qm_stop_qp(ctx->qp); if (ctx->rsa.pubkey) { dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.pubkey, ctx->rsa.dma_pubkey); ctx->rsa.pubkey = NULL; } if (ctx->rsa.crt_prikey) { memzero_explicit(ctx->rsa.crt_prikey, half_key_sz * HPRE_CRT_PRMS); dma_free_coherent(dev, half_key_sz * HPRE_CRT_PRMS, ctx->rsa.crt_prikey, ctx->rsa.dma_crt_prikey); ctx->rsa.crt_prikey = NULL; } if (ctx->rsa.prikey) { memzero_explicit(ctx->rsa.prikey, ctx->key_sz); dma_free_coherent(dev, ctx->key_sz << 1, ctx->rsa.prikey, ctx->rsa.dma_prikey); ctx->rsa.prikey = NULL; } hpre_ctx_clear(ctx, is_clear_all); } /* * we should judge if it is CRT or not, * CRT: return true, N-CRT: return false . 
*/ static bool hpre_is_crt_key(struct rsa_key *key) { u16 len = key->p_sz + key->q_sz + key->dp_sz + key->dq_sz + key->qinv_sz; #define LEN_OF_NCRT_PARA 5 /* N-CRT less than 5 parameters */ return len > LEN_OF_NCRT_PARA; } static int hpre_rsa_setkey(struct hpre_ctx *ctx, const void *key, unsigned int keylen, bool private) { struct rsa_key rsa_key; int ret; hpre_rsa_clear_ctx(ctx, false); if (private) ret = rsa_parse_priv_key(&rsa_key, key, keylen); else ret = rsa_parse_pub_key(&rsa_key, key, keylen); if (ret < 0) return ret; ret = hpre_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz, private); if (ret <= 0) return ret; if (private) { ret = hpre_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz); if (ret < 0) goto free; if (hpre_is_crt_key(&rsa_key)) { ret = hpre_rsa_setkey_crt(ctx, &rsa_key); if (ret < 0) goto free; } } ret = hpre_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz); if (ret < 0) goto free; if ((private && !ctx->rsa.prikey) || !ctx->rsa.pubkey) { ret = -EINVAL; goto free; } return 0; free: hpre_rsa_clear_ctx(ctx, false); return ret; } static int hpre_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); int ret; ret = crypto_akcipher_set_pub_key(ctx->rsa.soft_tfm, key, keylen); if (ret) return ret; return hpre_rsa_setkey(ctx, key, keylen, false); } static int hpre_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); int ret; ret = crypto_akcipher_set_priv_key(ctx->rsa.soft_tfm, key, keylen); if (ret) return ret; return hpre_rsa_setkey(ctx, key, keylen, true); } static unsigned int hpre_rsa_max_size(struct crypto_akcipher *tfm) { struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); /* For 512 and 1536 bits key size, use soft tfm instead */ if (ctx->key_sz == HPRE_RSA_512BITS_KSZ || ctx->key_sz == HPRE_RSA_1536BITS_KSZ) return crypto_akcipher_maxsize(ctx->rsa.soft_tfm); return ctx->key_sz; } static int hpre_rsa_init_tfm(struct crypto_akcipher *tfm) { struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); int ret; ctx->rsa.soft_tfm = crypto_alloc_akcipher("rsa-generic", 0, 0); if (IS_ERR(ctx->rsa.soft_tfm)) { pr_err("Can not alloc_akcipher!\n"); return PTR_ERR(ctx->rsa.soft_tfm); } akcipher_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); ret = hpre_ctx_init(ctx, HPRE_V2_ALG_TYPE); if (ret) crypto_free_akcipher(ctx->rsa.soft_tfm); return ret; } static void hpre_rsa_exit_tfm(struct crypto_akcipher *tfm) { struct hpre_ctx *ctx = akcipher_tfm_ctx(tfm); hpre_rsa_clear_ctx(ctx, true); crypto_free_akcipher(ctx->rsa.soft_tfm); } static void hpre_key_to_big_end(u8 *data, int len) { int i, j; for (i = 0; i < len / 2; i++) { j = len - i - 1; swap(data[j], data[i]); } } static void hpre_ecc_clear_ctx(struct hpre_ctx *ctx, bool is_clear_all, bool is_ecdh) { struct device *dev = ctx->dev; unsigned int sz = ctx->key_sz; unsigned int shift = sz << 1; if (is_clear_all) hisi_qm_stop_qp(ctx->qp); if (is_ecdh && ctx->ecdh.p) { /* ecdh: p->a->k->b */ memzero_explicit(ctx->ecdh.p + shift, sz); dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p); ctx->ecdh.p = NULL; } else if (!is_ecdh && ctx->curve25519.p) { /* curve25519: p->a->k */ memzero_explicit(ctx->curve25519.p + shift, sz); dma_free_coherent(dev, sz << 2, ctx->curve25519.p, ctx->curve25519.dma_p); ctx->curve25519.p = NULL; } hpre_ctx_clear(ctx, is_clear_all); } /* * The bits of 192/224/256/384/521 are supported by HPRE, * and convert the bits like: * bits<=256, bits=256; 256<bits<=384, bits=384; 
384<bits<=576, bits=576; * If the parameter bit width is insufficient, then we fill in the * high-order zeros by soft, so TASK_LENGTH1 is 0x3/0x5/0x8; */ static unsigned int hpre_ecdh_supported_curve(unsigned short id) { switch (id) { case ECC_CURVE_NIST_P192: case ECC_CURVE_NIST_P256: return HPRE_ECC_HW256_KSZ_B; case ECC_CURVE_NIST_P384: return HPRE_ECC_HW384_KSZ_B; default: break; } return 0; } static void fill_curve_param(void *addr, u64 *param, unsigned int cur_sz, u8 ndigits) { unsigned int sz = cur_sz - (ndigits - 1) * sizeof(u64); u8 i = 0; while (i < ndigits - 1) { memcpy(addr + sizeof(u64) * i, &param[i], sizeof(u64)); i++; } memcpy(addr + sizeof(u64) * i, &param[ndigits - 1], sz); hpre_key_to_big_end((u8 *)addr, cur_sz); } static int hpre_ecdh_fill_curve(struct hpre_ctx *ctx, struct ecdh *params, unsigned int cur_sz) { unsigned int shifta = ctx->key_sz << 1; unsigned int shiftb = ctx->key_sz << 2; void *p = ctx->ecdh.p + ctx->key_sz - cur_sz; void *a = ctx->ecdh.p + shifta - cur_sz; void *b = ctx->ecdh.p + shiftb - cur_sz; void *x = ctx->ecdh.g + ctx->key_sz - cur_sz; void *y = ctx->ecdh.g + shifta - cur_sz; const struct ecc_curve *curve = ecc_get_curve(ctx->curve_id); char *n; if (unlikely(!curve)) return -EINVAL; n = kzalloc(ctx->key_sz, GFP_KERNEL); if (!n) return -ENOMEM; fill_curve_param(p, curve->p, cur_sz, curve->g.ndigits); fill_curve_param(a, curve->a, cur_sz, curve->g.ndigits); fill_curve_param(b, curve->b, cur_sz, curve->g.ndigits); fill_curve_param(x, curve->g.x, cur_sz, curve->g.ndigits); fill_curve_param(y, curve->g.y, cur_sz, curve->g.ndigits); fill_curve_param(n, curve->n, cur_sz, curve->g.ndigits); if (params->key_size == cur_sz && memcmp(params->key, n, cur_sz) >= 0) { kfree(n); return -EINVAL; } kfree(n); return 0; } static unsigned int hpre_ecdh_get_curvesz(unsigned short id) { switch (id) { case ECC_CURVE_NIST_P192: return HPRE_ECC_NIST_P192_N_SIZE; case ECC_CURVE_NIST_P256: return HPRE_ECC_NIST_P256_N_SIZE; case ECC_CURVE_NIST_P384: return HPRE_ECC_NIST_P384_N_SIZE; default: break; } return 0; } static int hpre_ecdh_set_param(struct hpre_ctx *ctx, struct ecdh *params) { struct device *dev = ctx->dev; unsigned int sz, shift, curve_sz; int ret; ctx->key_sz = hpre_ecdh_supported_curve(ctx->curve_id); if (!ctx->key_sz) return -EINVAL; curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id); if (!curve_sz || params->key_size > curve_sz) return -EINVAL; sz = ctx->key_sz; if (!ctx->ecdh.p) { ctx->ecdh.p = dma_alloc_coherent(dev, sz << 3, &ctx->ecdh.dma_p, GFP_KERNEL); if (!ctx->ecdh.p) return -ENOMEM; } shift = sz << 2; ctx->ecdh.g = ctx->ecdh.p + shift; ctx->ecdh.dma_g = ctx->ecdh.dma_p + shift; ret = hpre_ecdh_fill_curve(ctx, params, curve_sz); if (ret) { dev_err(dev, "failed to fill curve_param, ret = %d!\n", ret); dma_free_coherent(dev, sz << 3, ctx->ecdh.p, ctx->ecdh.dma_p); ctx->ecdh.p = NULL; return ret; } return 0; } static bool hpre_key_is_zero(char *key, unsigned short key_sz) { int i; for (i = 0; i < key_sz; i++) if (key[i]) return false; return true; } static int ecdh_gen_privkey(struct hpre_ctx *ctx, struct ecdh *params) { struct device *dev = ctx->dev; int ret; ret = crypto_get_default_rng(); if (ret) { dev_err(dev, "failed to get default rng, ret = %d!\n", ret); return ret; } ret = crypto_rng_get_bytes(crypto_default_rng, (u8 *)params->key, params->key_size); crypto_put_default_rng(); if (ret) dev_err(dev, "failed to get rng, ret = %d!\n", ret); return ret; } static int hpre_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf, unsigned int len) { struct 
hpre_ctx *ctx = kpp_tfm_ctx(tfm); unsigned int sz, sz_shift, curve_sz; struct device *dev = ctx->dev; char key[HPRE_ECC_MAX_KSZ]; struct ecdh params; int ret; if (crypto_ecdh_decode_key(buf, len, &params) < 0) { dev_err(dev, "failed to decode ecdh key!\n"); return -EINVAL; } /* Use stdrng to generate private key */ if (!params.key || !params.key_size) { params.key = key; curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id); if (!curve_sz) { dev_err(dev, "Invalid curve size!\n"); return -EINVAL; } params.key_size = curve_sz - 1; ret = ecdh_gen_privkey(ctx, &params); if (ret) return ret; } if (hpre_key_is_zero(params.key, params.key_size)) { dev_err(dev, "Invalid hpre key!\n"); return -EINVAL; } hpre_ecc_clear_ctx(ctx, false, true); ret = hpre_ecdh_set_param(ctx, &params); if (ret < 0) { dev_err(dev, "failed to set hpre param, ret = %d!\n", ret); return ret; } sz = ctx->key_sz; sz_shift = (sz << 1) + sz - params.key_size; memcpy(ctx->ecdh.p + sz_shift, params.key, params.key_size); return 0; } static void hpre_ecdh_hw_data_clr_all(struct hpre_ctx *ctx, struct hpre_asym_request *req, struct scatterlist *dst, struct scatterlist *src) { struct device *dev = ctx->dev; struct hpre_sqe *sqe = &req->req; dma_addr_t dma; dma = le64_to_cpu(sqe->in); if (unlikely(dma_mapping_error(dev, dma))) return; if (src && req->src) dma_free_coherent(dev, ctx->key_sz << 2, req->src, dma); dma = le64_to_cpu(sqe->out); if (unlikely(dma_mapping_error(dev, dma))) return; if (req->dst) dma_free_coherent(dev, ctx->key_sz << 1, req->dst, dma); if (dst) dma_unmap_single(dev, dma, ctx->key_sz << 1, DMA_FROM_DEVICE); } static void hpre_ecdh_cb(struct hpre_ctx *ctx, void *resp) { unsigned int curve_sz = hpre_ecdh_get_curvesz(ctx->curve_id); struct hpre_dfx *dfx = ctx->hpre->debug.dfx; struct hpre_asym_request *req = NULL; struct kpp_request *areq; u64 overtime_thrhld; char *p; int ret; ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); areq = req->areq.ecdh; areq->dst_len = ctx->key_sz << 1; overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); p = sg_virt(areq->dst); memmove(p, p + ctx->key_sz - curve_sz, curve_sz); memmove(p + curve_sz, p + areq->dst_len - curve_sz, curve_sz); hpre_ecdh_hw_data_clr_all(ctx, req, areq->dst, areq->src); kpp_request_complete(areq, ret); atomic64_inc(&dfx[HPRE_RECV_CNT].value); } static int hpre_ecdh_msg_request_set(struct hpre_ctx *ctx, struct kpp_request *req) { struct hpre_asym_request *h_req; struct hpre_sqe *msg; int req_id; void *tmp; if (req->dst_len < ctx->key_sz << 1) { req->dst_len = ctx->key_sz << 1; return -EINVAL; } tmp = kpp_request_ctx(req); h_req = PTR_ALIGN(tmp, hpre_align_sz()); h_req->cb = hpre_ecdh_cb; h_req->areq.ecdh = req; msg = &h_req->req; memset(msg, 0, sizeof(*msg)); msg->in = cpu_to_le64(DMA_MAPPING_ERROR); msg->out = cpu_to_le64(DMA_MAPPING_ERROR); msg->key = cpu_to_le64(ctx->ecdh.dma_p); msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT); msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; h_req->ctx = ctx; req_id = hpre_add_req_to_ctx(h_req); if (req_id < 0) return -EBUSY; msg->tag = cpu_to_le16((u16)req_id); return 0; } static int hpre_ecdh_src_data_init(struct hpre_asym_request *hpre_req, struct scatterlist *data, unsigned int len) { struct hpre_sqe *msg = &hpre_req->req; struct hpre_ctx *ctx = hpre_req->ctx; struct device *dev = ctx->dev; unsigned int tmpshift; dma_addr_t dma = 0; void *ptr; int shift; /* Src_data include gx 
and gy. */ shift = ctx->key_sz - (len >> 1); if (unlikely(shift < 0)) return -EINVAL; ptr = dma_alloc_coherent(dev, ctx->key_sz << 2, &dma, GFP_KERNEL); if (unlikely(!ptr)) return -ENOMEM; tmpshift = ctx->key_sz << 1; scatterwalk_map_and_copy(ptr + tmpshift, data, 0, len, 0); memcpy(ptr + shift, ptr + tmpshift, len >> 1); memcpy(ptr + ctx->key_sz + shift, ptr + tmpshift + (len >> 1), len >> 1); hpre_req->src = ptr; msg->in = cpu_to_le64(dma); return 0; } static int hpre_ecdh_dst_data_init(struct hpre_asym_request *hpre_req, struct scatterlist *data, unsigned int len) { struct hpre_sqe *msg = &hpre_req->req; struct hpre_ctx *ctx = hpre_req->ctx; struct device *dev = ctx->dev; dma_addr_t dma; if (unlikely(!data || !sg_is_last(data) || len != ctx->key_sz << 1)) { dev_err(dev, "data or data length is illegal!\n"); return -EINVAL; } hpre_req->dst = NULL; dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, dma))) { dev_err(dev, "dma map data err!\n"); return -ENOMEM; } msg->out = cpu_to_le64(dma); return 0; } static int hpre_ecdh_compute_value(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); struct device *dev = ctx->dev; void *tmp = kpp_request_ctx(req); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz()); struct hpre_sqe *msg = &hpre_req->req; int ret; ret = hpre_ecdh_msg_request_set(ctx, req); if (unlikely(ret)) { dev_err(dev, "failed to set ecdh request, ret = %d!\n", ret); return ret; } if (req->src) { ret = hpre_ecdh_src_data_init(hpre_req, req->src, req->src_len); if (unlikely(ret)) { dev_err(dev, "failed to init src data, ret = %d!\n", ret); goto clear_all; } } else { msg->in = cpu_to_le64(ctx->ecdh.dma_g); } ret = hpre_ecdh_dst_data_init(hpre_req, req->dst, req->dst_len); if (unlikely(ret)) { dev_err(dev, "failed to init dst data, ret = %d!\n", ret); goto clear_all; } msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_ECC_MUL); ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; clear_all: hpre_rm_req_from_ctx(hpre_req); hpre_ecdh_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); return ret; } static unsigned int hpre_ecdh_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); /* max size is the pub_key_size, include x and y */ return ctx->key_sz << 1; } static int hpre_ecdh_nist_p192_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); ctx->curve_id = ECC_CURVE_NIST_P192; kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static int hpre_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); ctx->curve_id = ECC_CURVE_NIST_P256; kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static int hpre_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); ctx->curve_id = ECC_CURVE_NIST_P384; kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static void hpre_ecdh_exit_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); hpre_ecc_clear_ctx(ctx, true, true); } static void hpre_curve25519_fill_curve(struct hpre_ctx *ctx, const void *buf, unsigned int len) { u8 secret[CURVE25519_KEY_SIZE] = { 0 }; unsigned int sz = ctx->key_sz; const struct ecc_curve *curve; unsigned int shift = sz << 1; void *p; 
/* * The key from 'buf' is in little-endian, we should preprocess it as * the description in rfc7748: "k[0] &= 248, k[31] &= 127, k[31] |= 64", * then convert it to big endian. Only in this way, the result can be * the same as the software curve-25519 that exists in crypto. */ memcpy(secret, buf, len); curve25519_clamp_secret(secret); hpre_key_to_big_end(secret, CURVE25519_KEY_SIZE); p = ctx->curve25519.p + sz - len; curve = ecc_get_curve25519(); /* fill curve parameters */ fill_curve_param(p, curve->p, len, curve->g.ndigits); fill_curve_param(p + sz, curve->a, len, curve->g.ndigits); memcpy(p + shift, secret, len); fill_curve_param(p + shift + sz, curve->g.x, len, curve->g.ndigits); memzero_explicit(secret, CURVE25519_KEY_SIZE); } static int hpre_curve25519_set_param(struct hpre_ctx *ctx, const void *buf, unsigned int len) { struct device *dev = ctx->dev; unsigned int sz = ctx->key_sz; unsigned int shift = sz << 1; /* p->a->k->gx */ if (!ctx->curve25519.p) { ctx->curve25519.p = dma_alloc_coherent(dev, sz << 2, &ctx->curve25519.dma_p, GFP_KERNEL); if (!ctx->curve25519.p) return -ENOMEM; } ctx->curve25519.g = ctx->curve25519.p + shift + sz; ctx->curve25519.dma_g = ctx->curve25519.dma_p + shift + sz; hpre_curve25519_fill_curve(ctx, buf, len); return 0; } static int hpre_curve25519_set_secret(struct crypto_kpp *tfm, const void *buf, unsigned int len) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); struct device *dev = ctx->dev; int ret = -EINVAL; if (len != CURVE25519_KEY_SIZE || !crypto_memneq(buf, curve25519_null_point, CURVE25519_KEY_SIZE)) { dev_err(dev, "key is null or key len is not 32bytes!\n"); return ret; } /* Free old secret if any */ hpre_ecc_clear_ctx(ctx, false, false); ctx->key_sz = CURVE25519_KEY_SIZE; ret = hpre_curve25519_set_param(ctx, buf, CURVE25519_KEY_SIZE); if (ret) { dev_err(dev, "failed to set curve25519 param, ret = %d!\n", ret); hpre_ecc_clear_ctx(ctx, false, false); return ret; } return 0; } static void hpre_curve25519_hw_data_clr_all(struct hpre_ctx *ctx, struct hpre_asym_request *req, struct scatterlist *dst, struct scatterlist *src) { struct device *dev = ctx->dev; struct hpre_sqe *sqe = &req->req; dma_addr_t dma; dma = le64_to_cpu(sqe->in); if (unlikely(dma_mapping_error(dev, dma))) return; if (src && req->src) dma_free_coherent(dev, ctx->key_sz, req->src, dma); dma = le64_to_cpu(sqe->out); if (unlikely(dma_mapping_error(dev, dma))) return; if (req->dst) dma_free_coherent(dev, ctx->key_sz, req->dst, dma); if (dst) dma_unmap_single(dev, dma, ctx->key_sz, DMA_FROM_DEVICE); } static void hpre_curve25519_cb(struct hpre_ctx *ctx, void *resp) { struct hpre_dfx *dfx = ctx->hpre->debug.dfx; struct hpre_asym_request *req = NULL; struct kpp_request *areq; u64 overtime_thrhld; int ret; ret = hpre_alg_res_post_hf(ctx, resp, (void **)&req); areq = req->areq.curve25519; areq->dst_len = ctx->key_sz; overtime_thrhld = atomic64_read(&dfx[HPRE_OVERTIME_THRHLD].value); if (overtime_thrhld && hpre_is_bd_timeout(req, overtime_thrhld)) atomic64_inc(&dfx[HPRE_OVER_THRHLD_CNT].value); hpre_key_to_big_end(sg_virt(areq->dst), CURVE25519_KEY_SIZE); hpre_curve25519_hw_data_clr_all(ctx, req, areq->dst, areq->src); kpp_request_complete(areq, ret); atomic64_inc(&dfx[HPRE_RECV_CNT].value); } static int hpre_curve25519_msg_request_set(struct hpre_ctx *ctx, struct kpp_request *req) { struct hpre_asym_request *h_req; struct hpre_sqe *msg; int req_id; void *tmp; if (unlikely(req->dst_len < ctx->key_sz)) { req->dst_len = ctx->key_sz; return -EINVAL; } tmp = kpp_request_ctx(req); h_req = PTR_ALIGN(tmp, 
hpre_align_sz()); h_req->cb = hpre_curve25519_cb; h_req->areq.curve25519 = req; msg = &h_req->req; memset(msg, 0, sizeof(*msg)); msg->in = cpu_to_le64(DMA_MAPPING_ERROR); msg->out = cpu_to_le64(DMA_MAPPING_ERROR); msg->key = cpu_to_le64(ctx->curve25519.dma_p); msg->dw0 |= cpu_to_le32(0x1U << HPRE_SQE_DONE_SHIFT); msg->task_len1 = (ctx->key_sz >> HPRE_BITS_2_BYTES_SHIFT) - 1; h_req->ctx = ctx; req_id = hpre_add_req_to_ctx(h_req); if (req_id < 0) return -EBUSY; msg->tag = cpu_to_le16((u16)req_id); return 0; } static void hpre_curve25519_src_modulo_p(u8 *ptr) { int i; for (i = 0; i < CURVE25519_KEY_SIZE - 1; i++) ptr[i] = 0; /* The residue is ptr's last byte minus 0xed (the last byte of p) */ ptr[i] -= 0xed; } static int hpre_curve25519_src_init(struct hpre_asym_request *hpre_req, struct scatterlist *data, unsigned int len) { struct hpre_sqe *msg = &hpre_req->req; struct hpre_ctx *ctx = hpre_req->ctx; struct device *dev = ctx->dev; u8 p[CURVE25519_KEY_SIZE] = { 0 }; const struct ecc_curve *curve; dma_addr_t dma = 0; u8 *ptr; if (len != CURVE25519_KEY_SIZE) { dev_err(dev, "src_data len is not 32 bytes, len = %u!\n", len); return -EINVAL; } ptr = dma_alloc_coherent(dev, ctx->key_sz, &dma, GFP_KERNEL); if (unlikely(!ptr)) return -ENOMEM; scatterwalk_map_and_copy(ptr, data, 0, len, 0); if (!crypto_memneq(ptr, curve25519_null_point, CURVE25519_KEY_SIZE)) { dev_err(dev, "gx is null!\n"); goto err; } /* * Src_data (gx) is in little-endian order. The MSB of its final byte is * masked as described in RFC 7748, and the value is then converted to * big-endian form so the HPRE hardware can use it. */ ptr[31] &= 0x7f; hpre_key_to_big_end(ptr, CURVE25519_KEY_SIZE); curve = ecc_get_curve25519(); fill_curve_param(p, curve->p, CURVE25519_KEY_SIZE, curve->g.ndigits); /* * When src_data falls in [2^255 - 19, 2^255 - 1] it is not below p, so * take its residue modulo p and then use it.
*/ if (memcmp(ptr, p, ctx->key_sz) == 0) { dev_err(dev, "gx is p!\n"); goto err; } else if (memcmp(ptr, p, ctx->key_sz) > 0) { hpre_curve25519_src_modulo_p(ptr); } hpre_req->src = ptr; msg->in = cpu_to_le64(dma); return 0; err: dma_free_coherent(dev, ctx->key_sz, ptr, dma); return -EINVAL; } static int hpre_curve25519_dst_init(struct hpre_asym_request *hpre_req, struct scatterlist *data, unsigned int len) { struct hpre_sqe *msg = &hpre_req->req; struct hpre_ctx *ctx = hpre_req->ctx; struct device *dev = ctx->dev; dma_addr_t dma; if (!data || !sg_is_last(data) || len != ctx->key_sz) { dev_err(dev, "data or data length is illegal!\n"); return -EINVAL; } hpre_req->dst = NULL; dma = dma_map_single(dev, sg_virt(data), len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, dma))) { dev_err(dev, "dma map data err!\n"); return -ENOMEM; } msg->out = cpu_to_le64(dma); return 0; } static int hpre_curve25519_compute_value(struct kpp_request *req) { struct crypto_kpp *tfm = crypto_kpp_reqtfm(req); struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); struct device *dev = ctx->dev; void *tmp = kpp_request_ctx(req); struct hpre_asym_request *hpre_req = PTR_ALIGN(tmp, hpre_align_sz()); struct hpre_sqe *msg = &hpre_req->req; int ret; ret = hpre_curve25519_msg_request_set(ctx, req); if (unlikely(ret)) { dev_err(dev, "failed to set curve25519 request, ret = %d!\n", ret); return ret; } if (req->src) { ret = hpre_curve25519_src_init(hpre_req, req->src, req->src_len); if (unlikely(ret)) { dev_err(dev, "failed to init src data, ret = %d!\n", ret); goto clear_all; } } else { msg->in = cpu_to_le64(ctx->curve25519.dma_g); } ret = hpre_curve25519_dst_init(hpre_req, req->dst, req->dst_len); if (unlikely(ret)) { dev_err(dev, "failed to init dst data, ret = %d!\n", ret); goto clear_all; } msg->dw0 = cpu_to_le32(le32_to_cpu(msg->dw0) | HPRE_ALG_CURVE25519_MUL); ret = hpre_send(ctx, msg); if (likely(!ret)) return -EINPROGRESS; clear_all: hpre_rm_req_from_ctx(hpre_req); hpre_curve25519_hw_data_clr_all(ctx, hpre_req, req->dst, req->src); return ret; } static unsigned int hpre_curve25519_max_size(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); return ctx->key_sz; } static int hpre_curve25519_init_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); kpp_set_reqsize(tfm, sizeof(struct hpre_asym_request) + hpre_align_pd()); return hpre_ctx_init(ctx, HPRE_V3_ECC_ALG_TYPE); } static void hpre_curve25519_exit_tfm(struct crypto_kpp *tfm) { struct hpre_ctx *ctx = kpp_tfm_ctx(tfm); hpre_ecc_clear_ctx(ctx, true, false); } static struct akcipher_alg rsa = { .sign = hpre_rsa_dec, .verify = hpre_rsa_enc, .encrypt = hpre_rsa_enc, .decrypt = hpre_rsa_dec, .set_pub_key = hpre_rsa_setpubkey, .set_priv_key = hpre_rsa_setprivkey, .max_size = hpre_rsa_max_size, .init = hpre_rsa_init_tfm, .exit = hpre_rsa_exit_tfm, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, .cra_name = "rsa", .cra_driver_name = "hpre-rsa", .cra_module = THIS_MODULE, }, }; static struct kpp_alg dh = { .set_secret = hpre_dh_set_secret, .generate_public_key = hpre_dh_compute_value, .compute_shared_secret = hpre_dh_compute_value, .max_size = hpre_dh_max_size, .init = hpre_dh_init_tfm, .exit = hpre_dh_exit_tfm, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, .cra_name = "dh", .cra_driver_name = "hpre-dh", .cra_module = THIS_MODULE, }, }; static struct kpp_alg ecdh_curves[] = { { .set_secret = hpre_ecdh_set_secret, .generate_public_key = hpre_ecdh_compute_value, 
.compute_shared_secret = hpre_ecdh_compute_value, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p192_init_tfm, .exit = hpre_ecdh_exit_tfm, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, .cra_name = "ecdh-nist-p192", .cra_driver_name = "hpre-ecdh-nist-p192", .cra_module = THIS_MODULE, }, }, { .set_secret = hpre_ecdh_set_secret, .generate_public_key = hpre_ecdh_compute_value, .compute_shared_secret = hpre_ecdh_compute_value, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p256_init_tfm, .exit = hpre_ecdh_exit_tfm, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, .cra_name = "ecdh-nist-p256", .cra_driver_name = "hpre-ecdh-nist-p256", .cra_module = THIS_MODULE, }, }, { .set_secret = hpre_ecdh_set_secret, .generate_public_key = hpre_ecdh_compute_value, .compute_shared_secret = hpre_ecdh_compute_value, .max_size = hpre_ecdh_max_size, .init = hpre_ecdh_nist_p384_init_tfm, .exit = hpre_ecdh_exit_tfm, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, .cra_name = "ecdh-nist-p384", .cra_driver_name = "hpre-ecdh-nist-p384", .cra_module = THIS_MODULE, }, } }; static struct kpp_alg curve25519_alg = { .set_secret = hpre_curve25519_set_secret, .generate_public_key = hpre_curve25519_compute_value, .compute_shared_secret = hpre_curve25519_compute_value, .max_size = hpre_curve25519_max_size, .init = hpre_curve25519_init_tfm, .exit = hpre_curve25519_exit_tfm, .base = { .cra_ctxsize = sizeof(struct hpre_ctx), .cra_priority = HPRE_CRYPTO_ALG_PRI, .cra_name = "curve25519", .cra_driver_name = "hpre-curve25519", .cra_module = THIS_MODULE, }, }; static int hpre_register_rsa(struct hisi_qm *qm) { int ret; if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP)) return 0; rsa.base.cra_flags = 0; ret = crypto_register_akcipher(&rsa); if (ret) dev_err(&qm->pdev->dev, "failed to register rsa (%d)!\n", ret); return ret; } static void hpre_unregister_rsa(struct hisi_qm *qm) { if (!hpre_check_alg_support(qm, HPRE_DRV_RSA_MASK_CAP)) return; crypto_unregister_akcipher(&rsa); } static int hpre_register_dh(struct hisi_qm *qm) { int ret; if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP)) return 0; ret = crypto_register_kpp(&dh); if (ret) dev_err(&qm->pdev->dev, "failed to register dh (%d)!\n", ret); return ret; } static void hpre_unregister_dh(struct hisi_qm *qm) { if (!hpre_check_alg_support(qm, HPRE_DRV_DH_MASK_CAP)) return; crypto_unregister_kpp(&dh); } static int hpre_register_ecdh(struct hisi_qm *qm) { int ret, i; if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP)) return 0; for (i = 0; i < ARRAY_SIZE(ecdh_curves); i++) { ret = crypto_register_kpp(&ecdh_curves[i]); if (ret) { dev_err(&qm->pdev->dev, "failed to register %s (%d)!\n", ecdh_curves[i].base.cra_name, ret); goto unreg_kpp; } } return 0; unreg_kpp: for (--i; i >= 0; --i) crypto_unregister_kpp(&ecdh_curves[i]); return ret; } static void hpre_unregister_ecdh(struct hisi_qm *qm) { int i; if (!hpre_check_alg_support(qm, HPRE_DRV_ECDH_MASK_CAP)) return; for (i = ARRAY_SIZE(ecdh_curves) - 1; i >= 0; --i) crypto_unregister_kpp(&ecdh_curves[i]); } static int hpre_register_x25519(struct hisi_qm *qm) { int ret; if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP)) return 0; ret = crypto_register_kpp(&curve25519_alg); if (ret) dev_err(&qm->pdev->dev, "failed to register x25519 (%d)!\n", ret); return ret; } static void hpre_unregister_x25519(struct hisi_qm *qm) { if (!hpre_check_alg_support(qm, HPRE_DRV_X25519_MASK_CAP)) return; 
crypto_unregister_kpp(&curve25519_alg); } int hpre_algs_register(struct hisi_qm *qm) { int ret; ret = hpre_register_rsa(qm); if (ret) return ret; ret = hpre_register_dh(qm); if (ret) goto unreg_rsa; ret = hpre_register_ecdh(qm); if (ret) goto unreg_dh; ret = hpre_register_x25519(qm); if (ret) goto unreg_ecdh; return ret; unreg_ecdh: hpre_unregister_ecdh(qm); unreg_dh: hpre_unregister_dh(qm); unreg_rsa: hpre_unregister_rsa(qm); return ret; } void hpre_algs_unregister(struct hisi_qm *qm) { hpre_unregister_x25519(qm); hpre_unregister_ecdh(qm); hpre_unregister_dh(qm); hpre_unregister_rsa(qm); }
linux-master
drivers/crypto/hisilicon/hpre/hpre_crypto.c
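The curve25519 paths above rely on two byte-level tricks that are easy to miss in the flattened source: the RFC 7748 clamp ("k[0] &= 248, k[31] &= 127, k[31] |= 64") applied to the little-endian secret before it is byte-reversed for the hardware, and the shortcut in hpre_curve25519_src_modulo_p(), where a big-endian value in [2^255 - 19, 2^255 - 1] is reduced modulo p = 2^255 - 19 by zeroing the leading 31 bytes and subtracting 0xed from the last one (the difference never exceeds 18). The standalone C sketch below illustrates both steps outside the kernel; the helper names and the userspace setting are stand-ins chosen for illustration, not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define X25519_KEY_SIZE 32

/* RFC 7748 clamp on a little-endian secret (what curve25519_clamp_secret() does) */
static void clamp_secret_le(uint8_t k[X25519_KEY_SIZE])
{
	k[0] &= 248;
	k[31] &= 127;
	k[31] |= 64;
}

/* Reverse byte order in place: little-endian <-> big-endian */
static void key_to_big_end(uint8_t *buf, size_t len)
{
	for (size_t i = 0; i < len / 2; i++) {
		uint8_t tmp = buf[i];

		buf[i] = buf[len - 1 - i];
		buf[len - 1 - i] = tmp;
	}
}

/*
 * Reduce a big-endian x in [p, 2^255 - 1] modulo p = 2^255 - 19.
 * All bytes but the last equal those of p, so x - p = x[31] - 0xed <= 18.
 */
static void reduce_mod_p_be(uint8_t x[X25519_KEY_SIZE])
{
	memset(x, 0, X25519_KEY_SIZE - 1);
	x[X25519_KEY_SIZE - 1] -= 0xed;
}

int main(void)
{
	uint8_t secret[X25519_KEY_SIZE];
	uint8_t gx[X25519_KEY_SIZE];

	/* Clamp an all-0xff secret, then convert it to big-endian */
	memset(secret, 0xff, sizeof(secret));
	clamp_secret_le(secret);
	key_to_big_end(secret, sizeof(secret));
	printf("secret: %02x .. %02x\n", secret[0], secret[31]); /* 7f .. f8 */

	/* Reduce 2^255 - 1 (big-endian) modulo p: result is 18 (0x12) */
	memset(gx, 0xff, sizeof(gx));
	gx[0] = 0x7f;
	reduce_mod_p_be(gx);
	printf("gx mod p: %02x .. %02x\n", gx[0], gx[31]); /* 00 .. 12 */
	return 0;
}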
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2018-2019 HiSilicon Limited. */ #include <linux/acpi.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/topology.h> #include <linux/uacce.h> #include "hpre.h" #define HPRE_QM_ABNML_INT_MASK 0x100004 #define HPRE_CTRL_CNT_CLR_CE_BIT BIT(0) #define HPRE_COMM_CNT_CLR_CE 0x0 #define HPRE_CTRL_CNT_CLR_CE 0x301000 #define HPRE_FSM_MAX_CNT 0x301008 #define HPRE_VFG_AXQOS 0x30100c #define HPRE_VFG_AXCACHE 0x301010 #define HPRE_RDCHN_INI_CFG 0x301014 #define HPRE_AWUSR_FP_CFG 0x301018 #define HPRE_BD_ENDIAN 0x301020 #define HPRE_ECC_BYPASS 0x301024 #define HPRE_RAS_WIDTH_CFG 0x301028 #define HPRE_POISON_BYPASS 0x30102c #define HPRE_BD_ARUSR_CFG 0x301030 #define HPRE_BD_AWUSR_CFG 0x301034 #define HPRE_TYPES_ENB 0x301038 #define HPRE_RSA_ENB BIT(0) #define HPRE_ECC_ENB BIT(1) #define HPRE_DATA_RUSER_CFG 0x30103c #define HPRE_DATA_WUSER_CFG 0x301040 #define HPRE_INT_MASK 0x301400 #define HPRE_INT_STATUS 0x301800 #define HPRE_HAC_INT_MSK 0x301400 #define HPRE_HAC_RAS_CE_ENB 0x301410 #define HPRE_HAC_RAS_NFE_ENB 0x301414 #define HPRE_HAC_RAS_FE_ENB 0x301418 #define HPRE_HAC_INT_SET 0x301500 #define HPRE_RNG_TIMEOUT_NUM 0x301A34 #define HPRE_CORE_INT_ENABLE 0 #define HPRE_CORE_INT_DISABLE GENMASK(21, 0) #define HPRE_RDCHN_INI_ST 0x301a00 #define HPRE_CLSTR_BASE 0x302000 #define HPRE_CORE_EN_OFFSET 0x04 #define HPRE_CORE_INI_CFG_OFFSET 0x20 #define HPRE_CORE_INI_STATUS_OFFSET 0x80 #define HPRE_CORE_HTBT_WARN_OFFSET 0x8c #define HPRE_CORE_IS_SCHD_OFFSET 0x90 #define HPRE_RAS_CE_ENB 0x301410 #define HPRE_RAS_NFE_ENB 0x301414 #define HPRE_RAS_FE_ENB 0x301418 #define HPRE_OOO_SHUTDOWN_SEL 0x301a3c #define HPRE_HAC_RAS_FE_ENABLE 0 #define HPRE_CORE_ENB (HPRE_CLSTR_BASE + HPRE_CORE_EN_OFFSET) #define HPRE_CORE_INI_CFG (HPRE_CLSTR_BASE + HPRE_CORE_INI_CFG_OFFSET) #define HPRE_CORE_INI_STATUS (HPRE_CLSTR_BASE + HPRE_CORE_INI_STATUS_OFFSET) #define HPRE_HAC_ECC1_CNT 0x301a04 #define HPRE_HAC_ECC2_CNT 0x301a08 #define HPRE_HAC_SOURCE_INT 0x301600 #define HPRE_CLSTR_ADDR_INTRVL 0x1000 #define HPRE_CLUSTER_INQURY 0x100 #define HPRE_CLSTR_ADDR_INQRY_RSLT 0x104 #define HPRE_TIMEOUT_ABNML_BIT 6 #define HPRE_PASID_EN_BIT 9 #define HPRE_REG_RD_INTVRL_US 10 #define HPRE_REG_RD_TMOUT_US 1000 #define HPRE_DBGFS_VAL_MAX_LEN 20 #define PCI_DEVICE_ID_HUAWEI_HPRE_PF 0xa258 #define HPRE_QM_USR_CFG_MASK GENMASK(31, 1) #define HPRE_QM_AXI_CFG_MASK GENMASK(15, 0) #define HPRE_QM_VFG_AX_MASK GENMASK(7, 0) #define HPRE_BD_USR_MASK GENMASK(1, 0) #define HPRE_PREFETCH_CFG 0x301130 #define HPRE_SVA_PREFTCH_DFX 0x30115C #define HPRE_PREFETCH_ENABLE (~(BIT(0) | BIT(30))) #define HPRE_PREFETCH_DISABLE BIT(30) #define HPRE_SVA_DISABLE_READY (BIT(4) | BIT(8)) /* clock gate */ #define HPRE_CLKGATE_CTL 0x301a10 #define HPRE_PEH_CFG_AUTO_GATE 0x301a2c #define HPRE_CLUSTER_DYN_CTL 0x302010 #define HPRE_CORE_SHB_CFG 0x302088 #define HPRE_CLKGATE_CTL_EN BIT(0) #define HPRE_PEH_CFG_AUTO_GATE_EN BIT(0) #define HPRE_CLUSTER_DYN_CTL_EN BIT(0) #define HPRE_CORE_GATE_EN (BIT(30) | BIT(31)) #define HPRE_AM_OOO_SHUTDOWN_ENB 0x301044 #define HPRE_AM_OOO_SHUTDOWN_ENABLE BIT(0) #define HPRE_WR_MSI_PORT BIT(2) #define HPRE_CORE_ECC_2BIT_ERR BIT(1) #define HPRE_OOO_ECC_2BIT_ERR BIT(5) #define HPRE_QM_BME_FLR BIT(7) #define HPRE_QM_PM_FLR BIT(11) #define HPRE_QM_SRIOV_FLR BIT(12) #define HPRE_SHAPER_TYPE_RATE 640 #define HPRE_VIA_MSI_DSM 1 
#define HPRE_SQE_MASK_OFFSET 8 #define HPRE_SQE_MASK_LEN 24 #define HPRE_DFX_BASE 0x301000 #define HPRE_DFX_COMMON1 0x301400 #define HPRE_DFX_COMMON2 0x301A00 #define HPRE_DFX_CORE 0x302000 #define HPRE_DFX_BASE_LEN 0x55 #define HPRE_DFX_COMMON1_LEN 0x41 #define HPRE_DFX_COMMON2_LEN 0xE #define HPRE_DFX_CORE_LEN 0x43 #define HPRE_DEV_ALG_MAX_LEN 256 static const char hpre_name[] = "hisi_hpre"; static struct dentry *hpre_debugfs_root; static const struct pci_device_id hpre_dev_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_PF) }, { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_HPRE_VF) }, { 0, } }; MODULE_DEVICE_TABLE(pci, hpre_dev_ids); struct hpre_hw_error { u32 int_msk; const char *msg; }; struct hpre_dev_alg { u32 alg_msk; const char *alg; }; static const struct hpre_dev_alg hpre_dev_algs[] = { { .alg_msk = BIT(0), .alg = "rsa\n" }, { .alg_msk = BIT(1), .alg = "dh\n" }, { .alg_msk = BIT(2), .alg = "ecdh\n" }, { .alg_msk = BIT(3), .alg = "ecdsa\n" }, { .alg_msk = BIT(4), .alg = "sm2\n" }, { .alg_msk = BIT(5), .alg = "x25519\n" }, { .alg_msk = BIT(6), .alg = "x448\n" }, { /* sentinel */ } }; static struct hisi_qm_list hpre_devices = { .register_to_crypto = hpre_algs_register, .unregister_from_crypto = hpre_algs_unregister, }; static const char * const hpre_debug_file_name[] = { [HPRE_CLEAR_ENABLE] = "rdclr_en", [HPRE_CLUSTER_CTRL] = "cluster_ctrl", }; enum hpre_cap_type { HPRE_QM_NFE_MASK_CAP, HPRE_QM_RESET_MASK_CAP, HPRE_QM_OOO_SHUTDOWN_MASK_CAP, HPRE_QM_CE_MASK_CAP, HPRE_NFE_MASK_CAP, HPRE_RESET_MASK_CAP, HPRE_OOO_SHUTDOWN_MASK_CAP, HPRE_CE_MASK_CAP, HPRE_CLUSTER_NUM_CAP, HPRE_CORE_TYPE_NUM_CAP, HPRE_CORE_NUM_CAP, HPRE_CLUSTER_CORE_NUM_CAP, HPRE_CORE_ENABLE_BITMAP_CAP, HPRE_DRV_ALG_BITMAP_CAP, HPRE_DEV_ALG_BITMAP_CAP, HPRE_CORE1_ALG_BITMAP_CAP, HPRE_CORE2_ALG_BITMAP_CAP, HPRE_CORE3_ALG_BITMAP_CAP, HPRE_CORE4_ALG_BITMAP_CAP, HPRE_CORE5_ALG_BITMAP_CAP, HPRE_CORE6_ALG_BITMAP_CAP, HPRE_CORE7_ALG_BITMAP_CAP, HPRE_CORE8_ALG_BITMAP_CAP, HPRE_CORE9_ALG_BITMAP_CAP, HPRE_CORE10_ALG_BITMAP_CAP }; static const struct hisi_qm_cap_info hpre_basic_info[] = { {HPRE_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C37, 0x7C37}, {HPRE_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC37, 0x6C37}, {HPRE_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C37}, {HPRE_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, {HPRE_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0x1FFFFFE}, {HPRE_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x3FFFFE, 0xBFFFFE}, {HPRE_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x22, 0xBFFFFE}, {HPRE_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, {HPRE_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x0, 0x4, 0x1}, {HPRE_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x0, 0x2, 0x2}, {HPRE_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x0, 0x8, 0xA}, {HPRE_CLUSTER_CORE_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x0, 0x2, 0xA}, {HPRE_CORE_ENABLE_BITMAP_CAP, 0x3140, 0, GENMASK(31, 0), 0x0, 0xF, 0x3FF}, {HPRE_DRV_ALG_BITMAP_CAP, 0x3144, 0, GENMASK(31, 0), 0x0, 0x03, 0x27}, {HPRE_DEV_ALG_BITMAP_CAP, 0x3148, 0, GENMASK(31, 0), 0x0, 0x03, 0x7F}, {HPRE_CORE1_ALG_BITMAP_CAP, 0x314c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, {HPRE_CORE2_ALG_BITMAP_CAP, 0x3150, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, {HPRE_CORE3_ALG_BITMAP_CAP, 0x3154, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, {HPRE_CORE4_ALG_BITMAP_CAP, 0x3158, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, {HPRE_CORE5_ALG_BITMAP_CAP, 0x315c, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, 
{HPRE_CORE6_ALG_BITMAP_CAP, 0x3160, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, {HPRE_CORE7_ALG_BITMAP_CAP, 0x3164, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, {HPRE_CORE8_ALG_BITMAP_CAP, 0x3168, 0, GENMASK(31, 0), 0x0, 0x7F, 0x7F}, {HPRE_CORE9_ALG_BITMAP_CAP, 0x316c, 0, GENMASK(31, 0), 0x0, 0x10, 0x10}, {HPRE_CORE10_ALG_BITMAP_CAP, 0x3170, 0, GENMASK(31, 0), 0x0, 0x10, 0x10} }; static const struct hpre_hw_error hpre_hw_errors[] = { { .int_msk = BIT(0), .msg = "core_ecc_1bit_err_int_set" }, { .int_msk = BIT(1), .msg = "core_ecc_2bit_err_int_set" }, { .int_msk = BIT(2), .msg = "dat_wb_poison_int_set" }, { .int_msk = BIT(3), .msg = "dat_rd_poison_int_set" }, { .int_msk = BIT(4), .msg = "bd_rd_poison_int_set" }, { .int_msk = BIT(5), .msg = "ooo_ecc_2bit_err_int_set" }, { .int_msk = BIT(6), .msg = "cluster1_shb_timeout_int_set" }, { .int_msk = BIT(7), .msg = "cluster2_shb_timeout_int_set" }, { .int_msk = BIT(8), .msg = "cluster3_shb_timeout_int_set" }, { .int_msk = BIT(9), .msg = "cluster4_shb_timeout_int_set" }, { .int_msk = GENMASK(15, 10), .msg = "ooo_rdrsp_err_int_set" }, { .int_msk = GENMASK(21, 16), .msg = "ooo_wrrsp_err_int_set" }, { .int_msk = BIT(22), .msg = "pt_rng_timeout_int_set" }, { .int_msk = BIT(23), .msg = "sva_fsm_timeout_int_set" }, { .int_msk = BIT(24), .msg = "sva_int_set" }, { /* sentinel */ } }; static const u64 hpre_cluster_offsets[] = { [HPRE_CLUSTER0] = HPRE_CLSTR_BASE + HPRE_CLUSTER0 * HPRE_CLSTR_ADDR_INTRVL, [HPRE_CLUSTER1] = HPRE_CLSTR_BASE + HPRE_CLUSTER1 * HPRE_CLSTR_ADDR_INTRVL, [HPRE_CLUSTER2] = HPRE_CLSTR_BASE + HPRE_CLUSTER2 * HPRE_CLSTR_ADDR_INTRVL, [HPRE_CLUSTER3] = HPRE_CLSTR_BASE + HPRE_CLUSTER3 * HPRE_CLSTR_ADDR_INTRVL, }; static const struct debugfs_reg32 hpre_cluster_dfx_regs[] = { {"CORES_EN_STATUS ", HPRE_CORE_EN_OFFSET}, {"CORES_INI_CFG ", HPRE_CORE_INI_CFG_OFFSET}, {"CORES_INI_STATUS ", HPRE_CORE_INI_STATUS_OFFSET}, {"CORES_HTBT_WARN ", HPRE_CORE_HTBT_WARN_OFFSET}, {"CORES_IS_SCHD ", HPRE_CORE_IS_SCHD_OFFSET}, }; static const struct debugfs_reg32 hpre_com_dfx_regs[] = { {"READ_CLR_EN ", HPRE_CTRL_CNT_CLR_CE}, {"AXQOS ", HPRE_VFG_AXQOS}, {"AWUSR_CFG ", HPRE_AWUSR_FP_CFG}, {"BD_ENDIAN ", HPRE_BD_ENDIAN}, {"ECC_CHECK_CTRL ", HPRE_ECC_BYPASS}, {"RAS_INT_WIDTH ", HPRE_RAS_WIDTH_CFG}, {"POISON_BYPASS ", HPRE_POISON_BYPASS}, {"BD_ARUSER ", HPRE_BD_ARUSR_CFG}, {"BD_AWUSER ", HPRE_BD_AWUSR_CFG}, {"DATA_ARUSER ", HPRE_DATA_RUSER_CFG}, {"DATA_AWUSER ", HPRE_DATA_WUSER_CFG}, {"INT_STATUS ", HPRE_INT_STATUS}, {"INT_MASK ", HPRE_HAC_INT_MSK}, {"RAS_CE_ENB ", HPRE_HAC_RAS_CE_ENB}, {"RAS_NFE_ENB ", HPRE_HAC_RAS_NFE_ENB}, {"RAS_FE_ENB ", HPRE_HAC_RAS_FE_ENB}, {"INT_SET ", HPRE_HAC_INT_SET}, {"RNG_TIMEOUT_NUM ", HPRE_RNG_TIMEOUT_NUM}, }; static const char *hpre_dfx_files[HPRE_DFX_FILE_NUM] = { "send_cnt", "recv_cnt", "send_fail_cnt", "send_busy_cnt", "over_thrhld_cnt", "overtime_thrhld", "invalid_req_cnt" }; /* define the HPRE's dfx regs region and region length */ static struct dfx_diff_registers hpre_diff_regs[] = { { .reg_offset = HPRE_DFX_BASE, .reg_len = HPRE_DFX_BASE_LEN, }, { .reg_offset = HPRE_DFX_COMMON1, .reg_len = HPRE_DFX_COMMON1_LEN, }, { .reg_offset = HPRE_DFX_COMMON2, .reg_len = HPRE_DFX_COMMON2_LEN, }, { .reg_offset = HPRE_DFX_CORE, .reg_len = HPRE_DFX_CORE_LEN, }, }; bool hpre_check_alg_support(struct hisi_qm *qm, u32 alg) { u32 cap_val; cap_val = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DRV_ALG_BITMAP_CAP, qm->cap_ver); if (alg & cap_val) return true; return false; } static int hpre_set_qm_algs(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; char 
*algs, *ptr; u32 alg_msk; int i; if (!qm->use_sva) return 0; algs = devm_kzalloc(dev, HPRE_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); if (!algs) return -ENOMEM; alg_msk = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_DEV_ALG_BITMAP_CAP, qm->cap_ver); for (i = 0; i < ARRAY_SIZE(hpre_dev_algs); i++) if (alg_msk & hpre_dev_algs[i].alg_msk) strcat(algs, hpre_dev_algs[i].alg); ptr = strrchr(algs, '\n'); if (ptr) *ptr = '\0'; qm->uacce->algs = algs; return 0; } static int hpre_diff_regs_show(struct seq_file *s, void *unused) { struct hisi_qm *qm = s->private; hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs, ARRAY_SIZE(hpre_diff_regs)); return 0; } DEFINE_SHOW_ATTRIBUTE(hpre_diff_regs); static int hpre_com_regs_show(struct seq_file *s, void *unused) { hisi_qm_regs_dump(s, s->private); return 0; } DEFINE_SHOW_ATTRIBUTE(hpre_com_regs); static int hpre_cluster_regs_show(struct seq_file *s, void *unused) { hisi_qm_regs_dump(s, s->private); return 0; } DEFINE_SHOW_ATTRIBUTE(hpre_cluster_regs); static const struct kernel_param_ops hpre_uacce_mode_ops = { .set = uacce_mode_set, .get = param_get_int, }; /* * uacce_mode = 0 means hpre only register to crypto, * uacce_mode = 1 means hpre both register to crypto and uacce. */ static u32 uacce_mode = UACCE_MODE_NOUACCE; module_param_cb(uacce_mode, &hpre_uacce_mode_ops, &uacce_mode, 0444); MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); static int pf_q_num_set(const char *val, const struct kernel_param *kp) { return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_HPRE_PF); } static const struct kernel_param_ops hpre_pf_q_num_ops = { .set = pf_q_num_set, .get = param_get_int, }; static u32 pf_q_num = HPRE_PF_DEF_Q_NUM; module_param_cb(pf_q_num, &hpre_pf_q_num_ops, &pf_q_num, 0444); MODULE_PARM_DESC(pf_q_num, "Number of queues in PF of CS(2-1024)"); static const struct kernel_param_ops vfs_num_ops = { .set = vfs_num_set, .get = param_get_int, }; static u32 vfs_num; module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); static inline int hpre_cluster_num(struct hisi_qm *qm) { return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CLUSTER_NUM_CAP, qm->cap_ver); } static inline int hpre_cluster_core_mask(struct hisi_qm *qm) { return hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CORE_ENABLE_BITMAP_CAP, qm->cap_ver); } struct hisi_qp *hpre_create_qp(u8 type) { int node = cpu_to_node(smp_processor_id()); struct hisi_qp *qp = NULL; int ret; if (type != HPRE_V2_ALG_TYPE && type != HPRE_V3_ECC_ALG_TYPE) return NULL; /* * type: 0 - RSA/DH. algorithm supported in V2, * 1 - ECC algorithm in V3. 
*/ ret = hisi_qm_alloc_qps_node(&hpre_devices, 1, type, node, &qp); if (!ret) return qp; return NULL; } static void hpre_config_pasid(struct hisi_qm *qm) { u32 val1, val2; if (qm->ver >= QM_HW_V3) return; val1 = readl_relaxed(qm->io_base + HPRE_DATA_RUSER_CFG); val2 = readl_relaxed(qm->io_base + HPRE_DATA_WUSER_CFG); if (qm->use_sva) { val1 |= BIT(HPRE_PASID_EN_BIT); val2 |= BIT(HPRE_PASID_EN_BIT); } else { val1 &= ~BIT(HPRE_PASID_EN_BIT); val2 &= ~BIT(HPRE_PASID_EN_BIT); } writel_relaxed(val1, qm->io_base + HPRE_DATA_RUSER_CFG); writel_relaxed(val2, qm->io_base + HPRE_DATA_WUSER_CFG); } static int hpre_cfg_by_dsm(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; union acpi_object *obj; guid_t guid; if (guid_parse("b06b81ab-0134-4a45-9b0c-483447b95fa7", &guid)) { dev_err(dev, "Hpre GUID failed\n"); return -EINVAL; } /* Switch over to MSI handling due to non-standard PCI implementation */ obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &guid, 0, HPRE_VIA_MSI_DSM, NULL); if (!obj) { dev_err(dev, "ACPI handle failed!\n"); return -EIO; } ACPI_FREE(obj); return 0; } static int hpre_set_cluster(struct hisi_qm *qm) { u32 cluster_core_mask = hpre_cluster_core_mask(qm); u8 clusters_num = hpre_cluster_num(qm); struct device *dev = &qm->pdev->dev; unsigned long offset; u32 val = 0; int ret, i; for (i = 0; i < clusters_num; i++) { offset = i * HPRE_CLSTR_ADDR_INTRVL; /* clusters initiating */ writel(cluster_core_mask, qm->io_base + offset + HPRE_CORE_ENB); writel(0x1, qm->io_base + offset + HPRE_CORE_INI_CFG); ret = readl_relaxed_poll_timeout(qm->io_base + offset + HPRE_CORE_INI_STATUS, val, ((val & cluster_core_mask) == cluster_core_mask), HPRE_REG_RD_INTVRL_US, HPRE_REG_RD_TMOUT_US); if (ret) { dev_err(dev, "cluster %d int st status timeout!\n", i); return -ETIMEDOUT; } } return 0; } /* * For Kunpeng 920, we should disable FLR triggered by hardware (BME/PM/SRIOV). * Or it may stay in D3 state when we bind and unbind hpre quickly, * as it does FLR triggered by hardware. 
*/ static void disable_flr_of_bme(struct hisi_qm *qm) { u32 val; val = readl(qm->io_base + QM_PEH_AXUSER_CFG); val &= ~(HPRE_QM_BME_FLR | HPRE_QM_SRIOV_FLR); val |= HPRE_QM_PM_FLR; writel(val, qm->io_base + QM_PEH_AXUSER_CFG); writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); } static void hpre_open_sva_prefetch(struct hisi_qm *qm) { u32 val; int ret; if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; /* Enable prefetch */ val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); val &= HPRE_PREFETCH_ENABLE; writel(val, qm->io_base + HPRE_PREFETCH_CFG); ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_PREFETCH_CFG, val, !(val & HPRE_PREFETCH_DISABLE), HPRE_REG_RD_INTVRL_US, HPRE_REG_RD_TMOUT_US); if (ret) pci_err(qm->pdev, "failed to open sva prefetch\n"); } static void hpre_close_sva_prefetch(struct hisi_qm *qm) { u32 val; int ret; if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; val = readl_relaxed(qm->io_base + HPRE_PREFETCH_CFG); val |= HPRE_PREFETCH_DISABLE; writel(val, qm->io_base + HPRE_PREFETCH_CFG); ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_SVA_PREFTCH_DFX, val, !(val & HPRE_SVA_DISABLE_READY), HPRE_REG_RD_INTVRL_US, HPRE_REG_RD_TMOUT_US); if (ret) pci_err(qm->pdev, "failed to close sva prefetch\n"); } static void hpre_enable_clock_gate(struct hisi_qm *qm) { u32 val; if (qm->ver < QM_HW_V3) return; val = readl(qm->io_base + HPRE_CLKGATE_CTL); val |= HPRE_CLKGATE_CTL_EN; writel(val, qm->io_base + HPRE_CLKGATE_CTL); val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE); val |= HPRE_PEH_CFG_AUTO_GATE_EN; writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE); val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL); val |= HPRE_CLUSTER_DYN_CTL_EN; writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL); val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG); val |= HPRE_CORE_GATE_EN; writel(val, qm->io_base + HPRE_CORE_SHB_CFG); } static void hpre_disable_clock_gate(struct hisi_qm *qm) { u32 val; if (qm->ver < QM_HW_V3) return; val = readl(qm->io_base + HPRE_CLKGATE_CTL); val &= ~HPRE_CLKGATE_CTL_EN; writel(val, qm->io_base + HPRE_CLKGATE_CTL); val = readl(qm->io_base + HPRE_PEH_CFG_AUTO_GATE); val &= ~HPRE_PEH_CFG_AUTO_GATE_EN; writel(val, qm->io_base + HPRE_PEH_CFG_AUTO_GATE); val = readl(qm->io_base + HPRE_CLUSTER_DYN_CTL); val &= ~HPRE_CLUSTER_DYN_CTL_EN; writel(val, qm->io_base + HPRE_CLUSTER_DYN_CTL); val = readl_relaxed(qm->io_base + HPRE_CORE_SHB_CFG); val &= ~HPRE_CORE_GATE_EN; writel(val, qm->io_base + HPRE_CORE_SHB_CFG); } static int hpre_set_user_domain_and_cache(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; u32 val; int ret; /* disabel dynamic clock gate before sram init */ hpre_disable_clock_gate(qm); writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_ARUSER_M_CFG_ENABLE); writel(HPRE_QM_USR_CFG_MASK, qm->io_base + QM_AWUSER_M_CFG_ENABLE); writel_relaxed(HPRE_QM_AXI_CFG_MASK, qm->io_base + QM_AXI_M_CFG); /* HPRE need more time, we close this interrupt */ val = readl_relaxed(qm->io_base + HPRE_QM_ABNML_INT_MASK); val |= BIT(HPRE_TIMEOUT_ABNML_BIT); writel_relaxed(val, qm->io_base + HPRE_QM_ABNML_INT_MASK); if (qm->ver >= QM_HW_V3) writel(HPRE_RSA_ENB | HPRE_ECC_ENB, qm->io_base + HPRE_TYPES_ENB); else writel(HPRE_RSA_ENB, qm->io_base + HPRE_TYPES_ENB); writel(HPRE_QM_VFG_AX_MASK, qm->io_base + HPRE_VFG_AXCACHE); writel(0x0, qm->io_base + HPRE_BD_ENDIAN); writel(0x0, qm->io_base + HPRE_INT_MASK); writel(0x0, qm->io_base + HPRE_POISON_BYPASS); writel(0x0, qm->io_base + HPRE_COMM_CNT_CLR_CE); writel(0x0, qm->io_base + HPRE_ECC_BYPASS); 
writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_ARUSR_CFG); writel(HPRE_BD_USR_MASK, qm->io_base + HPRE_BD_AWUSR_CFG); writel(0x1, qm->io_base + HPRE_RDCHN_INI_CFG); ret = readl_relaxed_poll_timeout(qm->io_base + HPRE_RDCHN_INI_ST, val, val & BIT(0), HPRE_REG_RD_INTVRL_US, HPRE_REG_RD_TMOUT_US); if (ret) { dev_err(dev, "read rd channel timeout fail!\n"); return -ETIMEDOUT; } ret = hpre_set_cluster(qm); if (ret) return -ETIMEDOUT; /* This setting is only needed by Kunpeng 920. */ if (qm->ver == QM_HW_V2) { ret = hpre_cfg_by_dsm(qm); if (ret) return ret; disable_flr_of_bme(qm); } /* Config data buffer pasid needed by Kunpeng 920 */ hpre_config_pasid(qm); hpre_enable_clock_gate(qm); return ret; } static void hpre_cnt_regs_clear(struct hisi_qm *qm) { u8 clusters_num = hpre_cluster_num(qm); unsigned long offset; int i; /* clear clusterX/cluster_ctrl */ for (i = 0; i < clusters_num; i++) { offset = HPRE_CLSTR_BASE + i * HPRE_CLSTR_ADDR_INTRVL; writel(0x0, qm->io_base + offset + HPRE_CLUSTER_INQURY); } /* clear rdclr_en */ writel(0x0, qm->io_base + HPRE_CTRL_CNT_CLR_CE); hisi_qm_debug_regs_clear(qm); } static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable) { u32 val1, val2; val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); if (enable) { val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE; val2 = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); } else { val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE; val2 = 0x0; } if (qm->ver > QM_HW_V2) writel(val2, qm->io_base + HPRE_OOO_SHUTDOWN_SEL); writel(val1, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); } static void hpre_hw_error_disable(struct hisi_qm *qm) { u32 ce, nfe; ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); /* disable hpre hw error interrupts */ writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK); /* disable HPRE block master OOO when nfe occurs on Kunpeng930 */ hpre_master_ooo_ctrl(qm, false); } static void hpre_hw_error_enable(struct hisi_qm *qm) { u32 ce, nfe; ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver); nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); /* clear HPRE hw error source if having */ writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT); /* configure error type */ writel(ce, qm->io_base + HPRE_RAS_CE_ENB); writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB); /* enable HPRE block master OOO when nfe occurs on Kunpeng930 */ hpre_master_ooo_ctrl(qm, true); /* enable hpre hw error interrupts */ writel(HPRE_CORE_INT_ENABLE, qm->io_base + HPRE_INT_MASK); } static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file) { struct hpre *hpre = container_of(file->debug, struct hpre, debug); return &hpre->qm; } static u32 hpre_clear_enable_read(struct hpre_debugfs_file *file) { struct hisi_qm *qm = hpre_file_to_qm(file); return readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & HPRE_CTRL_CNT_CLR_CE_BIT; } static int hpre_clear_enable_write(struct hpre_debugfs_file *file, u32 val) { struct hisi_qm *qm = hpre_file_to_qm(file); u32 tmp; if (val != 1 && val != 0) return -EINVAL; tmp = (readl(qm->io_base + HPRE_CTRL_CNT_CLR_CE) & ~HPRE_CTRL_CNT_CLR_CE_BIT) | val; writel(tmp, qm->io_base + HPRE_CTRL_CNT_CLR_CE); return 0; } static u32 hpre_cluster_inqry_read(struct hpre_debugfs_file *file) { struct hisi_qm *qm = hpre_file_to_qm(file); int cluster_index = 
file->index - HPRE_CLUSTER_CTRL; unsigned long offset = HPRE_CLSTR_BASE + cluster_index * HPRE_CLSTR_ADDR_INTRVL; return readl(qm->io_base + offset + HPRE_CLSTR_ADDR_INQRY_RSLT); } static void hpre_cluster_inqry_write(struct hpre_debugfs_file *file, u32 val) { struct hisi_qm *qm = hpre_file_to_qm(file); int cluster_index = file->index - HPRE_CLUSTER_CTRL; unsigned long offset = HPRE_CLSTR_BASE + cluster_index * HPRE_CLSTR_ADDR_INTRVL; writel(val, qm->io_base + offset + HPRE_CLUSTER_INQURY); } static ssize_t hpre_ctrl_debug_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct hpre_debugfs_file *file = filp->private_data; struct hisi_qm *qm = hpre_file_to_qm(file); char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; u32 val; int ret; ret = hisi_qm_get_dfx_access(qm); if (ret) return ret; spin_lock_irq(&file->lock); switch (file->type) { case HPRE_CLEAR_ENABLE: val = hpre_clear_enable_read(file); break; case HPRE_CLUSTER_CTRL: val = hpre_cluster_inqry_read(file); break; default: goto err_input; } spin_unlock_irq(&file->lock); hisi_qm_put_dfx_access(qm); ret = snprintf(tbuf, HPRE_DBGFS_VAL_MAX_LEN, "%u\n", val); return simple_read_from_buffer(buf, count, pos, tbuf, ret); err_input: spin_unlock_irq(&file->lock); hisi_qm_put_dfx_access(qm); return -EINVAL; } static ssize_t hpre_ctrl_debug_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct hpre_debugfs_file *file = filp->private_data; struct hisi_qm *qm = hpre_file_to_qm(file); char tbuf[HPRE_DBGFS_VAL_MAX_LEN]; unsigned long val; int len, ret; if (*pos != 0) return 0; if (count >= HPRE_DBGFS_VAL_MAX_LEN) return -ENOSPC; len = simple_write_to_buffer(tbuf, HPRE_DBGFS_VAL_MAX_LEN - 1, pos, buf, count); if (len < 0) return len; tbuf[len] = '\0'; if (kstrtoul(tbuf, 0, &val)) return -EFAULT; ret = hisi_qm_get_dfx_access(qm); if (ret) return ret; spin_lock_irq(&file->lock); switch (file->type) { case HPRE_CLEAR_ENABLE: ret = hpre_clear_enable_write(file, val); if (ret) goto err_input; break; case HPRE_CLUSTER_CTRL: hpre_cluster_inqry_write(file, val); break; default: ret = -EINVAL; goto err_input; } ret = count; err_input: spin_unlock_irq(&file->lock); hisi_qm_put_dfx_access(qm); return ret; } static const struct file_operations hpre_ctrl_debug_fops = { .owner = THIS_MODULE, .open = simple_open, .read = hpre_ctrl_debug_read, .write = hpre_ctrl_debug_write, }; static int hpre_debugfs_atomic64_get(void *data, u64 *val) { struct hpre_dfx *dfx_item = data; *val = atomic64_read(&dfx_item->value); return 0; } static int hpre_debugfs_atomic64_set(void *data, u64 val) { struct hpre_dfx *dfx_item = data; struct hpre_dfx *hpre_dfx = NULL; if (dfx_item->type == HPRE_OVERTIME_THRHLD) { hpre_dfx = dfx_item - HPRE_OVERTIME_THRHLD; atomic64_set(&hpre_dfx[HPRE_OVER_THRHLD_CNT].value, 0); } else if (val) { return -EINVAL; } atomic64_set(&dfx_item->value, val); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(hpre_atomic64_ops, hpre_debugfs_atomic64_get, hpre_debugfs_atomic64_set, "%llu\n"); static int hpre_create_debugfs_file(struct hisi_qm *qm, struct dentry *dir, enum hpre_ctrl_dbgfs_file type, int indx) { struct hpre *hpre = container_of(qm, struct hpre, qm); struct hpre_debug *dbg = &hpre->debug; struct dentry *file_dir; if (dir) file_dir = dir; else file_dir = qm->debug.debug_root; if (type >= HPRE_DEBUG_FILE_NUM) return -EINVAL; spin_lock_init(&dbg->files[indx].lock); dbg->files[indx].debug = dbg; dbg->files[indx].type = type; dbg->files[indx].index = indx; debugfs_create_file(hpre_debug_file_name[type], 0600, file_dir, dbg->files + 
indx, &hpre_ctrl_debug_fops); return 0; } static int hpre_pf_comm_regs_debugfs_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; struct debugfs_regset32 *regset; regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) return -ENOMEM; regset->regs = hpre_com_dfx_regs; regset->nregs = ARRAY_SIZE(hpre_com_dfx_regs); regset->base = qm->io_base; regset->dev = dev; debugfs_create_file("regs", 0444, qm->debug.debug_root, regset, &hpre_com_regs_fops); return 0; } static int hpre_cluster_debugfs_init(struct hisi_qm *qm) { u8 clusters_num = hpre_cluster_num(qm); struct device *dev = &qm->pdev->dev; char buf[HPRE_DBGFS_VAL_MAX_LEN]; struct debugfs_regset32 *regset; struct dentry *tmp_d; int i, ret; for (i = 0; i < clusters_num; i++) { ret = snprintf(buf, HPRE_DBGFS_VAL_MAX_LEN, "cluster%d", i); if (ret < 0) return -EINVAL; tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) return -ENOMEM; regset->regs = hpre_cluster_dfx_regs; regset->nregs = ARRAY_SIZE(hpre_cluster_dfx_regs); regset->base = qm->io_base + hpre_cluster_offsets[i]; regset->dev = dev; debugfs_create_file("regs", 0444, tmp_d, regset, &hpre_cluster_regs_fops); ret = hpre_create_debugfs_file(qm, tmp_d, HPRE_CLUSTER_CTRL, i + HPRE_CLUSTER_CTRL); if (ret) return ret; } return 0; } static int hpre_ctrl_debug_init(struct hisi_qm *qm) { int ret; ret = hpre_create_debugfs_file(qm, NULL, HPRE_CLEAR_ENABLE, HPRE_CLEAR_ENABLE); if (ret) return ret; ret = hpre_pf_comm_regs_debugfs_init(qm); if (ret) return ret; return hpre_cluster_debugfs_init(qm); } static void hpre_dfx_debug_init(struct hisi_qm *qm) { struct dfx_diff_registers *hpre_regs = qm->debug.acc_diff_regs; struct hpre *hpre = container_of(qm, struct hpre, qm); struct hpre_dfx *dfx = hpre->debug.dfx; struct dentry *parent; int i; parent = debugfs_create_dir("hpre_dfx", qm->debug.debug_root); for (i = 0; i < HPRE_DFX_FILE_NUM; i++) { dfx[i].type = i; debugfs_create_file(hpre_dfx_files[i], 0644, parent, &dfx[i], &hpre_atomic64_ops); } if (qm->fun_type == QM_HW_PF && hpre_regs) debugfs_create_file("diff_regs", 0444, parent, qm, &hpre_diff_regs_fops); } static int hpre_debugfs_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; int ret; qm->debug.debug_root = debugfs_create_dir(dev_name(dev), hpre_debugfs_root); qm->debug.sqe_mask_offset = HPRE_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = HPRE_SQE_MASK_LEN; ret = hisi_qm_regs_debugfs_init(qm, hpre_diff_regs, ARRAY_SIZE(hpre_diff_regs)); if (ret) { dev_warn(dev, "Failed to init HPRE diff regs!\n"); goto debugfs_remove; } hisi_qm_debug_init(qm); if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) { ret = hpre_ctrl_debug_init(qm); if (ret) goto failed_to_create; } hpre_dfx_debug_init(qm); return 0; failed_to_create: hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); debugfs_remove: debugfs_remove_recursive(qm->debug.debug_root); return ret; } static void hpre_debugfs_exit(struct hisi_qm *qm) { hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hpre_diff_regs)); debugfs_remove_recursive(qm->debug.debug_root); } static int hpre_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { int ret; if (pdev->revision == QM_HW_V1) { pci_warn(pdev, "HPRE version 1 is not supported!\n"); return -EINVAL; } qm->mode = uacce_mode; qm->pdev = pdev; qm->ver = pdev->revision; qm->sqe_size = HPRE_SQE_SIZE; qm->dev_name = hpre_name; qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_HPRE_PF) ? 
QM_HW_PF : QM_HW_VF; if (qm->fun_type == QM_HW_PF) { qm->qp_base = HPRE_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &hpre_devices; } ret = hisi_qm_init(qm); if (ret) { pci_err(pdev, "Failed to init hpre qm configures!\n"); return ret; } ret = hpre_set_qm_algs(qm); if (ret) { pci_err(pdev, "Failed to set hpre algs!\n"); hisi_qm_uninit(qm); } return ret; } static int hpre_show_last_regs_init(struct hisi_qm *qm) { int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs); int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs); u8 clusters_num = hpre_cluster_num(qm); struct qm_debug *debug = &qm->debug; void __iomem *io_base; int i, j, idx; debug->last_words = kcalloc(cluster_dfx_regs_num * clusters_num + com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); if (!debug->last_words) return -ENOMEM; for (i = 0; i < com_dfx_regs_num; i++) debug->last_words[i] = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset); for (i = 0; i < clusters_num; i++) { io_base = qm->io_base + hpre_cluster_offsets[i]; for (j = 0; j < cluster_dfx_regs_num; j++) { idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j; debug->last_words[idx] = readl_relaxed( io_base + hpre_cluster_dfx_regs[j].offset); } } return 0; } static void hpre_show_last_regs_uninit(struct hisi_qm *qm) { struct qm_debug *debug = &qm->debug; if (qm->fun_type == QM_HW_VF || !debug->last_words) return; kfree(debug->last_words); debug->last_words = NULL; } static void hpre_show_last_dfx_regs(struct hisi_qm *qm) { int cluster_dfx_regs_num = ARRAY_SIZE(hpre_cluster_dfx_regs); int com_dfx_regs_num = ARRAY_SIZE(hpre_com_dfx_regs); u8 clusters_num = hpre_cluster_num(qm); struct qm_debug *debug = &qm->debug; struct pci_dev *pdev = qm->pdev; void __iomem *io_base; int i, j, idx; u32 val; if (qm->fun_type == QM_HW_VF || !debug->last_words) return; /* dumps last word of the debugging registers during controller reset */ for (i = 0; i < com_dfx_regs_num; i++) { val = readl_relaxed(qm->io_base + hpre_com_dfx_regs[i].offset); if (debug->last_words[i] != val) pci_info(pdev, "Common_core:%s \t= 0x%08x => 0x%08x\n", hpre_com_dfx_regs[i].name, debug->last_words[i], val); } for (i = 0; i < clusters_num; i++) { io_base = qm->io_base + hpre_cluster_offsets[i]; for (j = 0; j < cluster_dfx_regs_num; j++) { val = readl_relaxed(io_base + hpre_cluster_dfx_regs[j].offset); idx = com_dfx_regs_num + i * cluster_dfx_regs_num + j; if (debug->last_words[idx] != val) pci_info(pdev, "cluster-%d:%s \t= 0x%08x => 0x%08x\n", i, hpre_cluster_dfx_regs[j].name, debug->last_words[idx], val); } } } static void hpre_log_hw_error(struct hisi_qm *qm, u32 err_sts) { const struct hpre_hw_error *err = hpre_hw_errors; struct device *dev = &qm->pdev->dev; while (err->msg) { if (err->int_msk & err_sts) dev_warn(dev, "%s [error status=0x%x] found\n", err->msg, err->int_msk); err++; } } static u32 hpre_get_hw_err_status(struct hisi_qm *qm) { return readl(qm->io_base + HPRE_INT_STATUS); } static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { u32 nfe; writel(err_sts, qm->io_base + HPRE_HAC_SOURCE_INT); nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver); writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB); } static void hpre_open_axi_master_ooo(struct hisi_qm *qm) { u32 value; value = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); writel(value & ~HPRE_AM_OOO_SHUTDOWN_ENABLE, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); writel(value | HPRE_AM_OOO_SHUTDOWN_ENABLE, qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB); } static void 
hpre_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; err_info->fe = HPRE_HAC_RAS_FE_ENABLE; err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver); err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver); err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR; err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_RESET_MASK_CAP, qm->cap_ver); err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_RESET_MASK_CAP, qm->cap_ver); err_info->msi_wr_port = HPRE_WR_MSI_PORT; err_info->acpi_rst = "HRST"; } static const struct hisi_qm_err_ini hpre_err_ini = { .hw_init = hpre_set_user_domain_and_cache, .hw_err_enable = hpre_hw_error_enable, .hw_err_disable = hpre_hw_error_disable, .get_dev_hw_err_status = hpre_get_hw_err_status, .clear_dev_hw_err_status = hpre_clear_hw_err_status, .log_dev_hw_err = hpre_log_hw_error, .open_axi_master_ooo = hpre_open_axi_master_ooo, .open_sva_prefetch = hpre_open_sva_prefetch, .close_sva_prefetch = hpre_close_sva_prefetch, .show_last_dfx_regs = hpre_show_last_dfx_regs, .err_info_init = hpre_err_info_init, }; static int hpre_pf_probe_init(struct hpre *hpre) { struct hisi_qm *qm = &hpre->qm; int ret; ret = hpre_set_user_domain_and_cache(qm); if (ret) return ret; hpre_open_sva_prefetch(qm); qm->err_ini = &hpre_err_ini; qm->err_ini->err_info_init(qm); hisi_qm_dev_err_init(qm); ret = hpre_show_last_regs_init(qm); if (ret) pci_err(qm->pdev, "Failed to init last word regs!\n"); return ret; } static int hpre_probe_init(struct hpre *hpre) { u32 type_rate = HPRE_SHAPER_TYPE_RATE; struct hisi_qm *qm = &hpre->qm; int ret; if (qm->fun_type == QM_HW_PF) { ret = hpre_pf_probe_init(hpre); if (ret) return ret; /* Enable shaper type 0 */ if (qm->ver >= QM_HW_V3) { type_rate |= QM_SHAPER_ENABLE; qm->type_rate = type_rate; } } return 0; } static int hpre_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_qm *qm; struct hpre *hpre; int ret; hpre = devm_kzalloc(&pdev->dev, sizeof(*hpre), GFP_KERNEL); if (!hpre) return -ENOMEM; qm = &hpre->qm; ret = hpre_qm_init(qm, pdev); if (ret) { pci_err(pdev, "Failed to init HPRE QM (%d)!\n", ret); return ret; } ret = hpre_probe_init(hpre); if (ret) { pci_err(pdev, "Failed to probe (%d)!\n", ret); goto err_with_qm_init; } ret = hisi_qm_start(qm); if (ret) goto err_with_err_init; ret = hpre_debugfs_init(qm); if (ret) dev_warn(&pdev->dev, "init debugfs fail!\n"); ret = hisi_qm_alg_register(qm, &hpre_devices); if (ret < 0) { pci_err(pdev, "fail to register algs to crypto!\n"); goto err_with_qm_start; } if (qm->uacce) { ret = uacce_register(qm->uacce); if (ret) { pci_err(pdev, "failed to register uacce (%d)!\n", ret); goto err_with_alg_register; } } if (qm->fun_type == QM_HW_PF && vfs_num) { ret = hisi_qm_sriov_enable(pdev, vfs_num); if (ret < 0) goto err_with_alg_register; } hisi_qm_pm_init(qm); return 0; err_with_alg_register: hisi_qm_alg_unregister(qm, &hpre_devices); err_with_qm_start: hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_with_err_init: hpre_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); err_with_qm_init: hisi_qm_uninit(qm); return ret; } static void hpre_remove(struct pci_dev *pdev) { struct hisi_qm *qm = 
pci_get_drvdata(pdev); hisi_qm_pm_uninit(qm); hisi_qm_wait_task_finish(qm, &hpre_devices); hisi_qm_alg_unregister(qm, &hpre_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev, true); hpre_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); if (qm->fun_type == QM_HW_PF) { hpre_cnt_regs_clear(qm); qm->debug.curr_qm_qp_num = 0; hpre_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); } hisi_qm_uninit(qm); } static const struct dev_pm_ops hpre_pm_ops = { SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL) }; static const struct pci_error_handlers hpre_err_handler = { .error_detected = hisi_qm_dev_err_detected, .slot_reset = hisi_qm_dev_slot_reset, .reset_prepare = hisi_qm_reset_prepare, .reset_done = hisi_qm_reset_done, }; static struct pci_driver hpre_pci_driver = { .name = hpre_name, .id_table = hpre_dev_ids, .probe = hpre_probe, .remove = hpre_remove, .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ? hisi_qm_sriov_configure : NULL, .err_handler = &hpre_err_handler, .shutdown = hisi_qm_dev_shutdown, .driver.pm = &hpre_pm_ops, }; struct pci_driver *hisi_hpre_get_pf_driver(void) { return &hpre_pci_driver; } EXPORT_SYMBOL_GPL(hisi_hpre_get_pf_driver); static void hpre_register_debugfs(void) { if (!debugfs_initialized()) return; hpre_debugfs_root = debugfs_create_dir(hpre_name, NULL); } static void hpre_unregister_debugfs(void) { debugfs_remove_recursive(hpre_debugfs_root); } static int __init hpre_init(void) { int ret; hisi_qm_init_list(&hpre_devices); hpre_register_debugfs(); ret = pci_register_driver(&hpre_pci_driver); if (ret) { hpre_unregister_debugfs(); pr_err("hpre: can't register hisi hpre driver.\n"); } return ret; } static void __exit hpre_exit(void) { pci_unregister_driver(&hpre_pci_driver); hpre_unregister_debugfs(); } module_init(hpre_init); module_exit(hpre_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zaibo Xu <[email protected]>"); MODULE_AUTHOR("Meng Yu <[email protected]>"); MODULE_DESCRIPTION("Driver for HiSilicon HPRE accelerator");
linux-master
drivers/crypto/hisilicon/hpre/hpre_main.c
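One detail worth calling out from hpre_main.c above is how hpre_set_qm_algs() advertises algorithms to uacce: each entry in hpre_dev_algs[] carries its name with a trailing "\n", the names for all bits set in the device algorithm bitmap are concatenated, and strrchr() then trims the final newline. The sketch below replays that logic as a standalone userspace program; the buffer size, helper name, and sample mask value are assumptions made for this example, not statements about any particular HPRE revision.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dev_alg {
	uint32_t alg_msk;
	const char *alg;
};

/* Same bit-to-name mapping as hpre_dev_algs[], trailing '\n' included */
static const struct dev_alg dev_algs[] = {
	{ 1u << 0, "rsa\n"    },
	{ 1u << 1, "dh\n"     },
	{ 1u << 2, "ecdh\n"   },
	{ 1u << 3, "ecdsa\n"  },
	{ 1u << 4, "sm2\n"    },
	{ 1u << 5, "x25519\n" },
	{ 1u << 6, "x448\n"   },
};

/* Concatenate the names of all set bits, then drop the trailing newline */
static void build_alg_string(uint32_t alg_msk, char *buf, size_t len)
{
	char *ptr;
	size_t i;

	buf[0] = '\0';
	for (i = 0; i < sizeof(dev_algs) / sizeof(dev_algs[0]); i++)
		if (alg_msk & dev_algs[i].alg_msk)
			strncat(buf, dev_algs[i].alg, len - strlen(buf) - 1);

	ptr = strrchr(buf, '\n');
	if (ptr)
		*ptr = '\0';
}

int main(void)
{
	char algs[256];

	/* Sample mask: bits 0, 1, 2 and 5 -> rsa, dh, ecdh, x25519 */
	build_alg_string(0x27, algs, sizeof(algs));
	printf("%s\n", algs);
	return 0;
}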
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2016-2017 HiSilicon Limited. */ #include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/internal/des.h> #include <crypto/skcipher.h> #include <crypto/xts.h> #include <crypto/internal/skcipher.h> #include "sec_drv.h" #define SEC_MAX_CIPHER_KEY 64 #define SEC_REQ_LIMIT SZ_32M struct sec_c_alg_cfg { unsigned c_alg : 3; unsigned c_mode : 3; unsigned key_len : 2; unsigned c_width : 2; }; static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = { [SEC_C_DES_ECB_64] = { .c_alg = SEC_C_ALG_DES, .c_mode = SEC_C_MODE_ECB, .key_len = SEC_KEY_LEN_DES, }, [SEC_C_DES_CBC_64] = { .c_alg = SEC_C_ALG_DES, .c_mode = SEC_C_MODE_CBC, .key_len = SEC_KEY_LEN_DES, }, [SEC_C_3DES_ECB_192_3KEY] = { .c_alg = SEC_C_ALG_3DES, .c_mode = SEC_C_MODE_ECB, .key_len = SEC_KEY_LEN_3DES_3_KEY, }, [SEC_C_3DES_ECB_192_2KEY] = { .c_alg = SEC_C_ALG_3DES, .c_mode = SEC_C_MODE_ECB, .key_len = SEC_KEY_LEN_3DES_2_KEY, }, [SEC_C_3DES_CBC_192_3KEY] = { .c_alg = SEC_C_ALG_3DES, .c_mode = SEC_C_MODE_CBC, .key_len = SEC_KEY_LEN_3DES_3_KEY, }, [SEC_C_3DES_CBC_192_2KEY] = { .c_alg = SEC_C_ALG_3DES, .c_mode = SEC_C_MODE_CBC, .key_len = SEC_KEY_LEN_3DES_2_KEY, }, [SEC_C_AES_ECB_128] = { .c_alg = SEC_C_ALG_AES, .c_mode = SEC_C_MODE_ECB, .key_len = SEC_KEY_LEN_AES_128, }, [SEC_C_AES_ECB_192] = { .c_alg = SEC_C_ALG_AES, .c_mode = SEC_C_MODE_ECB, .key_len = SEC_KEY_LEN_AES_192, }, [SEC_C_AES_ECB_256] = { .c_alg = SEC_C_ALG_AES, .c_mode = SEC_C_MODE_ECB, .key_len = SEC_KEY_LEN_AES_256, }, [SEC_C_AES_CBC_128] = { .c_alg = SEC_C_ALG_AES, .c_mode = SEC_C_MODE_CBC, .key_len = SEC_KEY_LEN_AES_128, }, [SEC_C_AES_CBC_192] = { .c_alg = SEC_C_ALG_AES, .c_mode = SEC_C_MODE_CBC, .key_len = SEC_KEY_LEN_AES_192, }, [SEC_C_AES_CBC_256] = { .c_alg = SEC_C_ALG_AES, .c_mode = SEC_C_MODE_CBC, .key_len = SEC_KEY_LEN_AES_256, }, [SEC_C_AES_CTR_128] = { .c_alg = SEC_C_ALG_AES, .c_mode = SEC_C_MODE_CTR, .key_len = SEC_KEY_LEN_AES_128, }, [SEC_C_AES_CTR_192] = { .c_alg = SEC_C_ALG_AES, .c_mode = SEC_C_MODE_CTR, .key_len = SEC_KEY_LEN_AES_192, }, [SEC_C_AES_CTR_256] = { .c_alg = SEC_C_ALG_AES, .c_mode = SEC_C_MODE_CTR, .key_len = SEC_KEY_LEN_AES_256, }, [SEC_C_AES_XTS_128] = { .c_alg = SEC_C_ALG_AES, .c_mode = SEC_C_MODE_XTS, .key_len = SEC_KEY_LEN_AES_128, }, [SEC_C_AES_XTS_256] = { .c_alg = SEC_C_ALG_AES, .c_mode = SEC_C_MODE_XTS, .key_len = SEC_KEY_LEN_AES_256, }, [SEC_C_NULL] = { }, }; /* * Mutex used to ensure safe operation of reference count of * alg providers */ static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx, struct sec_bd_info *req, enum sec_cipher_alg alg) { const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg]; memset(req, 0, sizeof(*req)); req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S; req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S; req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S; req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S; req->cipher_key_addr_lo = lower_32_bits(ctx->pkey); req->cipher_key_addr_hi = upper_32_bits(ctx->pkey); } static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm, const u8 *key, unsigned int keylen, enum sec_cipher_alg alg) { struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm); struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm); ctx->cipher_alg = alg; memcpy(ctx->key, key, keylen); sec_alg_skcipher_init_template(ctx, 
&ctx->req_template, ctx->cipher_alg); } static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl, dma_addr_t psec_sgl, struct sec_dev_info *info) { struct sec_hw_sgl *sgl_current, *sgl_next; dma_addr_t sgl_next_dma; sgl_current = hw_sgl; while (sgl_current) { sgl_next = sgl_current->next; sgl_next_dma = sgl_current->next_sgl; dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl); sgl_current = sgl_next; psec_sgl = sgl_next_dma; } } static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl, dma_addr_t *psec_sgl, struct scatterlist *sgl, int count, struct sec_dev_info *info, gfp_t gfp) { struct sec_hw_sgl *sgl_current = NULL; struct sec_hw_sgl *sgl_next; dma_addr_t sgl_next_dma; struct scatterlist *sg; int ret, sge_index, i; if (!count) return -EINVAL; for_each_sg(sgl, sg, count, i) { sge_index = i % SEC_MAX_SGE_NUM; if (sge_index == 0) { sgl_next = dma_pool_zalloc(info->hw_sgl_pool, gfp, &sgl_next_dma); if (!sgl_next) { ret = -ENOMEM; goto err_free_hw_sgls; } if (!sgl_current) { /* First one */ *psec_sgl = sgl_next_dma; *sec_sgl = sgl_next; } else { /* Chained */ sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM; sgl_current->next_sgl = sgl_next_dma; sgl_current->next = sgl_next; } sgl_current = sgl_next; } sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg); sgl_current->sge_entries[sge_index].len = sg_dma_len(sg); sgl_current->data_bytes_in_sgl += sg_dma_len(sg); } sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM; sgl_current->next_sgl = 0; (*sec_sgl)->entry_sum_in_chain = count; return 0; err_free_hw_sgls: sec_free_hw_sgl(*sec_sgl, *psec_sgl, info); *psec_sgl = 0; return ret; } static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen, enum sec_cipher_alg alg) { struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct device *dev = ctx->queue->dev_info->dev; mutex_lock(&ctx->lock); if (ctx->key) { /* rekeying */ memset(ctx->key, 0, SEC_MAX_CIPHER_KEY); } else { /* new key */ ctx->key = dma_alloc_coherent(dev, SEC_MAX_CIPHER_KEY, &ctx->pkey, GFP_KERNEL); if (!ctx->key) { mutex_unlock(&ctx->lock); return -ENOMEM; } } mutex_unlock(&ctx->lock); sec_alg_skcipher_init_context(tfm, key, keylen, alg); return 0; } static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { enum sec_cipher_alg alg; switch (keylen) { case AES_KEYSIZE_128: alg = SEC_C_AES_ECB_128; break; case AES_KEYSIZE_192: alg = SEC_C_AES_ECB_192; break; case AES_KEYSIZE_256: alg = SEC_C_AES_ECB_256; break; default: return -EINVAL; } return sec_alg_skcipher_setkey(tfm, key, keylen, alg); } static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { enum sec_cipher_alg alg; switch (keylen) { case AES_KEYSIZE_128: alg = SEC_C_AES_CBC_128; break; case AES_KEYSIZE_192: alg = SEC_C_AES_CBC_192; break; case AES_KEYSIZE_256: alg = SEC_C_AES_CBC_256; break; default: return -EINVAL; } return sec_alg_skcipher_setkey(tfm, key, keylen, alg); } static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { enum sec_cipher_alg alg; switch (keylen) { case AES_KEYSIZE_128: alg = SEC_C_AES_CTR_128; break; case AES_KEYSIZE_192: alg = SEC_C_AES_CTR_192; break; case AES_KEYSIZE_256: alg = SEC_C_AES_CTR_256; break; default: return -EINVAL; } return sec_alg_skcipher_setkey(tfm, key, keylen, alg); } static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { enum sec_cipher_alg alg; int ret; 
ret = xts_verify_key(tfm, key, keylen); if (ret) return ret; switch (keylen) { case AES_KEYSIZE_128 * 2: alg = SEC_C_AES_XTS_128; break; case AES_KEYSIZE_256 * 2: alg = SEC_C_AES_XTS_256; break; default: return -EINVAL; } return sec_alg_skcipher_setkey(tfm, key, keylen, alg); } static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return verify_skcipher_des_key(tfm, key) ?: sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64); } static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return verify_skcipher_des_key(tfm, key) ?: sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64); } static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return verify_skcipher_des3_key(tfm, key) ?: sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_3DES_ECB_192_3KEY); } static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm, const u8 *key, unsigned int keylen) { return verify_skcipher_des3_key(tfm, key) ?: sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_3DES_CBC_192_3KEY); } static void sec_alg_free_el(struct sec_request_el *el, struct sec_dev_info *info) { sec_free_hw_sgl(el->out, el->dma_out, info); sec_free_hw_sgl(el->in, el->dma_in, info); kfree(el->sgl_in); kfree(el->sgl_out); kfree(el); } /* queuelock must be held */ static int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue) { struct sec_request_el *el, *temp; int ret = 0; mutex_lock(&sec_req->lock); list_for_each_entry_safe(el, temp, &sec_req->elements, head) { /* * Add to hardware queue only under following circumstances * 1) Software and hardware queue empty so no chain dependencies * 2) No dependencies as new IV - (check software queue empty * to maintain order) * 3) No dependencies because the mode does no chaining. * * In other cases first insert onto the software queue which * is then emptied as requests complete */ if (!queue->havesoftqueue || (kfifo_is_empty(&queue->softqueue) && sec_queue_empty(queue))) { ret = sec_queue_send(queue, &el->req, sec_req); if (ret == -EAGAIN) { /* Wait unti we can send then try again */ /* DEAD if here - should not happen */ ret = -EBUSY; goto err_unlock; } } else { kfifo_put(&queue->softqueue, el); } } err_unlock: mutex_unlock(&sec_req->lock); return ret; } static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp, struct crypto_async_request *req_base) { struct skcipher_request *skreq = container_of(req_base, struct skcipher_request, base); struct sec_request *sec_req = skcipher_request_ctx(skreq); struct sec_request *backlog_req; struct sec_request_el *sec_req_el, *nextrequest; struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx; struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq); struct device *dev = ctx->queue->dev_info->dev; int icv_or_skey_en, ret; bool done; sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el, head); icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >> SEC_BD_W0_ICV_OR_SKEY_EN_S; if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) { dev_err(dev, "Got an invalid answer %lu %d\n", sec_resp->w1 & SEC_BD_W1_BD_INVALID, icv_or_skey_en); sec_req->err = -EINVAL; /* * We need to muddle on to avoid getting stuck with elements * on the queue. Error will be reported so requester so * it should be able to handle appropriately. 
*/ } spin_lock_bh(&ctx->queue->queuelock); /* Put the IV in place for chained cases */ switch (ctx->cipher_alg) { case SEC_C_AES_CBC_128: case SEC_C_AES_CBC_192: case SEC_C_AES_CBC_256: if (sec_req_el->req.w0 & SEC_BD_W0_DE) sg_pcopy_to_buffer(sec_req_el->sgl_out, sg_nents(sec_req_el->sgl_out), skreq->iv, crypto_skcipher_ivsize(atfm), sec_req_el->el_length - crypto_skcipher_ivsize(atfm)); else sg_pcopy_to_buffer(sec_req_el->sgl_in, sg_nents(sec_req_el->sgl_in), skreq->iv, crypto_skcipher_ivsize(atfm), sec_req_el->el_length - crypto_skcipher_ivsize(atfm)); /* No need to sync to the device as coherent DMA */ break; case SEC_C_AES_CTR_128: case SEC_C_AES_CTR_192: case SEC_C_AES_CTR_256: crypto_inc(skreq->iv, 16); break; default: /* Do not update */ break; } if (ctx->queue->havesoftqueue && !kfifo_is_empty(&ctx->queue->softqueue) && sec_queue_empty(ctx->queue)) { ret = kfifo_get(&ctx->queue->softqueue, &nextrequest); if (ret <= 0) dev_err(dev, "Error getting next element from kfifo %d\n", ret); else /* We know there is space so this cannot fail */ sec_queue_send(ctx->queue, &nextrequest->req, nextrequest->sec_req); } else if (!list_empty(&ctx->backlog)) { /* Need to verify there is room first */ backlog_req = list_first_entry(&ctx->backlog, typeof(*backlog_req), backlog_head); if (sec_queue_can_enqueue(ctx->queue, backlog_req->num_elements) || (ctx->queue->havesoftqueue && kfifo_avail(&ctx->queue->softqueue) > backlog_req->num_elements)) { sec_send_request(backlog_req, ctx->queue); crypto_request_complete(backlog_req->req_base, -EINPROGRESS); list_del(&backlog_req->backlog_head); } } spin_unlock_bh(&ctx->queue->queuelock); mutex_lock(&sec_req->lock); list_del(&sec_req_el->head); mutex_unlock(&sec_req->lock); sec_alg_free_el(sec_req_el, ctx->queue->dev_info); /* * Request is done. 
* The dance is needed as the lock is freed in the completion */ mutex_lock(&sec_req->lock); done = list_empty(&sec_req->elements); mutex_unlock(&sec_req->lock); if (done) { if (crypto_skcipher_ivsize(atfm)) { dma_unmap_single(dev, sec_req->dma_iv, crypto_skcipher_ivsize(atfm), DMA_TO_DEVICE); } dma_unmap_sg(dev, skreq->src, sec_req->len_in, DMA_BIDIRECTIONAL); if (skreq->src != skreq->dst) dma_unmap_sg(dev, skreq->dst, sec_req->len_out, DMA_BIDIRECTIONAL); skcipher_request_complete(skreq, sec_req->err); } } void sec_alg_callback(struct sec_bd_info *resp, void *shadow) { struct sec_request *sec_req = shadow; sec_req->cb(resp, sec_req->req_base); } static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes, int *steps, gfp_t gfp) { size_t *sizes; int i; /* Split into suitable sized blocks */ *steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT; sizes = kcalloc(*steps, sizeof(*sizes), gfp); if (!sizes) return -ENOMEM; for (i = 0; i < *steps - 1; i++) sizes[i] = SEC_REQ_LIMIT; sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1); *split_sizes = sizes; return 0; } static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes, int steps, struct scatterlist ***splits, int **splits_nents, int sgl_len_in, struct device *dev, gfp_t gfp) { int ret, count; count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL); if (!count) return -EINVAL; *splits = kcalloc(steps, sizeof(struct scatterlist *), gfp); if (!*splits) { ret = -ENOMEM; goto err_unmap_sg; } *splits_nents = kcalloc(steps, sizeof(int), gfp); if (!*splits_nents) { ret = -ENOMEM; goto err_free_splits; } /* output the scatter list before and after this */ ret = sg_split(sgl, count, 0, steps, split_sizes, *splits, *splits_nents, gfp); if (ret) { ret = -ENOMEM; goto err_free_splits_nents; } return 0; err_free_splits_nents: kfree(*splits_nents); err_free_splits: kfree(*splits); err_unmap_sg: dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL); return ret; } /* * Reverses the sec_map_and_split_sg call for messages not yet added to * the queues. 
*/ static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps, struct scatterlist **splits, int *splits_nents, int sgl_len_in, struct device *dev) { int i; for (i = 0; i < steps; i++) kfree(splits[i]); kfree(splits_nents); kfree(splits); dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL); } static struct sec_request_el *sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt, int el_size, bool different_dest, struct scatterlist *sgl_in, int n_ents_in, struct scatterlist *sgl_out, int n_ents_out, struct sec_dev_info *info, gfp_t gfp) { struct sec_request_el *el; struct sec_bd_info *req; int ret; el = kzalloc(sizeof(*el), gfp); if (!el) return ERR_PTR(-ENOMEM); el->el_length = el_size; req = &el->req; memcpy(req, template, sizeof(*req)); req->w0 &= ~SEC_BD_W0_CIPHER_M; if (encrypt) req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S; else req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S; req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M; req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) & SEC_BD_W0_C_GRAN_SIZE_19_16_M; req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M; req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) & SEC_BD_W0_C_GRAN_SIZE_21_20_M; /* Writing whole u32 so no need to take care of masking */ req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) | ((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) & SEC_BD_W2_C_GRAN_SIZE_15_0_M); req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M; req->w1 |= SEC_BD_W1_ADDR_TYPE; el->sgl_in = sgl_in; ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in, n_ents_in, info, gfp); if (ret) goto err_free_el; req->data_addr_lo = lower_32_bits(el->dma_in); req->data_addr_hi = upper_32_bits(el->dma_in); if (different_dest) { el->sgl_out = sgl_out; ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out, el->sgl_out, n_ents_out, info, gfp); if (ret) goto err_free_hw_sgl_in; req->w0 |= SEC_BD_W0_DE; req->cipher_destin_addr_lo = lower_32_bits(el->dma_out); req->cipher_destin_addr_hi = upper_32_bits(el->dma_out); } else { req->w0 &= ~SEC_BD_W0_DE; req->cipher_destin_addr_lo = lower_32_bits(el->dma_in); req->cipher_destin_addr_hi = upper_32_bits(el->dma_in); } return el; err_free_hw_sgl_in: sec_free_hw_sgl(el->in, el->dma_in, info); err_free_el: kfree(el); return ERR_PTR(ret); } static int sec_alg_skcipher_crypto(struct skcipher_request *skreq, bool encrypt) { struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq); struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm); struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm); struct sec_queue *queue = ctx->queue; struct sec_request *sec_req = skcipher_request_ctx(skreq); struct sec_dev_info *info = queue->dev_info; int i, ret, steps; size_t *split_sizes; struct scatterlist **splits_in; struct scatterlist **splits_out = NULL; int *splits_in_nents; int *splits_out_nents = NULL; struct sec_request_el *el, *temp; bool split = skreq->src != skreq->dst; gfp_t gfp = skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? 
GFP_KERNEL : GFP_ATOMIC; mutex_init(&sec_req->lock); sec_req->req_base = &skreq->base; sec_req->err = 0; /* SGL mapping out here to allow us to break it up as necessary */ sec_req->len_in = sg_nents(skreq->src); ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes, &steps, gfp); if (ret) return ret; sec_req->num_elements = steps; ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in, &splits_in_nents, sec_req->len_in, info->dev, gfp); if (ret) goto err_free_split_sizes; if (split) { sec_req->len_out = sg_nents(skreq->dst); ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps, &splits_out, &splits_out_nents, sec_req->len_out, info->dev, gfp); if (ret) goto err_unmap_in_sg; } /* Shared info stored in seq_req - applies to all BDs */ sec_req->tfm_ctx = ctx; sec_req->cb = sec_skcipher_alg_callback; INIT_LIST_HEAD(&sec_req->elements); /* * Future optimization. * In the chaining case we can't use a dma pool bounce buffer * but in the case where we know there is no chaining we can */ if (crypto_skcipher_ivsize(atfm)) { sec_req->dma_iv = dma_map_single(info->dev, skreq->iv, crypto_skcipher_ivsize(atfm), DMA_TO_DEVICE); if (dma_mapping_error(info->dev, sec_req->dma_iv)) { ret = -ENOMEM; goto err_unmap_out_sg; } } /* Set them all up then queue - cleaner error handling. */ for (i = 0; i < steps; i++) { el = sec_alg_alloc_and_fill_el(&ctx->req_template, encrypt ? 1 : 0, split_sizes[i], skreq->src != skreq->dst, splits_in[i], splits_in_nents[i], split ? splits_out[i] : NULL, split ? splits_out_nents[i] : 0, info, gfp); if (IS_ERR(el)) { ret = PTR_ERR(el); goto err_free_elements; } el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv); el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv); el->sec_req = sec_req; list_add_tail(&el->head, &sec_req->elements); } /* * Only attempt to queue if the whole lot can fit in the queue - * we can't successfully cleanup after a partial queing so this * must succeed or fail atomically. * * Big hammer test of both software and hardware queues - could be * more refined but this is unlikely to happen so no need. */ /* Grab a big lock for a long time to avoid concurrency issues */ spin_lock_bh(&queue->queuelock); /* * Can go on to queue if we have space in either: * 1) The hardware queue and no software queue * 2) The software queue * AND there is nothing in the backlog. If there is backlog we * have to only queue to the backlog queue and return busy. 
*/ if ((!sec_queue_can_enqueue(queue, steps) && (!queue->havesoftqueue || kfifo_avail(&queue->softqueue) > steps)) || !list_empty(&ctx->backlog)) { ret = -EBUSY; if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { list_add_tail(&sec_req->backlog_head, &ctx->backlog); spin_unlock_bh(&queue->queuelock); goto out; } spin_unlock_bh(&queue->queuelock); goto err_free_elements; } ret = sec_send_request(sec_req, queue); spin_unlock_bh(&queue->queuelock); if (ret) goto err_free_elements; ret = -EINPROGRESS; out: /* Cleanup - all elements in pointer arrays have been copied */ kfree(splits_in_nents); kfree(splits_in); kfree(splits_out_nents); kfree(splits_out); kfree(split_sizes); return ret; err_free_elements: list_for_each_entry_safe(el, temp, &sec_req->elements, head) { list_del(&el->head); sec_alg_free_el(el, info); } if (crypto_skcipher_ivsize(atfm)) dma_unmap_single(info->dev, sec_req->dma_iv, crypto_skcipher_ivsize(atfm), DMA_BIDIRECTIONAL); err_unmap_out_sg: if (split) sec_unmap_sg_on_err(skreq->dst, steps, splits_out, splits_out_nents, sec_req->len_out, info->dev); err_unmap_in_sg: sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents, sec_req->len_in, info->dev); err_free_split_sizes: kfree(split_sizes); return ret; } static int sec_alg_skcipher_encrypt(struct skcipher_request *req) { return sec_alg_skcipher_crypto(req, true); } static int sec_alg_skcipher_decrypt(struct skcipher_request *req) { return sec_alg_skcipher_crypto(req, false); } static int sec_alg_skcipher_init(struct crypto_skcipher *tfm) { struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); mutex_init(&ctx->lock); INIT_LIST_HEAD(&ctx->backlog); crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request)); ctx->queue = sec_queue_alloc_start_safe(); if (IS_ERR(ctx->queue)) return PTR_ERR(ctx->queue); spin_lock_init(&ctx->queue->queuelock); ctx->queue->havesoftqueue = false; return 0; } static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm) { struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); struct device *dev = ctx->queue->dev_info->dev; if (ctx->key) { memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY); dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key, ctx->pkey); } sec_queue_stop_release(ctx->queue); } static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm) { struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); int ret; ret = sec_alg_skcipher_init(tfm); if (ret) return ret; INIT_KFIFO(ctx->queue->softqueue); ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL); if (ret) { sec_alg_skcipher_exit(tfm); return ret; } ctx->queue->havesoftqueue = true; return 0; } static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm) { struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm); kfifo_free(&ctx->queue->softqueue); sec_alg_skcipher_exit(tfm); } static struct skcipher_alg sec_algs[] = { { .base = { .cra_name = "ecb(aes)", .cra_driver_name = "hisi_sec_aes_ecb", .cra_priority = 4001, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .init = sec_alg_skcipher_init, .exit = sec_alg_skcipher_exit, .setkey = sec_alg_skcipher_setkey_aes_ecb, .decrypt = sec_alg_skcipher_decrypt, .encrypt = sec_alg_skcipher_encrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = 0, }, { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "hisi_sec_aes_cbc", .cra_priority = 4001, .cra_flags = CRYPTO_ALG_ASYNC | 
CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .init = sec_alg_skcipher_init_with_queue, .exit = sec_alg_skcipher_exit_with_queue, .setkey = sec_alg_skcipher_setkey_aes_cbc, .decrypt = sec_alg_skcipher_decrypt, .encrypt = sec_alg_skcipher_encrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, { .base = { .cra_name = "ctr(aes)", .cra_driver_name = "hisi_sec_aes_ctr", .cra_priority = 4001, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .init = sec_alg_skcipher_init_with_queue, .exit = sec_alg_skcipher_exit_with_queue, .setkey = sec_alg_skcipher_setkey_aes_ctr, .decrypt = sec_alg_skcipher_decrypt, .encrypt = sec_alg_skcipher_encrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, { .base = { .cra_name = "xts(aes)", .cra_driver_name = "hisi_sec_aes_xts", .cra_priority = 4001, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .init = sec_alg_skcipher_init, .exit = sec_alg_skcipher_exit, .setkey = sec_alg_skcipher_setkey_aes_xts, .decrypt = sec_alg_skcipher_decrypt, .encrypt = sec_alg_skcipher_encrypt, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, { /* Unable to find any test vectors so untested */ .base = { .cra_name = "ecb(des)", .cra_driver_name = "hisi_sec_des_ecb", .cra_priority = 4001, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .init = sec_alg_skcipher_init, .exit = sec_alg_skcipher_exit, .setkey = sec_alg_skcipher_setkey_des_ecb, .decrypt = sec_alg_skcipher_decrypt, .encrypt = sec_alg_skcipher_encrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = 0, }, { .base = { .cra_name = "cbc(des)", .cra_driver_name = "hisi_sec_des_cbc", .cra_priority = 4001, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .init = sec_alg_skcipher_init_with_queue, .exit = sec_alg_skcipher_exit_with_queue, .setkey = sec_alg_skcipher_setkey_des_cbc, .decrypt = sec_alg_skcipher_decrypt, .encrypt = sec_alg_skcipher_encrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, }, { .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "hisi_sec_3des_cbc", .cra_priority = 4001, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .init = sec_alg_skcipher_init_with_queue, .exit = sec_alg_skcipher_exit_with_queue, .setkey = sec_alg_skcipher_setkey_3des_cbc, .decrypt = sec_alg_skcipher_decrypt, .encrypt = sec_alg_skcipher_encrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, }, { .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "hisi_sec_3des_ecb", .cra_priority = 4001, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, 
.cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct sec_alg_tfm_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .init = sec_alg_skcipher_init, .exit = sec_alg_skcipher_exit, .setkey = sec_alg_skcipher_setkey_3des_ecb, .decrypt = sec_alg_skcipher_decrypt, .encrypt = sec_alg_skcipher_encrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = 0, } }; int sec_algs_register(void) { int ret = 0; mutex_lock(&algs_lock); if (++active_devs != 1) goto unlock; ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs)); if (ret) --active_devs; unlock: mutex_unlock(&algs_lock); return ret; } void sec_algs_unregister(void) { mutex_lock(&algs_lock); if (--active_devs != 0) goto unlock; crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs)); unlock: mutex_unlock(&algs_lock); }
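/*
 * Illustrative sketch, not part of the upstream driver: one way an in-kernel
 * consumer could exercise the "cbc(aes)" skcipher registered above through
 * the generic crypto API. The function name, buffers and lengths are
 * hypothetical, and <crypto/skcipher.h> is assumed to be available.
 */
static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
				   u8 *iv, struct scatterlist *src,
				   struct scatterlist *dst, unsigned int len)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_skcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* The driver completes asynchronously; wait for the callback. */
	ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return ret;
}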
linux-master
drivers/crypto/hisilicon/sec/sec_algs.c
// SPDX-License-Identifier: GPL-2.0 /* * Driver for the HiSilicon SEC units found on Hip06 Hip07 * * Copyright (c) 2016-2017 HiSilicon Limited. */ #include <linux/acpi.h> #include <linux/atomic.h> #include <linux/delay.h> #include <linux/dma-direction.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/io.h> #include <linux/iommu.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqreturn.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> #include "sec_drv.h" #define SEC_QUEUE_AR_FROCE_ALLOC 0 #define SEC_QUEUE_AR_FROCE_NOALLOC 1 #define SEC_QUEUE_AR_FROCE_DIS 2 #define SEC_QUEUE_AW_FROCE_ALLOC 0 #define SEC_QUEUE_AW_FROCE_NOALLOC 1 #define SEC_QUEUE_AW_FROCE_DIS 2 /* SEC_ALGSUB registers */ #define SEC_ALGSUB_CLK_EN_REG 0x03b8 #define SEC_ALGSUB_CLK_DIS_REG 0x03bc #define SEC_ALGSUB_CLK_ST_REG 0x535c #define SEC_ALGSUB_RST_REQ_REG 0x0aa8 #define SEC_ALGSUB_RST_DREQ_REG 0x0aac #define SEC_ALGSUB_RST_ST_REG 0x5a54 #define SEC_ALGSUB_RST_ST_IS_RST BIT(0) #define SEC_ALGSUB_BUILD_RST_REQ_REG 0x0ab8 #define SEC_ALGSUB_BUILD_RST_DREQ_REG 0x0abc #define SEC_ALGSUB_BUILD_RST_ST_REG 0x5a5c #define SEC_ALGSUB_BUILD_RST_ST_IS_RST BIT(0) #define SEC_SAA_BASE 0x00001000UL /* SEC_SAA registers */ #define SEC_SAA_CTRL_REG(x) ((x) * SEC_SAA_ADDR_SIZE) #define SEC_SAA_CTRL_GET_QM_EN BIT(0) #define SEC_ST_INTMSK1_REG 0x0200 #define SEC_ST_RINT1_REG 0x0400 #define SEC_ST_INTSTS1_REG 0x0600 #define SEC_BD_MNG_STAT_REG 0x0800 #define SEC_PARSING_STAT_REG 0x0804 #define SEC_LOAD_TIME_OUT_CNT_REG 0x0808 #define SEC_CORE_WORK_TIME_OUT_CNT_REG 0x080c #define SEC_BACK_TIME_OUT_CNT_REG 0x0810 #define SEC_BD1_PARSING_RD_TIME_OUT_CNT_REG 0x0814 #define SEC_BD1_PARSING_WR_TIME_OUT_CNT_REG 0x0818 #define SEC_BD2_PARSING_RD_TIME_OUT_CNT_REG 0x081c #define SEC_BD2_PARSING_WR_TIME_OUT_CNT_REG 0x0820 #define SEC_SAA_ACC_REG 0x083c #define SEC_BD_NUM_CNT_IN_SEC_REG 0x0858 #define SEC_LOAD_WORK_TIME_CNT_REG 0x0860 #define SEC_CORE_WORK_WORK_TIME_CNT_REG 0x0864 #define SEC_BACK_WORK_TIME_CNT_REG 0x0868 #define SEC_SAA_IDLE_TIME_CNT_REG 0x086c #define SEC_SAA_CLK_CNT_REG 0x0870 /* SEC_COMMON registers */ #define SEC_CLK_EN_REG 0x0000 #define SEC_CTRL_REG 0x0004 #define SEC_COMMON_CNT_CLR_CE_REG 0x0008 #define SEC_COMMON_CNT_CLR_CE_CLEAR BIT(0) #define SEC_COMMON_CNT_CLR_CE_SNAP_EN BIT(1) #define SEC_SECURE_CTRL_REG 0x000c #define SEC_AXI_CACHE_CFG_REG 0x0010 #define SEC_AXI_QOS_CFG_REG 0x0014 #define SEC_IPV4_MASK_TABLE_REG 0x0020 #define SEC_IPV6_MASK_TABLE_X_REG(x) (0x0024 + (x) * 4) #define SEC_FSM_MAX_CNT_REG 0x0064 #define SEC_CTRL2_REG 0x0068 #define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M GENMASK(3, 0) #define SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S 0 #define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M GENMASK(6, 4) #define SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S 4 #define SEC_CTRL2_CLK_GATE_EN BIT(7) #define SEC_CTRL2_ENDIAN_BD BIT(8) #define SEC_CTRL2_ENDIAN_BD_TYPE BIT(9) #define SEC_CNT_PRECISION_CFG_REG 0x006c #define SEC_DEBUG_BD_CFG_REG 0x0070 #define SEC_DEBUG_BD_CFG_WB_NORMAL BIT(0) #define SEC_DEBUG_BD_CFG_WB_EN BIT(1) #define SEC_Q_SIGHT_SEL 0x0074 #define SEC_Q_SIGHT_HIS_CLR 0x0078 #define SEC_Q_VMID_CFG_REG(q) (0x0100 + (q) * 4) #define SEC_Q_WEIGHT_CFG_REG(q) (0x200 + (q) * 4) #define SEC_STAT_CLR_REG 0x0a00 #define SEC_SAA_IDLE_CNT_CLR_REG 0x0a04 #define SEC_QM_CPL_Q_IDBUF_DFX_CFG_REG 0x0b00 #define SEC_QM_CPL_Q_IDBUF_DFX_RESULT_REG 0x0b04 #define SEC_QM_BD_DFX_CFG_REG 0x0b08 #define SEC_QM_BD_DFX_RESULT_REG 
0x0b0c #define SEC_QM_BDID_DFX_RESULT_REG 0x0b10 #define SEC_QM_BD_DFIFO_STATUS_REG 0x0b14 #define SEC_QM_BD_DFX_CFG2_REG 0x0b1c #define SEC_QM_BD_DFX_RESULT2_REG 0x0b20 #define SEC_QM_BD_IDFIFO_STATUS_REG 0x0b18 #define SEC_QM_BD_DFIFO_STATUS2_REG 0x0b28 #define SEC_QM_BD_IDFIFO_STATUS2_REG 0x0b2c #define SEC_HASH_IPV4_MASK 0xfff00000 #define SEC_MAX_SAA_NUM 0xa #define SEC_SAA_ADDR_SIZE 0x1000 #define SEC_Q_INIT_REG 0x0 #define SEC_Q_INIT_WO_STAT_CLEAR 0x2 #define SEC_Q_INIT_AND_STAT_CLEAR 0x3 #define SEC_Q_CFG_REG 0x8 #define SEC_Q_CFG_REORDER BIT(0) #define SEC_Q_PROC_NUM_CFG_REG 0x10 #define SEC_QUEUE_ENB_REG 0x18 #define SEC_Q_DEPTH_CFG_REG 0x50 #define SEC_Q_DEPTH_CFG_DEPTH_M GENMASK(11, 0) #define SEC_Q_DEPTH_CFG_DEPTH_S 0 #define SEC_Q_BASE_HADDR_REG 0x54 #define SEC_Q_BASE_LADDR_REG 0x58 #define SEC_Q_WR_PTR_REG 0x5c #define SEC_Q_OUTORDER_BASE_HADDR_REG 0x60 #define SEC_Q_OUTORDER_BASE_LADDR_REG 0x64 #define SEC_Q_OUTORDER_RD_PTR_REG 0x68 #define SEC_Q_OT_TH_REG 0x6c #define SEC_Q_ARUSER_CFG_REG 0x70 #define SEC_Q_ARUSER_CFG_FA BIT(0) #define SEC_Q_ARUSER_CFG_FNA BIT(1) #define SEC_Q_ARUSER_CFG_RINVLD BIT(2) #define SEC_Q_ARUSER_CFG_PKG BIT(3) #define SEC_Q_AWUSER_CFG_REG 0x74 #define SEC_Q_AWUSER_CFG_FA BIT(0) #define SEC_Q_AWUSER_CFG_FNA BIT(1) #define SEC_Q_AWUSER_CFG_PKG BIT(2) #define SEC_Q_ERR_BASE_HADDR_REG 0x7c #define SEC_Q_ERR_BASE_LADDR_REG 0x80 #define SEC_Q_CFG_VF_NUM_REG 0x84 #define SEC_Q_SOFT_PROC_PTR_REG 0x88 #define SEC_Q_FAIL_INT_MSK_REG 0x300 #define SEC_Q_FLOW_INT_MKS_REG 0x304 #define SEC_Q_FAIL_RINT_REG 0x400 #define SEC_Q_FLOW_RINT_REG 0x404 #define SEC_Q_FAIL_INT_STATUS_REG 0x500 #define SEC_Q_FLOW_INT_STATUS_REG 0x504 #define SEC_Q_STATUS_REG 0x600 #define SEC_Q_RD_PTR_REG 0x604 #define SEC_Q_PRO_PTR_REG 0x608 #define SEC_Q_OUTORDER_WR_PTR_REG 0x60c #define SEC_Q_OT_CNT_STATUS_REG 0x610 #define SEC_Q_INORDER_BD_NUM_ST_REG 0x650 #define SEC_Q_INORDER_GET_FLAG_ST_REG 0x654 #define SEC_Q_INORDER_ADD_FLAG_ST_REG 0x658 #define SEC_Q_INORDER_TASK_INT_NUM_LEFT_ST_REG 0x65c #define SEC_Q_RD_DONE_PTR_REG 0x660 #define SEC_Q_CPL_Q_BD_NUM_ST_REG 0x700 #define SEC_Q_CPL_Q_PTR_ST_REG 0x704 #define SEC_Q_CPL_Q_H_ADDR_ST_REG 0x708 #define SEC_Q_CPL_Q_L_ADDR_ST_REG 0x70c #define SEC_Q_CPL_TASK_INT_NUM_LEFT_ST_REG 0x710 #define SEC_Q_WRR_ID_CHECK_REG 0x714 #define SEC_Q_CPLQ_FULL_CHECK_REG 0x718 #define SEC_Q_SUCCESS_BD_CNT_REG 0x800 #define SEC_Q_FAIL_BD_CNT_REG 0x804 #define SEC_Q_GET_BD_CNT_REG 0x808 #define SEC_Q_IVLD_CNT_REG 0x80c #define SEC_Q_BD_PROC_GET_CNT_REG 0x810 #define SEC_Q_BD_PROC_DONE_CNT_REG 0x814 #define SEC_Q_LAT_CLR_REG 0x850 #define SEC_Q_PKT_LAT_MAX_REG 0x854 #define SEC_Q_PKT_LAT_AVG_REG 0x858 #define SEC_Q_PKT_LAT_MIN_REG 0x85c #define SEC_Q_ID_CLR_CFG_REG 0x900 #define SEC_Q_1ST_BD_ERR_ID_REG 0x904 #define SEC_Q_1ST_AUTH_FAIL_ID_REG 0x908 #define SEC_Q_1ST_RD_ERR_ID_REG 0x90c #define SEC_Q_1ST_ECC2_ERR_ID_REG 0x910 #define SEC_Q_1ST_IVLD_ID_REG 0x914 #define SEC_Q_1ST_BD_WR_ERR_ID_REG 0x918 #define SEC_Q_1ST_ERR_BD_WR_ERR_ID_REG 0x91c #define SEC_Q_1ST_BD_MAC_WR_ERR_ID_REG 0x920 struct sec_debug_bd_info { #define SEC_DEBUG_BD_INFO_SOFT_ERR_CHECK_M GENMASK(22, 0) u32 soft_err_check; #define SEC_DEBUG_BD_INFO_HARD_ERR_CHECK_M GENMASK(9, 0) u32 hard_err_check; u32 icv_mac1st_word; #define SEC_DEBUG_BD_INFO_GET_ID_M GENMASK(19, 0) u32 sec_get_id; /* W4---W15 */ u32 reserv_left[12]; }; struct sec_out_bd_info { #define SEC_OUT_BD_INFO_Q_ID_M GENMASK(11, 0) #define SEC_OUT_BD_INFO_ECC_2BIT_ERR BIT(14) u16 data; }; #define SEC_MAX_DEVICES 8 static 
struct sec_dev_info *sec_devices[SEC_MAX_DEVICES]; static DEFINE_MUTEX(sec_id_lock); static int sec_queue_map_io(struct sec_queue *queue) { struct device *dev = queue->dev_info->dev; struct resource *res; res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 2 + queue->queue_id); if (!res) { dev_err(dev, "Failed to get queue %u memory resource\n", queue->queue_id); return -ENOMEM; } queue->regs = ioremap(res->start, resource_size(res)); if (!queue->regs) return -ENOMEM; return 0; } static void sec_queue_unmap_io(struct sec_queue *queue) { iounmap(queue->regs); } static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg) { void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG; u32 regval; regval = readl_relaxed(addr); if (ar_pkg) regval |= SEC_Q_ARUSER_CFG_PKG; else regval &= ~SEC_Q_ARUSER_CFG_PKG; writel_relaxed(regval, addr); return 0; } static int sec_queue_aw_pkgattr(struct sec_queue *queue, u32 aw_pkg) { void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG; u32 regval; regval = readl_relaxed(addr); regval |= SEC_Q_AWUSER_CFG_PKG; writel_relaxed(regval, addr); return 0; } static int sec_clk_en(struct sec_dev_info *info) { void __iomem *base = info->regs[SEC_COMMON]; u32 i = 0; writel_relaxed(0x7, base + SEC_ALGSUB_CLK_EN_REG); do { usleep_range(1000, 10000); if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0x7) return 0; i++; } while (i < 10); dev_err(info->dev, "sec clock enable fail!\n"); return -EIO; } static int sec_clk_dis(struct sec_dev_info *info) { void __iomem *base = info->regs[SEC_COMMON]; u32 i = 0; writel_relaxed(0x7, base + SEC_ALGSUB_CLK_DIS_REG); do { usleep_range(1000, 10000); if ((readl_relaxed(base + SEC_ALGSUB_CLK_ST_REG) & 0x7) == 0) return 0; i++; } while (i < 10); dev_err(info->dev, "sec clock disable fail!\n"); return -EIO; } static int sec_reset_whole_module(struct sec_dev_info *info) { void __iomem *base = info->regs[SEC_COMMON]; bool is_reset, b_is_reset; u32 i = 0; writel_relaxed(1, base + SEC_ALGSUB_RST_REQ_REG); writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_REQ_REG); while (1) { usleep_range(1000, 10000); is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) & SEC_ALGSUB_RST_ST_IS_RST; b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) & SEC_ALGSUB_BUILD_RST_ST_IS_RST; if (is_reset && b_is_reset) break; i++; if (i > 10) { dev_err(info->dev, "Reset req failed\n"); return -EIO; } } i = 0; writel_relaxed(1, base + SEC_ALGSUB_RST_DREQ_REG); writel_relaxed(1, base + SEC_ALGSUB_BUILD_RST_DREQ_REG); while (1) { usleep_range(1000, 10000); is_reset = readl_relaxed(base + SEC_ALGSUB_RST_ST_REG) & SEC_ALGSUB_RST_ST_IS_RST; b_is_reset = readl_relaxed(base + SEC_ALGSUB_BUILD_RST_ST_REG) & SEC_ALGSUB_BUILD_RST_ST_IS_RST; if (!is_reset && !b_is_reset) break; i++; if (i > 10) { dev_err(info->dev, "Reset dreq failed\n"); return -EIO; } } return 0; } static void sec_bd_endian_little(struct sec_dev_info *info) { void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG; u32 regval; regval = readl_relaxed(addr); regval &= ~(SEC_CTRL2_ENDIAN_BD | SEC_CTRL2_ENDIAN_BD_TYPE); writel_relaxed(regval, addr); } /* * sec_cache_config - configure optimum cache placement */ static void sec_cache_config(struct sec_dev_info *info) { struct iommu_domain *domain; void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL_REG; domain = iommu_get_domain_for_dev(info->dev); /* Check that translation is occurring */ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) writel_relaxed(0x44cf9e, addr); else writel_relaxed(0x4cfd9, addr); } static void 
sec_data_axiwr_otsd_cfg(struct sec_dev_info *info, u32 cfg) { void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG; u32 regval; regval = readl_relaxed(addr); regval &= ~SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M; regval |= (cfg << SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_S) & SEC_CTRL2_DATA_AXI_WR_OTSD_CFG_M; writel_relaxed(regval, addr); } static void sec_data_axird_otsd_cfg(struct sec_dev_info *info, u32 cfg) { void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG; u32 regval; regval = readl_relaxed(addr); regval &= ~SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M; regval |= (cfg << SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_S) & SEC_CTRL2_DATA_AXI_RD_OTSD_CFG_M; writel_relaxed(regval, addr); } static void sec_clk_gate_en(struct sec_dev_info *info, bool clkgate) { void __iomem *addr = info->regs[SEC_SAA] + SEC_CTRL2_REG; u32 regval; regval = readl_relaxed(addr); if (clkgate) regval |= SEC_CTRL2_CLK_GATE_EN; else regval &= ~SEC_CTRL2_CLK_GATE_EN; writel_relaxed(regval, addr); } static void sec_comm_cnt_cfg(struct sec_dev_info *info, bool clr_ce) { void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG; u32 regval; regval = readl_relaxed(addr); if (clr_ce) regval |= SEC_COMMON_CNT_CLR_CE_CLEAR; else regval &= ~SEC_COMMON_CNT_CLR_CE_CLEAR; writel_relaxed(regval, addr); } static void sec_commsnap_en(struct sec_dev_info *info, bool snap_en) { void __iomem *addr = info->regs[SEC_SAA] + SEC_COMMON_CNT_CLR_CE_REG; u32 regval; regval = readl_relaxed(addr); if (snap_en) regval |= SEC_COMMON_CNT_CLR_CE_SNAP_EN; else regval &= ~SEC_COMMON_CNT_CLR_CE_SNAP_EN; writel_relaxed(regval, addr); } static void sec_ipv6_hashmask(struct sec_dev_info *info, u32 hash_mask[]) { void __iomem *base = info->regs[SEC_SAA]; int i; for (i = 0; i < 10; i++) writel_relaxed(hash_mask[0], base + SEC_IPV6_MASK_TABLE_X_REG(i)); } static int sec_ipv4_hashmask(struct sec_dev_info *info, u32 hash_mask) { if (hash_mask & SEC_HASH_IPV4_MASK) { dev_err(info->dev, "Sec Ipv4 Hash Mask Input Error!\n "); return -EINVAL; } writel_relaxed(hash_mask, info->regs[SEC_SAA] + SEC_IPV4_MASK_TABLE_REG); return 0; } static void sec_set_dbg_bd_cfg(struct sec_dev_info *info, u32 cfg) { void __iomem *addr = info->regs[SEC_SAA] + SEC_DEBUG_BD_CFG_REG; u32 regval; regval = readl_relaxed(addr); /* Always disable write back of normal bd */ regval &= ~SEC_DEBUG_BD_CFG_WB_NORMAL; if (cfg) regval &= ~SEC_DEBUG_BD_CFG_WB_EN; else regval |= SEC_DEBUG_BD_CFG_WB_EN; writel_relaxed(regval, addr); } static void sec_saa_getqm_en(struct sec_dev_info *info, u32 saa_indx, u32 en) { void __iomem *addr = info->regs[SEC_SAA] + SEC_SAA_BASE + SEC_SAA_CTRL_REG(saa_indx); u32 regval; regval = readl_relaxed(addr); if (en) regval |= SEC_SAA_CTRL_GET_QM_EN; else regval &= ~SEC_SAA_CTRL_GET_QM_EN; writel_relaxed(regval, addr); } static void sec_saa_int_mask(struct sec_dev_info *info, u32 saa_indx, u32 saa_int_mask) { writel_relaxed(saa_int_mask, info->regs[SEC_SAA] + SEC_SAA_BASE + SEC_ST_INTMSK1_REG + saa_indx * SEC_SAA_ADDR_SIZE); } static void sec_streamid(struct sec_dev_info *info, int i) { #define SEC_SID 0x600 #define SEC_VMID 0 writel_relaxed((SEC_VMID | ((SEC_SID & 0xffff) << 8)), info->regs[SEC_SAA] + SEC_Q_VMID_CFG_REG(i)); } static void sec_queue_ar_alloc(struct sec_queue *queue, u32 alloc) { void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG; u32 regval; regval = readl_relaxed(addr); if (alloc == SEC_QUEUE_AR_FROCE_ALLOC) { regval |= SEC_Q_ARUSER_CFG_FA; regval &= ~SEC_Q_ARUSER_CFG_FNA; } else { regval &= ~SEC_Q_ARUSER_CFG_FA; regval |= SEC_Q_ARUSER_CFG_FNA; } 
writel_relaxed(regval, addr); } static void sec_queue_aw_alloc(struct sec_queue *queue, u32 alloc) { void __iomem *addr = queue->regs + SEC_Q_AWUSER_CFG_REG; u32 regval; regval = readl_relaxed(addr); if (alloc == SEC_QUEUE_AW_FROCE_ALLOC) { regval |= SEC_Q_AWUSER_CFG_FA; regval &= ~SEC_Q_AWUSER_CFG_FNA; } else { regval &= ~SEC_Q_AWUSER_CFG_FA; regval |= SEC_Q_AWUSER_CFG_FNA; } writel_relaxed(regval, addr); } static void sec_queue_reorder(struct sec_queue *queue, bool reorder) { void __iomem *base = queue->regs; u32 regval; regval = readl_relaxed(base + SEC_Q_CFG_REG); if (reorder) regval |= SEC_Q_CFG_REORDER; else regval &= ~SEC_Q_CFG_REORDER; writel_relaxed(regval, base + SEC_Q_CFG_REG); } static void sec_queue_depth(struct sec_queue *queue, u32 depth) { void __iomem *addr = queue->regs + SEC_Q_DEPTH_CFG_REG; u32 regval; regval = readl_relaxed(addr); regval &= ~SEC_Q_DEPTH_CFG_DEPTH_M; regval |= (depth << SEC_Q_DEPTH_CFG_DEPTH_S) & SEC_Q_DEPTH_CFG_DEPTH_M; writel_relaxed(regval, addr); } static void sec_queue_cmdbase_addr(struct sec_queue *queue, u64 addr) { writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_BASE_HADDR_REG); writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_BASE_LADDR_REG); } static void sec_queue_outorder_addr(struct sec_queue *queue, u64 addr) { writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_OUTORDER_BASE_HADDR_REG); writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_OUTORDER_BASE_LADDR_REG); } static void sec_queue_errbase_addr(struct sec_queue *queue, u64 addr) { writel_relaxed(upper_32_bits(addr), queue->regs + SEC_Q_ERR_BASE_HADDR_REG); writel_relaxed(lower_32_bits(addr), queue->regs + SEC_Q_ERR_BASE_LADDR_REG); } static void sec_queue_irq_disable(struct sec_queue *queue) { writel_relaxed((u32)~0, queue->regs + SEC_Q_FLOW_INT_MKS_REG); } static void sec_queue_irq_enable(struct sec_queue *queue) { writel_relaxed(0, queue->regs + SEC_Q_FLOW_INT_MKS_REG); } static void sec_queue_abn_irq_disable(struct sec_queue *queue) { writel_relaxed((u32)~0, queue->regs + SEC_Q_FAIL_INT_MSK_REG); } static void sec_queue_stop(struct sec_queue *queue) { disable_irq(queue->task_irq); sec_queue_irq_disable(queue); writel_relaxed(0x0, queue->regs + SEC_QUEUE_ENB_REG); } static void sec_queue_start(struct sec_queue *queue) { sec_queue_irq_enable(queue); enable_irq(queue->task_irq); queue->expected = 0; writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG); writel_relaxed(0x1, queue->regs + SEC_QUEUE_ENB_REG); } static struct sec_queue *sec_alloc_queue(struct sec_dev_info *info) { int i; mutex_lock(&info->dev_lock); /* Get the first idle queue in SEC device */ for (i = 0; i < SEC_Q_NUM; i++) if (!info->queues[i].in_use) { info->queues[i].in_use = true; info->queues_in_use++; mutex_unlock(&info->dev_lock); return &info->queues[i]; } mutex_unlock(&info->dev_lock); return ERR_PTR(-ENODEV); } static int sec_queue_free(struct sec_queue *queue) { struct sec_dev_info *info = queue->dev_info; if (queue->queue_id >= SEC_Q_NUM) { dev_err(info->dev, "No queue %u\n", queue->queue_id); return -ENODEV; } if (!queue->in_use) { dev_err(info->dev, "Queue %u is idle\n", queue->queue_id); return -ENODEV; } mutex_lock(&info->dev_lock); queue->in_use = false; info->queues_in_use--; mutex_unlock(&info->dev_lock); return 0; } static irqreturn_t sec_isr_handle_th(int irq, void *q) { sec_queue_irq_disable(q); return IRQ_WAKE_THREAD; } static irqreturn_t sec_isr_handle(int irq, void *q) { struct sec_queue *queue = q; struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd; 
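/* * Completions can land in the out-of-order (cq) ring in any order. The loop below records each finished slot in the 'unprocessed' bitmap and only runs callbacks once the 'expected' slot is done, so callbacks are always delivered in submission order. */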
struct sec_queue_ring_cq *cq_ring = &queue->ring_cq; struct sec_out_bd_info *outorder_msg; struct sec_bd_info *msg; u32 ooo_read, ooo_write; void __iomem *base = queue->regs; int q_id; ooo_read = readl(base + SEC_Q_OUTORDER_RD_PTR_REG); ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG); outorder_msg = cq_ring->vaddr + ooo_read; q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M; msg = msg_ring->vaddr + q_id; while ((ooo_write != ooo_read) && msg->w0 & SEC_BD_W0_DONE) { /* * Must be before callback otherwise blocks adding other chained * elements */ set_bit(q_id, queue->unprocessed); if (q_id == queue->expected) while (test_bit(queue->expected, queue->unprocessed)) { clear_bit(queue->expected, queue->unprocessed); msg = msg_ring->vaddr + queue->expected; msg->w0 &= ~SEC_BD_W0_DONE; msg_ring->callback(msg, queue->shadow[queue->expected]); queue->shadow[queue->expected] = NULL; queue->expected = (queue->expected + 1) % SEC_QUEUE_LEN; atomic_dec(&msg_ring->used); } ooo_read = (ooo_read + 1) % SEC_QUEUE_LEN; writel(ooo_read, base + SEC_Q_OUTORDER_RD_PTR_REG); ooo_write = readl(base + SEC_Q_OUTORDER_WR_PTR_REG); outorder_msg = cq_ring->vaddr + ooo_read; q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M; msg = msg_ring->vaddr + q_id; } sec_queue_irq_enable(queue); return IRQ_HANDLED; } static int sec_queue_irq_init(struct sec_queue *queue) { struct sec_dev_info *info = queue->dev_info; int irq = queue->task_irq; int ret; ret = request_threaded_irq(irq, sec_isr_handle_th, sec_isr_handle, IRQF_TRIGGER_RISING, queue->name, queue); if (ret) { dev_err(info->dev, "request irq(%d) failed %d\n", irq, ret); return ret; } disable_irq(irq); return 0; } static int sec_queue_irq_uninit(struct sec_queue *queue) { free_irq(queue->task_irq, queue); return 0; } static struct sec_dev_info *sec_device_get(void) { struct sec_dev_info *sec_dev = NULL; struct sec_dev_info *this_sec_dev; int least_busy_n = SEC_Q_NUM + 1; int i; /* Find which one is least busy and use that first */ for (i = 0; i < SEC_MAX_DEVICES; i++) { this_sec_dev = sec_devices[i]; if (this_sec_dev && this_sec_dev->queues_in_use < least_busy_n) { least_busy_n = this_sec_dev->queues_in_use; sec_dev = this_sec_dev; } } return sec_dev; } static struct sec_queue *sec_queue_alloc_start(struct sec_dev_info *info) { struct sec_queue *queue; queue = sec_alloc_queue(info); if (IS_ERR(queue)) { dev_err(info->dev, "alloc sec queue failed! %ld\n", PTR_ERR(queue)); return queue; } sec_queue_start(queue); return queue; } /** * sec_queue_alloc_start_safe - get a hw queue from appropriate instance * * This function does extremely simplistic load balancing. It does not take into * account NUMA locality of the accelerator, or which cpu has requested the * queue. Future work may focus on optimizing this in order to improve full * machine throughput. */ struct sec_queue *sec_queue_alloc_start_safe(void) { struct sec_dev_info *info; struct sec_queue *queue = ERR_PTR(-ENODEV); mutex_lock(&sec_id_lock); info = sec_device_get(); if (!info) goto unlock; queue = sec_queue_alloc_start(info); unlock: mutex_unlock(&sec_id_lock); return queue; } /** * sec_queue_stop_release() - free up a hw queue for reuse * @queue: The queue we are done with. 
* * This will stop the current queue, terminating any transactions * that are in flight and return it to the pool of available hw queues */ int sec_queue_stop_release(struct sec_queue *queue) { struct device *dev = queue->dev_info->dev; int ret; sec_queue_stop(queue); ret = sec_queue_free(queue); if (ret) dev_err(dev, "Releasing queue failed %d\n", ret); return ret; } /** * sec_queue_empty() - Is this hardware queue currently empty. * @queue: The queue to test * * We need to know if we have an empty queue for some of the chaining modes * as if it is not empty we may need to hold the message in a software queue * until the hw queue is drained. */ bool sec_queue_empty(struct sec_queue *queue) { struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd; return !atomic_read(&msg_ring->used); } /** * sec_queue_send() - queue up a single operation in the hw queue * @queue: The queue in which to put the message * @msg: The message * @ctx: Context to be put in the shadow array and passed back to cb on result. * * This function will return -EAGAIN if the queue is currently full. */ int sec_queue_send(struct sec_queue *queue, struct sec_bd_info *msg, void *ctx) { struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd; void __iomem *base = queue->regs; u32 write, read; mutex_lock(&msg_ring->lock); read = readl(base + SEC_Q_RD_PTR_REG); write = readl(base + SEC_Q_WR_PTR_REG); if (write == read && atomic_read(&msg_ring->used) == SEC_QUEUE_LEN) { mutex_unlock(&msg_ring->lock); return -EAGAIN; } memcpy(msg_ring->vaddr + write, msg, sizeof(*msg)); queue->shadow[write] = ctx; write = (write + 1) % SEC_QUEUE_LEN; /* Ensure content updated before queue advance */ wmb(); writel(write, base + SEC_Q_WR_PTR_REG); atomic_inc(&msg_ring->used); mutex_unlock(&msg_ring->lock); return 0; } bool sec_queue_can_enqueue(struct sec_queue *queue, int num) { struct sec_queue_ring_cmd *msg_ring = &queue->ring_cmd; return SEC_QUEUE_LEN - atomic_read(&msg_ring->used) >= num; } static void sec_queue_hw_init(struct sec_queue *queue) { sec_queue_ar_alloc(queue, SEC_QUEUE_AR_FROCE_NOALLOC); sec_queue_aw_alloc(queue, SEC_QUEUE_AW_FROCE_NOALLOC); sec_queue_ar_pkgattr(queue, 1); sec_queue_aw_pkgattr(queue, 1); /* Enable out of order queue */ sec_queue_reorder(queue, true); /* Interrupt after a single complete element */ writel_relaxed(1, queue->regs + SEC_Q_PROC_NUM_CFG_REG); sec_queue_depth(queue, SEC_QUEUE_LEN - 1); sec_queue_cmdbase_addr(queue, queue->ring_cmd.paddr); sec_queue_outorder_addr(queue, queue->ring_cq.paddr); sec_queue_errbase_addr(queue, queue->ring_db.paddr); writel_relaxed(0x100, queue->regs + SEC_Q_OT_TH_REG); sec_queue_abn_irq_disable(queue); sec_queue_irq_disable(queue); writel_relaxed(SEC_Q_INIT_AND_STAT_CLEAR, queue->regs + SEC_Q_INIT_REG); } static int sec_hw_init(struct sec_dev_info *info) { struct iommu_domain *domain; u32 sec_ipv4_mask = 0; u32 sec_ipv6_mask[10] = {}; u32 i, ret; domain = iommu_get_domain_for_dev(info->dev); /* * Enable all available processing unit clocks. * Only the first cluster is usable with translations.
*/ if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) info->num_saas = 5; else info->num_saas = 10; writel_relaxed(GENMASK(info->num_saas - 1, 0), info->regs[SEC_SAA] + SEC_CLK_EN_REG); /* 32 bit little endian */ sec_bd_endian_little(info); sec_cache_config(info); /* Data axi port write and read outstanding config as per datasheet */ sec_data_axiwr_otsd_cfg(info, 0x7); sec_data_axird_otsd_cfg(info, 0x7); /* Enable clock gating */ sec_clk_gate_en(info, true); /* Set CNT_CYC register not read clear */ sec_comm_cnt_cfg(info, false); /* Enable CNT_CYC */ sec_commsnap_en(info, false); writel_relaxed((u32)~0, info->regs[SEC_SAA] + SEC_FSM_MAX_CNT_REG); ret = sec_ipv4_hashmask(info, sec_ipv4_mask); if (ret) { dev_err(info->dev, "Failed to set ipv4 hashmask %d\n", ret); return -EIO; } sec_ipv6_hashmask(info, sec_ipv6_mask); /* do not use debug bd */ sec_set_dbg_bd_cfg(info, 0); if (domain && (domain->type & __IOMMU_DOMAIN_PAGING)) { for (i = 0; i < SEC_Q_NUM; i++) { sec_streamid(info, i); /* Same QoS for all queues */ writel_relaxed(0x3f, info->regs[SEC_SAA] + SEC_Q_WEIGHT_CFG_REG(i)); } } for (i = 0; i < info->num_saas; i++) { sec_saa_getqm_en(info, i, 1); sec_saa_int_mask(info, i, 0); } return 0; } static void sec_hw_exit(struct sec_dev_info *info) { int i; for (i = 0; i < SEC_MAX_SAA_NUM; i++) { sec_saa_int_mask(info, i, (u32)~0); sec_saa_getqm_en(info, i, 0); } } static void sec_queue_base_init(struct sec_dev_info *info, struct sec_queue *queue, int queue_id) { queue->dev_info = info; queue->queue_id = queue_id; snprintf(queue->name, sizeof(queue->name), "%s_%d", dev_name(info->dev), queue->queue_id); } static int sec_map_io(struct sec_dev_info *info, struct platform_device *pdev) { struct resource *res; int i; for (i = 0; i < SEC_NUM_ADDR_REGIONS; i++) { res = platform_get_resource(pdev, IORESOURCE_MEM, i); if (!res) { dev_err(info->dev, "Memory resource %d not found\n", i); return -EINVAL; } info->regs[i] = devm_ioremap(info->dev, res->start, resource_size(res)); if (!info->regs[i]) { dev_err(info->dev, "Memory resource %d could not be remapped\n", i); return -EINVAL; } } return 0; } static int sec_base_init(struct sec_dev_info *info, struct platform_device *pdev) { int ret; ret = sec_map_io(info, pdev); if (ret) return ret; ret = sec_clk_en(info); if (ret) return ret; ret = sec_reset_whole_module(info); if (ret) goto sec_clk_disable; ret = sec_hw_init(info); if (ret) goto sec_clk_disable; return 0; sec_clk_disable: sec_clk_dis(info); return ret; } static void sec_base_exit(struct sec_dev_info *info) { sec_hw_exit(info); sec_clk_dis(info); } #define SEC_Q_CMD_SIZE \ round_up(SEC_QUEUE_LEN * sizeof(struct sec_bd_info), PAGE_SIZE) #define SEC_Q_CQ_SIZE \ round_up(SEC_QUEUE_LEN * sizeof(struct sec_out_bd_info), PAGE_SIZE) #define SEC_Q_DB_SIZE \ round_up(SEC_QUEUE_LEN * sizeof(struct sec_debug_bd_info), PAGE_SIZE) static int sec_queue_res_cfg(struct sec_queue *queue) { struct device *dev = queue->dev_info->dev; struct sec_queue_ring_cmd *ring_cmd = &queue->ring_cmd; struct sec_queue_ring_cq *ring_cq = &queue->ring_cq; struct sec_queue_ring_db *ring_db = &queue->ring_db; int ret; ring_cmd->vaddr = dma_alloc_coherent(dev, SEC_Q_CMD_SIZE, &ring_cmd->paddr, GFP_KERNEL); if (!ring_cmd->vaddr) return -ENOMEM; atomic_set(&ring_cmd->used, 0); mutex_init(&ring_cmd->lock); ring_cmd->callback = sec_alg_callback; ring_cq->vaddr = dma_alloc_coherent(dev, SEC_Q_CQ_SIZE, &ring_cq->paddr, GFP_KERNEL); if (!ring_cq->vaddr) { ret = -ENOMEM; goto err_free_ring_cmd; } ring_db->vaddr = dma_alloc_coherent(dev, 
SEC_Q_DB_SIZE, &ring_db->paddr, GFP_KERNEL); if (!ring_db->vaddr) { ret = -ENOMEM; goto err_free_ring_cq; } queue->task_irq = platform_get_irq(to_platform_device(dev), queue->queue_id * 2 + 1); if (queue->task_irq < 0) { ret = queue->task_irq; goto err_free_ring_db; } return 0; err_free_ring_db: dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr, queue->ring_db.paddr); err_free_ring_cq: dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr, queue->ring_cq.paddr); err_free_ring_cmd: dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr, queue->ring_cmd.paddr); return ret; } static void sec_queue_free_ring_pages(struct sec_queue *queue) { struct device *dev = queue->dev_info->dev; dma_free_coherent(dev, SEC_Q_DB_SIZE, queue->ring_db.vaddr, queue->ring_db.paddr); dma_free_coherent(dev, SEC_Q_CQ_SIZE, queue->ring_cq.vaddr, queue->ring_cq.paddr); dma_free_coherent(dev, SEC_Q_CMD_SIZE, queue->ring_cmd.vaddr, queue->ring_cmd.paddr); } static int sec_queue_config(struct sec_dev_info *info, struct sec_queue *queue, int queue_id) { int ret; sec_queue_base_init(info, queue, queue_id); ret = sec_queue_res_cfg(queue); if (ret) return ret; ret = sec_queue_map_io(queue); if (ret) { dev_err(info->dev, "Queue map failed %d\n", ret); sec_queue_free_ring_pages(queue); return ret; } sec_queue_hw_init(queue); return 0; } static void sec_queue_unconfig(struct sec_dev_info *info, struct sec_queue *queue) { sec_queue_unmap_io(queue); sec_queue_free_ring_pages(queue); } static int sec_id_alloc(struct sec_dev_info *info) { int ret = 0; int i; mutex_lock(&sec_id_lock); for (i = 0; i < SEC_MAX_DEVICES; i++) if (!sec_devices[i]) break; if (i == SEC_MAX_DEVICES) { ret = -ENOMEM; goto unlock; } info->sec_id = i; sec_devices[info->sec_id] = info; unlock: mutex_unlock(&sec_id_lock); return ret; } static void sec_id_free(struct sec_dev_info *info) { mutex_lock(&sec_id_lock); sec_devices[info->sec_id] = NULL; mutex_unlock(&sec_id_lock); } static int sec_probe(struct platform_device *pdev) { struct sec_dev_info *info; struct device *dev = &pdev->dev; int i, j; int ret; ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)); if (ret) { dev_err(dev, "Failed to set 64 bit dma mask %d", ret); return -ENODEV; } info = devm_kzalloc(dev, (sizeof(*info)), GFP_KERNEL); if (!info) return -ENOMEM; info->dev = dev; mutex_init(&info->dev_lock); info->hw_sgl_pool = dmam_pool_create("sgl", dev, sizeof(struct sec_hw_sgl), 64, 0); if (!info->hw_sgl_pool) { dev_err(dev, "Failed to create sec sgl dma pool\n"); return -ENOMEM; } ret = sec_base_init(info, pdev); if (ret) { dev_err(dev, "Base initialization fail! 
%d\n", ret); return ret; } for (i = 0; i < SEC_Q_NUM; i++) { ret = sec_queue_config(info, &info->queues[i], i); if (ret) goto queues_unconfig; ret = sec_queue_irq_init(&info->queues[i]); if (ret) { sec_queue_unconfig(info, &info->queues[i]); goto queues_unconfig; } } ret = sec_algs_register(); if (ret) { dev_err(dev, "Failed to register algorithms with crypto %d\n", ret); goto queues_unconfig; } platform_set_drvdata(pdev, info); ret = sec_id_alloc(info); if (ret) goto algs_unregister; return 0; algs_unregister: sec_algs_unregister(); queues_unconfig: for (j = i - 1; j >= 0; j--) { sec_queue_irq_uninit(&info->queues[j]); sec_queue_unconfig(info, &info->queues[j]); } sec_base_exit(info); return ret; } static int sec_remove(struct platform_device *pdev) { struct sec_dev_info *info = platform_get_drvdata(pdev); int i; /* Unexpose as soon as possible, reuse during remove is fine */ sec_id_free(info); sec_algs_unregister(); for (i = 0; i < SEC_Q_NUM; i++) { sec_queue_irq_uninit(&info->queues[i]); sec_queue_unconfig(info, &info->queues[i]); } sec_base_exit(info); return 0; } static const __maybe_unused struct of_device_id sec_match[] = { { .compatible = "hisilicon,hip06-sec" }, { .compatible = "hisilicon,hip07-sec" }, {} }; MODULE_DEVICE_TABLE(of, sec_match); static const __maybe_unused struct acpi_device_id sec_acpi_match[] = { { "HISI02C1", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, sec_acpi_match); static struct platform_driver sec_driver = { .probe = sec_probe, .remove = sec_remove, .driver = { .name = "hisi_sec_platform_driver", .of_match_table = sec_match, .acpi_match_table = ACPI_PTR(sec_acpi_match), }, }; module_platform_driver(sec_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("HiSilicon Security Accelerators"); MODULE_AUTHOR("Zaibo Xu <[email protected]"); MODULE_AUTHOR("Jonathan Cameron <[email protected]>");
linux-master
drivers/crypto/hisilicon/sec/sec_drv.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include <linux/acpi.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/hw_random.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/random.h> #include <crypto/internal/rng.h> #define HISI_TRNG_REG 0x00F0 #define HISI_TRNG_BYTES 4 #define HISI_TRNG_QUALITY 512 #define HISI_TRNG_VERSION 0x01B8 #define HISI_TRNG_VER_V1 GENMASK(31, 0) #define SLEEP_US 10 #define TIMEOUT_US 10000 #define SW_DRBG_NUM_SHIFT 2 #define SW_DRBG_KEY_BASE 0x082C #define SW_DRBG_SEED(n) (SW_DRBG_KEY_BASE - ((n) << SW_DRBG_NUM_SHIFT)) #define SW_DRBG_SEED_REGS_NUM 12 #define SW_DRBG_SEED_SIZE 48 #define SW_DRBG_BLOCKS 0x0830 #define SW_DRBG_INIT 0x0834 #define SW_DRBG_GEN 0x083c #define SW_DRBG_STATUS 0x0840 #define SW_DRBG_BLOCKS_NUM 4095 #define SW_DRBG_DATA_BASE 0x0850 #define SW_DRBG_DATA_NUM 4 #define SW_DRBG_DATA(n) (SW_DRBG_DATA_BASE - ((n) << SW_DRBG_NUM_SHIFT)) #define SW_DRBG_BYTES 16 #define SW_DRBG_ENABLE_SHIFT 12 #define SEED_SHIFT_24 24 #define SEED_SHIFT_16 16 #define SEED_SHIFT_8 8 struct hisi_trng_list { struct mutex lock; struct list_head list; bool is_init; }; struct hisi_trng { void __iomem *base; struct hisi_trng_list *trng_list; struct list_head list; struct hwrng rng; u32 ver; bool is_used; struct mutex mutex; }; struct hisi_trng_ctx { struct hisi_trng *trng; }; static atomic_t trng_active_devs; static struct hisi_trng_list trng_devices; static void hisi_trng_set_seed(struct hisi_trng *trng, const u8 *seed) { u32 val, seed_reg, i; for (i = 0; i < SW_DRBG_SEED_SIZE; i += SW_DRBG_SEED_SIZE / SW_DRBG_SEED_REGS_NUM) { val = seed[i] << SEED_SHIFT_24; val |= seed[i + 1UL] << SEED_SHIFT_16; val |= seed[i + 2UL] << SEED_SHIFT_8; val |= seed[i + 3UL]; seed_reg = (i >> SW_DRBG_NUM_SHIFT) % SW_DRBG_SEED_REGS_NUM; writel(val, trng->base + SW_DRBG_SEED(seed_reg)); } } static int hisi_trng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { struct hisi_trng_ctx *ctx = crypto_rng_ctx(tfm); struct hisi_trng *trng = ctx->trng; u32 val = 0; int ret = 0; if (slen < SW_DRBG_SEED_SIZE) { pr_err("slen(%u) is not matched with trng(%d)\n", slen, SW_DRBG_SEED_SIZE); return -EINVAL; } writel(0x0, trng->base + SW_DRBG_BLOCKS); hisi_trng_set_seed(trng, seed); writel(SW_DRBG_BLOCKS_NUM | (0x1 << SW_DRBG_ENABLE_SHIFT), trng->base + SW_DRBG_BLOCKS); writel(0x1, trng->base + SW_DRBG_INIT); ret = readl_relaxed_poll_timeout(trng->base + SW_DRBG_STATUS, val, val & BIT(0), SLEEP_US, TIMEOUT_US); if (ret) pr_err("fail to init trng(%d)\n", ret); return ret; } static int hisi_trng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dstn, unsigned int dlen) { struct hisi_trng_ctx *ctx = crypto_rng_ctx(tfm); struct hisi_trng *trng = ctx->trng; u32 data[SW_DRBG_DATA_NUM]; u32 currsize = 0; u32 val = 0; int ret; u32 i; if (dlen > SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES || dlen == 0) { pr_err("dlen(%d) exceeds limit(%d)!\n", dlen, SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES); return -EINVAL; } do { ret = readl_relaxed_poll_timeout(trng->base + SW_DRBG_STATUS, val, val & BIT(1), SLEEP_US, TIMEOUT_US); if (ret) { pr_err("fail to generate random number(%d)!\n", ret); break; } for (i = 0; i < SW_DRBG_DATA_NUM; i++) data[i] = readl(trng->base + SW_DRBG_DATA(i)); if (dlen - currsize >= SW_DRBG_BYTES) { memcpy(dstn + currsize, data, SW_DRBG_BYTES); currsize += SW_DRBG_BYTES; } else { 
memcpy(dstn + currsize, data, dlen - currsize); currsize = dlen; } writel(0x1, trng->base + SW_DRBG_GEN); } while (currsize < dlen); return ret; } static int hisi_trng_init(struct crypto_tfm *tfm) { struct hisi_trng_ctx *ctx = crypto_tfm_ctx(tfm); struct hisi_trng *trng; int ret = -EBUSY; mutex_lock(&trng_devices.lock); list_for_each_entry(trng, &trng_devices.list, list) { if (!trng->is_used) { trng->is_used = true; ctx->trng = trng; ret = 0; break; } } mutex_unlock(&trng_devices.lock); return ret; } static void hisi_trng_exit(struct crypto_tfm *tfm) { struct hisi_trng_ctx *ctx = crypto_tfm_ctx(tfm); mutex_lock(&trng_devices.lock); ctx->trng->is_used = false; mutex_unlock(&trng_devices.lock); } static int hisi_trng_read(struct hwrng *rng, void *buf, size_t max, bool wait) { struct hisi_trng *trng; int currsize = 0; u32 val = 0; int ret; trng = container_of(rng, struct hisi_trng, rng); do { ret = readl_poll_timeout(trng->base + HISI_TRNG_REG, val, val, SLEEP_US, TIMEOUT_US); if (ret) return currsize; if (max - currsize >= HISI_TRNG_BYTES) { memcpy(buf + currsize, &val, HISI_TRNG_BYTES); currsize += HISI_TRNG_BYTES; if (currsize == max) return currsize; continue; } /* copy remaining bytes */ memcpy(buf + currsize, &val, max - currsize); currsize = max; } while (currsize < max); return currsize; } static struct rng_alg hisi_trng_alg = { .generate = hisi_trng_generate, .seed = hisi_trng_seed, .seedsize = SW_DRBG_SEED_SIZE, .base = { .cra_name = "stdrng", .cra_driver_name = "hisi_stdrng", .cra_priority = 300, .cra_ctxsize = sizeof(struct hisi_trng_ctx), .cra_module = THIS_MODULE, .cra_init = hisi_trng_init, .cra_exit = hisi_trng_exit, }, }; static void hisi_trng_add_to_list(struct hisi_trng *trng) { mutex_lock(&trng_devices.lock); list_add_tail(&trng->list, &trng_devices.list); mutex_unlock(&trng_devices.lock); } static int hisi_trng_del_from_list(struct hisi_trng *trng) { int ret = -EBUSY; mutex_lock(&trng_devices.lock); if (!trng->is_used) { list_del(&trng->list); ret = 0; } mutex_unlock(&trng_devices.lock); return ret; } static int hisi_trng_probe(struct platform_device *pdev) { struct hisi_trng *trng; int ret; trng = devm_kzalloc(&pdev->dev, sizeof(*trng), GFP_KERNEL); if (!trng) return -ENOMEM; platform_set_drvdata(pdev, trng); trng->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(trng->base)) return PTR_ERR(trng->base); trng->is_used = false; trng->ver = readl(trng->base + HISI_TRNG_VERSION); if (!trng_devices.is_init) { INIT_LIST_HEAD(&trng_devices.list); mutex_init(&trng_devices.lock); trng_devices.is_init = true; } hisi_trng_add_to_list(trng); if (trng->ver != HISI_TRNG_VER_V1 && atomic_inc_return(&trng_active_devs) == 1) { ret = crypto_register_rng(&hisi_trng_alg); if (ret) { dev_err(&pdev->dev, "failed to register crypto(%d)\n", ret); atomic_dec_return(&trng_active_devs); goto err_remove_from_list; } } trng->rng.name = pdev->name; trng->rng.read = hisi_trng_read; trng->rng.quality = HISI_TRNG_QUALITY; ret = devm_hwrng_register(&pdev->dev, &trng->rng); if (ret) { dev_err(&pdev->dev, "failed to register hwrng: %d!\n", ret); goto err_crypto_unregister; } return ret; err_crypto_unregister: if (trng->ver != HISI_TRNG_VER_V1 && atomic_dec_return(&trng_active_devs) == 0) crypto_unregister_rng(&hisi_trng_alg); err_remove_from_list: hisi_trng_del_from_list(trng); return ret; } static int hisi_trng_remove(struct platform_device *pdev) { struct hisi_trng *trng = platform_get_drvdata(pdev); /* Wait until the task is finished */ while (hisi_trng_del_from_list(trng)) ; if (trng->ver != 
HISI_TRNG_VER_V1 && atomic_dec_return(&trng_active_devs) == 0) crypto_unregister_rng(&hisi_trng_alg); return 0; } static const struct acpi_device_id hisi_trng_acpi_match[] = { { "HISI02B3", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, hisi_trng_acpi_match); static struct platform_driver hisi_trng_driver = { .probe = hisi_trng_probe, .remove = hisi_trng_remove, .driver = { .name = "hisi-trng-v2", .acpi_match_table = ACPI_PTR(hisi_trng_acpi_match), }, }; module_platform_driver(hisi_trng_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Weili Qian <[email protected]>"); MODULE_AUTHOR("Zaibo Xu <[email protected]>"); MODULE_DESCRIPTION("HiSilicon true random number generator V2 driver");
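/*
 * Illustrative sketch, not part of the upstream driver: pulling random bytes
 * from the DRBG registered above via the generic crypto_rng API. The function
 * name and output buffer are hypothetical; requesting "hisi_stdrng" (the
 * cra_driver_name) instead of "stdrng" would be needed to guarantee this
 * implementation is selected over other stdrng providers.
 */
static int example_stdrng_read(u8 *out, unsigned int outlen)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* NULL seed: the core generates crypto_rng_seedsize() seed bytes. */
	ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
	if (!ret)
		/*
		 * outlen must be non-zero and no larger than the driver limit
		 * (SW_DRBG_BLOCKS_NUM * SW_DRBG_BYTES).
		 */
		ret = crypto_rng_get_bytes(rng, out, outlen);

	crypto_free_rng(rng);
	return ret;
}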
linux-master
drivers/crypto/hisilicon/trng/trng.c
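For context, a minimal sketch of how a kernel consumer could pull random bytes from the "stdrng" algorithm that the driver above registers, using the generic crypto RNG API. The lookup goes by priority, so it may resolve to another stdrng provider instead of hisi_stdrng; the function and variable names below are illustrative, not part of the driver.

#include <crypto/rng.h>
#include <linux/err.h>

/*
 * Consumer sketch: allocate the system DRBG ("stdrng"), reseed it, then
 * generate @len bytes into @out. Passing a NULL seed with the algorithm's
 * seedsize lets the crypto core pull fresh seed material itself.
 */
static int example_stdrng_read(u8 *out, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("stdrng", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_reset(rng, NULL, crypto_rng_seedsize(rng));
	if (!ret)
		ret = crypto_rng_get_bytes(rng, out, len);

	crypto_free_rng(rng);
	return ret;
}

Whether this lands on the driver above depends on its cra_priority (300 here) relative to any other registered "stdrng" implementation.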
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include <crypto/internal/acompress.h> #include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include "zip.h" /* hisi_zip_sqe dw3 */ #define HZIP_BD_STATUS_M GENMASK(7, 0) /* hisi_zip_sqe dw7 */ #define HZIP_IN_SGE_DATA_OFFSET_M GENMASK(23, 0) #define HZIP_SQE_TYPE_M GENMASK(31, 28) /* hisi_zip_sqe dw8 */ #define HZIP_OUT_SGE_DATA_OFFSET_M GENMASK(23, 0) /* hisi_zip_sqe dw9 */ #define HZIP_REQ_TYPE_M GENMASK(7, 0) #define HZIP_ALG_TYPE_ZLIB 0x02 #define HZIP_ALG_TYPE_GZIP 0x03 #define HZIP_BUF_TYPE_M GENMASK(11, 8) #define HZIP_PBUFFER 0x0 #define HZIP_SGL 0x1 #define HZIP_ZLIB_HEAD_SIZE 2 #define HZIP_GZIP_HEAD_SIZE 10 #define GZIP_HEAD_FHCRC_BIT BIT(1) #define GZIP_HEAD_FEXTRA_BIT BIT(2) #define GZIP_HEAD_FNAME_BIT BIT(3) #define GZIP_HEAD_FCOMMENT_BIT BIT(4) #define GZIP_HEAD_FLG_SHIFT 3 #define GZIP_HEAD_FEXTRA_SHIFT 10 #define GZIP_HEAD_FEXTRA_XLEN 2UL #define GZIP_HEAD_FHCRC_SIZE 2 #define HZIP_GZIP_HEAD_BUF 256 #define HZIP_ALG_PRIORITY 300 #define HZIP_SGL_SGE_NR 10 #define HZIP_ALG_ZLIB GENMASK(1, 0) #define HZIP_ALG_GZIP GENMASK(3, 2) static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c}; static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = { 0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03 }; enum hisi_zip_alg_type { HZIP_ALG_TYPE_COMP = 0, HZIP_ALG_TYPE_DECOMP = 1, }; enum { HZIP_QPC_COMP, HZIP_QPC_DECOMP, HZIP_CTX_Q_NUM }; #define COMP_NAME_TO_TYPE(alg_name) \ (!strcmp((alg_name), "zlib-deflate") ? HZIP_ALG_TYPE_ZLIB : \ !strcmp((alg_name), "gzip") ? HZIP_ALG_TYPE_GZIP : 0) \ #define TO_HEAD_SIZE(req_type) \ (((req_type) == HZIP_ALG_TYPE_ZLIB) ? sizeof(zlib_head) : \ ((req_type) == HZIP_ALG_TYPE_GZIP) ? sizeof(gzip_head) : 0) \ #define TO_HEAD(req_type) \ (((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head : \ ((req_type) == HZIP_ALG_TYPE_GZIP) ? 
gzip_head : NULL) \ struct hisi_zip_req { struct acomp_req *req; u32 sskip; u32 dskip; struct hisi_acc_hw_sgl *hw_src; struct hisi_acc_hw_sgl *hw_dst; dma_addr_t dma_src; dma_addr_t dma_dst; u16 req_id; }; struct hisi_zip_req_q { struct hisi_zip_req *q; unsigned long *req_bitmap; rwlock_t req_lock; u16 size; }; struct hisi_zip_qp_ctx { struct hisi_qp *qp; struct hisi_zip_req_q req_q; struct hisi_acc_sgl_pool *sgl_pool; struct hisi_zip *zip_dev; struct hisi_zip_ctx *ctx; }; struct hisi_zip_sqe_ops { u8 sqe_type; void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req); void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req); void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type); void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type); void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req); void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type); u32 (*get_tag)(struct hisi_zip_sqe *sqe); u32 (*get_status)(struct hisi_zip_sqe *sqe); u32 (*get_dstlen)(struct hisi_zip_sqe *sqe); }; struct hisi_zip_ctx { struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM]; const struct hisi_zip_sqe_ops *ops; }; static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp) { int ret; u16 n; if (!val) return -EINVAL; ret = kstrtou16(val, 10, &n); if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX) return -EINVAL; return param_set_ushort(val, kp); } static const struct kernel_param_ops sgl_sge_nr_ops = { .set = sgl_sge_nr_set, .get = param_get_ushort, }; static u16 sgl_sge_nr = HZIP_SGL_SGE_NR; module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444); MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl(1-255)"); static u32 get_extra_field_size(const u8 *start) { return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN; } static u32 get_name_field_size(const u8 *start) { return strlen(start) + 1; } static u32 get_comment_field_size(const u8 *start) { return strlen(start) + 1; } static u32 __get_gzip_head_size(const u8 *src) { u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT); u32 size = GZIP_HEAD_FEXTRA_SHIFT; if (head_flg & GZIP_HEAD_FEXTRA_BIT) size += get_extra_field_size(src + size); if (head_flg & GZIP_HEAD_FNAME_BIT) size += get_name_field_size(src + size); if (head_flg & GZIP_HEAD_FCOMMENT_BIT) size += get_comment_field_size(src + size); if (head_flg & GZIP_HEAD_FHCRC_BIT) size += GZIP_HEAD_FHCRC_SIZE; return size; } static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl) { char buf[HZIP_GZIP_HEAD_BUF]; sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf)); return __get_gzip_head_size(buf); } static int add_comp_head(struct scatterlist *dst, u8 req_type) { int head_size = TO_HEAD_SIZE(req_type); const u8 *head = TO_HEAD(req_type); int ret; ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size); if (unlikely(ret != head_size)) { pr_err("the head size of buffer is wrong (%d)!\n", ret); return -ENOMEM; } return head_size; } static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type) { if (unlikely(!acomp_req->src || !acomp_req->slen)) return -EINVAL; if (unlikely(req_type == HZIP_ALG_TYPE_GZIP && acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT)) return -EINVAL; switch (req_type) { case HZIP_ALG_TYPE_ZLIB: return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB); case HZIP_ALG_TYPE_GZIP: return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP); default: pr_err("request type does not support!\n"); return -EINVAL; } } static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req, struct hisi_zip_qp_ctx *qp_ctx, size_t head_size, bool is_comp) { struct 
hisi_zip_req_q *req_q = &qp_ctx->req_q; struct hisi_zip_req *q = req_q->q; struct hisi_zip_req *req_cache; int req_id; write_lock(&req_q->req_lock); req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size); if (req_id >= req_q->size) { write_unlock(&req_q->req_lock); dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n"); return ERR_PTR(-EAGAIN); } set_bit(req_id, req_q->req_bitmap); write_unlock(&req_q->req_lock); req_cache = q + req_id; req_cache->req_id = req_id; req_cache->req = req; if (is_comp) { req_cache->sskip = 0; req_cache->dskip = head_size; } else { req_cache->sskip = head_size; req_cache->dskip = 0; } return req_cache; } static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx, struct hisi_zip_req *req) { struct hisi_zip_req_q *req_q = &qp_ctx->req_q; write_lock(&req_q->req_lock); clear_bit(req->req_id, req_q->req_bitmap); write_unlock(&req_q->req_lock); } static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) { sqe->source_addr_l = lower_32_bits(req->dma_src); sqe->source_addr_h = upper_32_bits(req->dma_src); sqe->dest_addr_l = lower_32_bits(req->dma_dst); sqe->dest_addr_h = upper_32_bits(req->dma_dst); } static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) { struct acomp_req *a_req = req->req; sqe->input_data_length = a_req->slen - req->sskip; sqe->dest_avail_out = a_req->dlen - req->dskip; sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, req->sskip); sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, req->dskip); } static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type) { u32 val; val = sqe->dw9 & ~HZIP_BUF_TYPE_M; val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type); sqe->dw9 = val; } static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type) { u32 val; val = sqe->dw9 & ~HZIP_REQ_TYPE_M; val |= FIELD_PREP(HZIP_REQ_TYPE_M, req_type); sqe->dw9 = val; } static void hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) { sqe->dw13 = req->req_id; } static void hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req) { sqe->dw26 = req->req_id; } static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type) { u32 val; val = sqe->dw7 & ~HZIP_SQE_TYPE_M; val |= FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type); sqe->dw7 = val; } static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe, u8 req_type, struct hisi_zip_req *req) { const struct hisi_zip_sqe_ops *ops = ctx->ops; memset(sqe, 0, sizeof(struct hisi_zip_sqe)); ops->fill_addr(sqe, req); ops->fill_buf_size(sqe, req); ops->fill_buf_type(sqe, HZIP_SGL); ops->fill_req_type(sqe, req_type); ops->fill_tag(sqe, req); ops->fill_sqe_type(sqe, ops->sqe_type); } static int hisi_zip_do_work(struct hisi_zip_req *req, struct hisi_zip_qp_ctx *qp_ctx) { struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool; struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; struct acomp_req *a_req = req->req; struct hisi_qp *qp = qp_ctx->qp; struct device *dev = &qp->qm->pdev->dev; struct hisi_zip_sqe zip_sqe; int ret; if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen)) return -EINVAL; req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool, req->req_id << 1, &req->dma_src); if (IS_ERR(req->hw_src)) { dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n", PTR_ERR(req->hw_src)); return PTR_ERR(req->hw_src); } req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool, (req->req_id << 1) + 1, &req->dma_dst); if (IS_ERR(req->hw_dst)) { ret = PTR_ERR(req->hw_dst); 
dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n", ret); goto err_unmap_input; } hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req); /* send command to start a task */ atomic64_inc(&dfx->send_cnt); ret = hisi_qp_send(qp, &zip_sqe); if (unlikely(ret < 0)) { atomic64_inc(&dfx->send_busy_cnt); ret = -EAGAIN; dev_dbg_ratelimited(dev, "failed to send request!\n"); goto err_unmap_output; } return -EINPROGRESS; err_unmap_output: hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst); err_unmap_input: hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src); return ret; } static u32 hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe) { return sqe->dw13; } static u32 hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe) { return sqe->dw26; } static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe) { return sqe->dw3 & HZIP_BD_STATUS_M; } static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe) { return sqe->produced; } static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data) { struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx; const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops; struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx; struct hisi_zip_req_q *req_q = &qp_ctx->req_q; struct device *dev = &qp->qm->pdev->dev; struct hisi_zip_sqe *sqe = data; u32 tag = ops->get_tag(sqe); struct hisi_zip_req *req = req_q->q + tag; struct acomp_req *acomp_req = req->req; u32 status, dlen, head_size; int err = 0; atomic64_inc(&dfx->recv_cnt); status = ops->get_status(sqe); if (unlikely(status != 0 && status != HZIP_NC_ERR)) { dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n", (qp->alg_type == 0) ? "" : "de", qp->qp_id, status, sqe->produced); atomic64_inc(&dfx->err_bd_cnt); err = -EIO; } dlen = ops->get_dstlen(sqe); hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src); hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst); head_size = (qp->alg_type == 0) ?
TO_HEAD_SIZE(qp->req_type) : 0; acomp_req->dlen = dlen + head_size; if (acomp_req->base.complete) acomp_request_complete(acomp_req, err); hisi_zip_remove_req(qp_ctx, req); } static int hisi_zip_acompress(struct acomp_req *acomp_req) { struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP]; struct device *dev = &qp_ctx->qp->qm->pdev->dev; struct hisi_zip_req *req; int head_size; int ret; /* let's output compression head now */ head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type); if (unlikely(head_size < 0)) { dev_err_ratelimited(dev, "failed to add comp head (%d)!\n", head_size); return head_size; } req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true); if (IS_ERR(req)) return PTR_ERR(req); ret = hisi_zip_do_work(req, qp_ctx); if (unlikely(ret != -EINPROGRESS)) { dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret); hisi_zip_remove_req(qp_ctx, req); } return ret; } static int hisi_zip_adecompress(struct acomp_req *acomp_req) { struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm); struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP]; struct device *dev = &qp_ctx->qp->qm->pdev->dev; struct hisi_zip_req *req; int head_size, ret; head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type); if (unlikely(head_size < 0)) { dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n", head_size); return head_size; } req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false); if (IS_ERR(req)) return PTR_ERR(req); ret = hisi_zip_do_work(req, qp_ctx); if (unlikely(ret != -EINPROGRESS)) { dev_info_ratelimited(dev, "failed to do decompress (%d)!\n", ret); hisi_zip_remove_req(qp_ctx, req); } return ret; } static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx, int alg_type, int req_type) { struct device *dev = &qp->qm->pdev->dev; int ret; qp->req_type = req_type; qp->alg_type = alg_type; qp->qp_ctx = qp_ctx; ret = hisi_qm_start_qp(qp, 0); if (ret < 0) { dev_err(dev, "failed to start qp (%d)!\n", ret); return ret; } qp_ctx->qp = qp; return 0; } static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx) { hisi_qm_stop_qp(qp_ctx->qp); hisi_qm_free_qps(&qp_ctx->qp, 1); } static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = { .sqe_type = 0, .fill_addr = hisi_zip_fill_addr, .fill_buf_size = hisi_zip_fill_buf_size, .fill_buf_type = hisi_zip_fill_buf_type, .fill_req_type = hisi_zip_fill_req_type, .fill_tag = hisi_zip_fill_tag_v1, .fill_sqe_type = hisi_zip_fill_sqe_type, .get_tag = hisi_zip_get_tag_v1, .get_status = hisi_zip_get_status, .get_dstlen = hisi_zip_get_dstlen, }; static const struct hisi_zip_sqe_ops hisi_zip_ops_v2 = { .sqe_type = 0x3, .fill_addr = hisi_zip_fill_addr, .fill_buf_size = hisi_zip_fill_buf_size, .fill_buf_type = hisi_zip_fill_buf_type, .fill_req_type = hisi_zip_fill_req_type, .fill_tag = hisi_zip_fill_tag_v2, .fill_sqe_type = hisi_zip_fill_sqe_type, .get_tag = hisi_zip_get_tag_v2, .get_status = hisi_zip_get_status, .get_dstlen = hisi_zip_get_dstlen, }; static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node) { struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL }; struct hisi_zip_qp_ctx *qp_ctx; struct hisi_zip *hisi_zip; int ret, i, j; ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node); if (ret) { pr_err("failed to create zip qps (%d)!\n", ret); return -ENODEV; } hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm); for (i = 0; i < HZIP_CTX_Q_NUM; i++) { /* alg_type = 0 for compress, 1 for decompress in 
hw sqe */ qp_ctx = &hisi_zip_ctx->qp_ctx[i]; qp_ctx->ctx = hisi_zip_ctx; ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type); if (ret) { for (j = i - 1; j >= 0; j--) hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp); hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM); return ret; } qp_ctx->zip_dev = hisi_zip; } if (hisi_zip->qm.ver < QM_HW_V3) hisi_zip_ctx->ops = &hisi_zip_ops_v1; else hisi_zip_ctx->ops = &hisi_zip_ops_v2; return 0; } static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx) { int i; for (i = 0; i < HZIP_CTX_Q_NUM; i++) hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]); } static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx) { u16 q_depth = ctx->qp_ctx[0].qp->sq_depth; struct hisi_zip_req_q *req_q; int i, ret; for (i = 0; i < HZIP_CTX_Q_NUM; i++) { req_q = &ctx->qp_ctx[i].req_q; req_q->size = q_depth; req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL); if (!req_q->req_bitmap) { ret = -ENOMEM; if (i == 0) return ret; goto err_free_comp_q; } rwlock_init(&req_q->req_lock); req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req), GFP_KERNEL); if (!req_q->q) { ret = -ENOMEM; if (i == 0) goto err_free_comp_bitmap; else goto err_free_decomp_bitmap; } } return 0; err_free_decomp_bitmap: bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap); err_free_comp_q: kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q); err_free_comp_bitmap: bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap); return ret; } static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx) { int i; for (i = 0; i < HZIP_CTX_Q_NUM; i++) { kfree(ctx->qp_ctx[i].req_q.q); bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap); } } static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx) { u16 q_depth = ctx->qp_ctx[0].qp->sq_depth; struct hisi_zip_qp_ctx *tmp; struct device *dev; int i; for (i = 0; i < HZIP_CTX_Q_NUM; i++) { tmp = &ctx->qp_ctx[i]; dev = &tmp->qp->qm->pdev->dev; tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1, sgl_sge_nr); if (IS_ERR(tmp->sgl_pool)) { if (i == 1) goto err_free_sgl_pool0; return -ENOMEM; } } return 0; err_free_sgl_pool0: hisi_acc_free_sgl_pool(&ctx->qp_ctx[HZIP_QPC_COMP].qp->qm->pdev->dev, ctx->qp_ctx[HZIP_QPC_COMP].sgl_pool); return -ENOMEM; } static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx) { int i; for (i = 0; i < HZIP_CTX_Q_NUM; i++) hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev, ctx->qp_ctx[i].sgl_pool); } static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx, void (*fn)(struct hisi_qp *, void *)) { int i; for (i = 0; i < HZIP_CTX_Q_NUM; i++) ctx->qp_ctx[i].qp->req_cb = fn; } static int hisi_zip_acomp_init(struct crypto_acomp *tfm) { const char *alg_name = crypto_tfm_alg_name(&tfm->base); struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); struct device *dev; int ret; ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node); if (ret) { pr_err("failed to init ctx (%d)!\n", ret); return ret; } dev = &ctx->qp_ctx[0].qp->qm->pdev->dev; ret = hisi_zip_create_req_q(ctx); if (ret) { dev_err(dev, "failed to create request queue (%d)!\n", ret); goto err_ctx_exit; } ret = hisi_zip_create_sgl_pool(ctx); if (ret) { dev_err(dev, "failed to create sgl pool (%d)!\n", ret); goto err_release_req_q; } hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb); return 0; err_release_req_q: hisi_zip_release_req_q(ctx); err_ctx_exit: hisi_zip_ctx_exit(ctx); return ret; } static void hisi_zip_acomp_exit(struct crypto_acomp *tfm) { struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base); hisi_zip_set_acomp_cb(ctx, NULL); hisi_zip_release_sgl_pool(ctx); 
hisi_zip_release_req_q(ctx); hisi_zip_ctx_exit(ctx); } static struct acomp_alg hisi_zip_acomp_zlib = { .init = hisi_zip_acomp_init, .exit = hisi_zip_acomp_exit, .compress = hisi_zip_acompress, .decompress = hisi_zip_adecompress, .base = { .cra_name = "zlib-deflate", .cra_driver_name = "hisi-zlib-acomp", .cra_module = THIS_MODULE, .cra_priority = HZIP_ALG_PRIORITY, .cra_ctxsize = sizeof(struct hisi_zip_ctx), } }; static int hisi_zip_register_zlib(struct hisi_qm *qm) { int ret; if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB)) return 0; ret = crypto_register_acomp(&hisi_zip_acomp_zlib); if (ret) dev_err(&qm->pdev->dev, "failed to register to zlib (%d)!\n", ret); return ret; } static void hisi_zip_unregister_zlib(struct hisi_qm *qm) { if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB)) return; crypto_unregister_acomp(&hisi_zip_acomp_zlib); } static struct acomp_alg hisi_zip_acomp_gzip = { .init = hisi_zip_acomp_init, .exit = hisi_zip_acomp_exit, .compress = hisi_zip_acompress, .decompress = hisi_zip_adecompress, .base = { .cra_name = "gzip", .cra_driver_name = "hisi-gzip-acomp", .cra_module = THIS_MODULE, .cra_priority = HZIP_ALG_PRIORITY, .cra_ctxsize = sizeof(struct hisi_zip_ctx), } }; static int hisi_zip_register_gzip(struct hisi_qm *qm) { int ret; if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP)) return 0; ret = crypto_register_acomp(&hisi_zip_acomp_gzip); if (ret) dev_err(&qm->pdev->dev, "failed to register to gzip (%d)!\n", ret); return ret; } static void hisi_zip_unregister_gzip(struct hisi_qm *qm) { if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP)) return; crypto_unregister_acomp(&hisi_zip_acomp_gzip); } int hisi_zip_register_to_crypto(struct hisi_qm *qm) { int ret = 0; ret = hisi_zip_register_zlib(qm); if (ret) return ret; ret = hisi_zip_register_gzip(qm); if (ret) hisi_zip_unregister_zlib(qm); return ret; } void hisi_zip_unregister_from_crypto(struct hisi_qm *qm) { hisi_zip_unregister_zlib(qm); hisi_zip_unregister_gzip(qm); }
linux-master
drivers/crypto/hisilicon/zip/zip_crypto.c
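For context, a hedged sketch of how another kernel module could exercise the "gzip" acomp algorithm registered above through the acompress API. The lookup goes by priority, so it may or may not land on hisi-gzip-acomp, and the function and buffer names here are illustrative only; @src and @dst are assumed to be linear (kmalloc'd) buffers.

#include <crypto/acompress.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/*
 * Compress one linear buffer synchronously. On success *dlen holds the
 * produced length, which for this driver includes the gzip header that
 * add_comp_head() writes in front of the payload.
 */
static int example_gzip_compress(void *src, unsigned int slen,
				 void *dst, unsigned int *dlen)
{
	struct scatterlist sg_src, sg_dst;
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int ret;

	tfm = crypto_alloc_acomp("gzip", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = acomp_request_alloc(tfm);
	if (!req) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg_src, src, slen);
	sg_init_one(&sg_dst, dst, *dlen);
	acomp_request_set_params(req, &sg_src, &sg_dst, slen, *dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
	if (!ret)
		*dlen = req->dlen;

	acomp_request_free(req);
out_free_tfm:
	crypto_free_acomp(tfm);
	return ret;
}

Decompression is symmetric via crypto_acomp_decompress(), except the source buffer must already start with the zlib or gzip header, which get_comp_head_size() above accounts for by skipping it in the source scatterlist.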
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include <linux/acpi.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/seq_file.h> #include <linux/topology.h> #include <linux/uacce.h> #include "zip.h" #define PCI_DEVICE_ID_HUAWEI_ZIP_PF 0xa250 #define HZIP_QUEUE_NUM_V1 4096 #define HZIP_CLOCK_GATE_CTRL 0x301004 #define HZIP_DECOMP_CHECK_ENABLE BIT(16) #define HZIP_FSM_MAX_CNT 0x301008 #define HZIP_PORT_ARCA_CHE_0 0x301040 #define HZIP_PORT_ARCA_CHE_1 0x301044 #define HZIP_PORT_AWCA_CHE_0 0x301060 #define HZIP_PORT_AWCA_CHE_1 0x301064 #define HZIP_CACHE_ALL_EN 0xffffffff #define HZIP_BD_RUSER_32_63 0x301110 #define HZIP_SGL_RUSER_32_63 0x30111c #define HZIP_DATA_RUSER_32_63 0x301128 #define HZIP_DATA_WUSER_32_63 0x301134 #define HZIP_BD_WUSER_32_63 0x301140 #define HZIP_QM_IDEL_STATUS 0x3040e4 #define HZIP_CORE_DFX_BASE 0x301000 #define HZIP_CLOCK_GATED_CONTL 0X301004 #define HZIP_CORE_DFX_COMP_0 0x302000 #define HZIP_CORE_DFX_COMP_1 0x303000 #define HZIP_CORE_DFX_DECOMP_0 0x304000 #define HZIP_CORE_DFX_DECOMP_1 0x305000 #define HZIP_CORE_DFX_DECOMP_2 0x306000 #define HZIP_CORE_DFX_DECOMP_3 0x307000 #define HZIP_CORE_DFX_DECOMP_4 0x308000 #define HZIP_CORE_DFX_DECOMP_5 0x309000 #define HZIP_CORE_REGS_BASE_LEN 0xB0 #define HZIP_CORE_REGS_DFX_LEN 0x28 #define HZIP_CORE_INT_SOURCE 0x3010A0 #define HZIP_CORE_INT_MASK_REG 0x3010A4 #define HZIP_CORE_INT_SET 0x3010A8 #define HZIP_CORE_INT_STATUS 0x3010AC #define HZIP_CORE_INT_STATUS_M_ECC BIT(1) #define HZIP_CORE_SRAM_ECC_ERR_INFO 0x301148 #define HZIP_CORE_INT_RAS_CE_ENB 0x301160 #define HZIP_CORE_INT_RAS_NFE_ENB 0x301164 #define HZIP_CORE_INT_RAS_FE_ENB 0x301168 #define HZIP_CORE_INT_RAS_FE_ENB_MASK 0x0 #define HZIP_OOO_SHUTDOWN_SEL 0x30120C #define HZIP_SRAM_ECC_ERR_NUM_SHIFT 16 #define HZIP_SRAM_ECC_ERR_ADDR_SHIFT 24 #define HZIP_CORE_INT_MASK_ALL GENMASK(12, 0) #define HZIP_SQE_SIZE 128 #define HZIP_PF_DEF_Q_NUM 64 #define HZIP_PF_DEF_Q_BASE 0 #define HZIP_SOFT_CTRL_CNT_CLR_CE 0x301000 #define HZIP_SOFT_CTRL_CNT_CLR_CE_BIT BIT(0) #define HZIP_SOFT_CTRL_ZIP_CONTROL 0x30100C #define HZIP_AXI_SHUTDOWN_ENABLE BIT(14) #define HZIP_WR_PORT BIT(11) #define HZIP_DEV_ALG_MAX_LEN 256 #define HZIP_ALG_ZLIB_BIT GENMASK(1, 0) #define HZIP_ALG_GZIP_BIT GENMASK(3, 2) #define HZIP_ALG_DEFLATE_BIT GENMASK(5, 4) #define HZIP_ALG_LZ77_BIT GENMASK(7, 6) #define HZIP_BUF_SIZE 22 #define HZIP_SQE_MASK_OFFSET 64 #define HZIP_SQE_MASK_LEN 48 #define HZIP_CNT_CLR_CE_EN BIT(0) #define HZIP_RO_CNT_CLR_CE_EN BIT(2) #define HZIP_RD_CNT_CLR_CE_EN (HZIP_CNT_CLR_CE_EN | \ HZIP_RO_CNT_CLR_CE_EN) #define HZIP_PREFETCH_CFG 0x3011B0 #define HZIP_SVA_TRANS 0x3011C4 #define HZIP_PREFETCH_ENABLE (~(BIT(26) | BIT(17) | BIT(0))) #define HZIP_SVA_PREFETCH_DISABLE BIT(26) #define HZIP_SVA_DISABLE_READY (BIT(26) | BIT(30)) #define HZIP_SHAPER_RATE_COMPRESS 750 #define HZIP_SHAPER_RATE_DECOMPRESS 140 #define HZIP_DELAY_1_US 1 #define HZIP_POLL_TIMEOUT_US 1000 /* clock gating */ #define HZIP_PEH_CFG_AUTO_GATE 0x3011A8 #define HZIP_PEH_CFG_AUTO_GATE_EN BIT(0) #define HZIP_CORE_GATED_EN GENMASK(15, 8) #define HZIP_CORE_GATED_OOO_EN BIT(29) #define HZIP_CLOCK_GATED_EN (HZIP_CORE_GATED_EN | \ HZIP_CORE_GATED_OOO_EN) static const char hisi_zip_name[] = "hisi_zip"; static struct dentry *hzip_debugfs_root; struct hisi_zip_hw_error { u32 int_msk; const char *msg; }; struct zip_dfx_item { const 
char *name; u32 offset; }; struct zip_dev_alg { u32 alg_msk; const char *algs; }; static const struct zip_dev_alg zip_dev_algs[] = { { .alg_msk = HZIP_ALG_ZLIB_BIT, .algs = "zlib\n", }, { .alg_msk = HZIP_ALG_GZIP_BIT, .algs = "gzip\n", }, { .alg_msk = HZIP_ALG_DEFLATE_BIT, .algs = "deflate\n", }, { .alg_msk = HZIP_ALG_LZ77_BIT, .algs = "lz77_zstd\n", }, }; static struct hisi_qm_list zip_devices = { .register_to_crypto = hisi_zip_register_to_crypto, .unregister_from_crypto = hisi_zip_unregister_from_crypto, }; static struct zip_dfx_item zip_dfx_files[] = { {"send_cnt", offsetof(struct hisi_zip_dfx, send_cnt)}, {"recv_cnt", offsetof(struct hisi_zip_dfx, recv_cnt)}, {"send_busy_cnt", offsetof(struct hisi_zip_dfx, send_busy_cnt)}, {"err_bd_cnt", offsetof(struct hisi_zip_dfx, err_bd_cnt)}, }; static const struct hisi_zip_hw_error zip_hw_error[] = { { .int_msk = BIT(0), .msg = "zip_ecc_1bitt_err" }, { .int_msk = BIT(1), .msg = "zip_ecc_2bit_err" }, { .int_msk = BIT(2), .msg = "zip_axi_rresp_err" }, { .int_msk = BIT(3), .msg = "zip_axi_bresp_err" }, { .int_msk = BIT(4), .msg = "zip_src_addr_parse_err" }, { .int_msk = BIT(5), .msg = "zip_dst_addr_parse_err" }, { .int_msk = BIT(6), .msg = "zip_pre_in_addr_err" }, { .int_msk = BIT(7), .msg = "zip_pre_in_data_err" }, { .int_msk = BIT(8), .msg = "zip_com_inf_err" }, { .int_msk = BIT(9), .msg = "zip_enc_inf_err" }, { .int_msk = BIT(10), .msg = "zip_pre_out_err" }, { .int_msk = BIT(11), .msg = "zip_axi_poison_err" }, { .int_msk = BIT(12), .msg = "zip_sva_err" }, { /* sentinel */ } }; enum ctrl_debug_file_index { HZIP_CLEAR_ENABLE, HZIP_DEBUG_FILE_NUM, }; static const char * const ctrl_debug_file_name[] = { [HZIP_CLEAR_ENABLE] = "clear_enable", }; struct ctrl_debug_file { enum ctrl_debug_file_index index; spinlock_t lock; struct hisi_zip_ctrl *ctrl; }; /* * One ZIP controller has one PF and multiple VFs, some global configurations * which PF has need this structure. * * Just relevant for PF. 
*/ struct hisi_zip_ctrl { struct hisi_zip *hisi_zip; struct ctrl_debug_file files[HZIP_DEBUG_FILE_NUM]; }; enum zip_cap_type { ZIP_QM_NFE_MASK_CAP = 0x0, ZIP_QM_RESET_MASK_CAP, ZIP_QM_OOO_SHUTDOWN_MASK_CAP, ZIP_QM_CE_MASK_CAP, ZIP_NFE_MASK_CAP, ZIP_RESET_MASK_CAP, ZIP_OOO_SHUTDOWN_MASK_CAP, ZIP_CE_MASK_CAP, ZIP_CLUSTER_NUM_CAP, ZIP_CORE_TYPE_NUM_CAP, ZIP_CORE_NUM_CAP, ZIP_CLUSTER_COMP_NUM_CAP, ZIP_CLUSTER_DECOMP_NUM_CAP, ZIP_DECOMP_ENABLE_BITMAP, ZIP_COMP_ENABLE_BITMAP, ZIP_DRV_ALG_BITMAP, ZIP_DEV_ALG_BITMAP, ZIP_CORE1_ALG_BITMAP, ZIP_CORE2_ALG_BITMAP, ZIP_CORE3_ALG_BITMAP, ZIP_CORE4_ALG_BITMAP, ZIP_CORE5_ALG_BITMAP, ZIP_CAP_MAX }; static struct hisi_qm_cap_info zip_basic_cap_info[] = { {ZIP_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C57, 0x7C77}, {ZIP_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC57, 0x6C77}, {ZIP_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77}, {ZIP_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, {ZIP_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x1FFE}, {ZIP_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x7FE, 0x7FE}, {ZIP_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x2, 0x7FE}, {ZIP_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x1, 0x1}, {ZIP_CLUSTER_NUM_CAP, 0x313C, 28, GENMASK(3, 0), 0x1, 0x1, 0x1}, {ZIP_CORE_TYPE_NUM_CAP, 0x313C, 24, GENMASK(3, 0), 0x2, 0x2, 0x2}, {ZIP_CORE_NUM_CAP, 0x313C, 16, GENMASK(7, 0), 0x8, 0x8, 0x5}, {ZIP_CLUSTER_COMP_NUM_CAP, 0x313C, 8, GENMASK(7, 0), 0x2, 0x2, 0x2}, {ZIP_CLUSTER_DECOMP_NUM_CAP, 0x313C, 0, GENMASK(7, 0), 0x6, 0x6, 0x3}, {ZIP_DECOMP_ENABLE_BITMAP, 0x3140, 16, GENMASK(15, 0), 0xFC, 0xFC, 0x1C}, {ZIP_COMP_ENABLE_BITMAP, 0x3140, 0, GENMASK(15, 0), 0x3, 0x3, 0x3}, {ZIP_DRV_ALG_BITMAP, 0x3144, 0, GENMASK(31, 0), 0xF, 0xF, 0xF}, {ZIP_DEV_ALG_BITMAP, 0x3148, 0, GENMASK(31, 0), 0xF, 0xF, 0xFF}, {ZIP_CORE1_ALG_BITMAP, 0x314C, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5}, {ZIP_CORE2_ALG_BITMAP, 0x3150, 0, GENMASK(31, 0), 0x5, 0x5, 0xD5}, {ZIP_CORE3_ALG_BITMAP, 0x3154, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A}, {ZIP_CORE4_ALG_BITMAP, 0x3158, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A}, {ZIP_CORE5_ALG_BITMAP, 0x315C, 0, GENMASK(31, 0), 0xA, 0xA, 0x2A}, {ZIP_CAP_MAX, 0x317c, 0, GENMASK(0, 0), 0x0, 0x0, 0x0} }; enum { HZIP_COMP_CORE0, HZIP_COMP_CORE1, HZIP_DECOMP_CORE0, HZIP_DECOMP_CORE1, HZIP_DECOMP_CORE2, HZIP_DECOMP_CORE3, HZIP_DECOMP_CORE4, HZIP_DECOMP_CORE5, }; static const u64 core_offsets[] = { [HZIP_COMP_CORE0] = 0x302000, [HZIP_COMP_CORE1] = 0x303000, [HZIP_DECOMP_CORE0] = 0x304000, [HZIP_DECOMP_CORE1] = 0x305000, [HZIP_DECOMP_CORE2] = 0x306000, [HZIP_DECOMP_CORE3] = 0x307000, [HZIP_DECOMP_CORE4] = 0x308000, [HZIP_DECOMP_CORE5] = 0x309000, }; static const struct debugfs_reg32 hzip_dfx_regs[] = { {"HZIP_GET_BD_NUM ", 0x00ull}, {"HZIP_GET_RIGHT_BD ", 0x04ull}, {"HZIP_GET_ERROR_BD ", 0x08ull}, {"HZIP_DONE_BD_NUM ", 0x0cull}, {"HZIP_WORK_CYCLE ", 0x10ull}, {"HZIP_IDLE_CYCLE ", 0x18ull}, {"HZIP_MAX_DELAY ", 0x20ull}, {"HZIP_MIN_DELAY ", 0x24ull}, {"HZIP_AVG_DELAY ", 0x28ull}, {"HZIP_MEM_VISIBLE_DATA ", 0x30ull}, {"HZIP_MEM_VISIBLE_ADDR ", 0x34ull}, {"HZIP_CONSUMED_BYTE ", 0x38ull}, {"HZIP_PRODUCED_BYTE ", 0x40ull}, {"HZIP_COMP_INF ", 0x70ull}, {"HZIP_PRE_OUT ", 0x78ull}, {"HZIP_BD_RD ", 0x7cull}, {"HZIP_BD_WR ", 0x80ull}, {"HZIP_GET_BD_AXI_ERR_NUM ", 0x84ull}, {"HZIP_GET_BD_PARSE_ERR_NUM ", 0x88ull}, {"HZIP_ADD_BD_AXI_ERR_NUM ", 0x8cull}, {"HZIP_DECOMP_STF_RELOAD_CURR_ST ", 0x94ull}, {"HZIP_DECOMP_LZ77_CURR_ST ", 0x9cull}, }; static const struct debugfs_reg32 hzip_com_dfx_regs[] = { 
{"HZIP_CLOCK_GATE_CTRL ", 0x301004}, {"HZIP_CORE_INT_RAS_CE_ENB ", 0x301160}, {"HZIP_CORE_INT_RAS_NFE_ENB ", 0x301164}, {"HZIP_CORE_INT_RAS_FE_ENB ", 0x301168}, {"HZIP_UNCOM_ERR_RAS_CTRL ", 0x30116C}, }; static const struct debugfs_reg32 hzip_dump_dfx_regs[] = { {"HZIP_GET_BD_NUM ", 0x00ull}, {"HZIP_GET_RIGHT_BD ", 0x04ull}, {"HZIP_GET_ERROR_BD ", 0x08ull}, {"HZIP_DONE_BD_NUM ", 0x0cull}, {"HZIP_MAX_DELAY ", 0x20ull}, }; /* define the ZIP's dfx regs region and region length */ static struct dfx_diff_registers hzip_diff_regs[] = { { .reg_offset = HZIP_CORE_DFX_BASE, .reg_len = HZIP_CORE_REGS_BASE_LEN, }, { .reg_offset = HZIP_CORE_DFX_COMP_0, .reg_len = HZIP_CORE_REGS_DFX_LEN, }, { .reg_offset = HZIP_CORE_DFX_COMP_1, .reg_len = HZIP_CORE_REGS_DFX_LEN, }, { .reg_offset = HZIP_CORE_DFX_DECOMP_0, .reg_len = HZIP_CORE_REGS_DFX_LEN, }, { .reg_offset = HZIP_CORE_DFX_DECOMP_1, .reg_len = HZIP_CORE_REGS_DFX_LEN, }, { .reg_offset = HZIP_CORE_DFX_DECOMP_2, .reg_len = HZIP_CORE_REGS_DFX_LEN, }, { .reg_offset = HZIP_CORE_DFX_DECOMP_3, .reg_len = HZIP_CORE_REGS_DFX_LEN, }, { .reg_offset = HZIP_CORE_DFX_DECOMP_4, .reg_len = HZIP_CORE_REGS_DFX_LEN, }, { .reg_offset = HZIP_CORE_DFX_DECOMP_5, .reg_len = HZIP_CORE_REGS_DFX_LEN, }, }; static int hzip_diff_regs_show(struct seq_file *s, void *unused) { struct hisi_qm *qm = s->private; hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs, ARRAY_SIZE(hzip_diff_regs)); return 0; } DEFINE_SHOW_ATTRIBUTE(hzip_diff_regs); static const struct kernel_param_ops zip_uacce_mode_ops = { .set = uacce_mode_set, .get = param_get_int, }; /* * uacce_mode = 0 means zip only register to crypto, * uacce_mode = 1 means zip both register to crypto and uacce. */ static u32 uacce_mode = UACCE_MODE_NOUACCE; module_param_cb(uacce_mode, &zip_uacce_mode_ops, &uacce_mode, 0444); MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); static int pf_q_num_set(const char *val, const struct kernel_param *kp) { return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_ZIP_PF); } static const struct kernel_param_ops pf_q_num_ops = { .set = pf_q_num_set, .get = param_get_int, }; static u32 pf_q_num = HZIP_PF_DEF_Q_NUM; module_param_cb(pf_q_num, &pf_q_num_ops, &pf_q_num, 0444); MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)"); static const struct kernel_param_ops vfs_num_ops = { .set = vfs_num_set, .get = param_get_int, }; static u32 vfs_num; module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); static const struct pci_device_id hisi_zip_dev_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_PF) }, { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_ZIP_VF) }, { 0, } }; MODULE_DEVICE_TABLE(pci, hisi_zip_dev_ids); int zip_create_qps(struct hisi_qp **qps, int qp_num, int node) { if (node == NUMA_NO_NODE) node = cpu_to_node(smp_processor_id()); return hisi_qm_alloc_qps_node(&zip_devices, qp_num, 0, node, qps); } bool hisi_zip_alg_support(struct hisi_qm *qm, u32 alg) { u32 cap_val; cap_val = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DRV_ALG_BITMAP, qm->cap_ver); if ((alg & cap_val) == alg) return true; return false; } static int hisi_zip_set_qm_algs(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; char *algs, *ptr; u32 alg_mask; int i; if (!qm->use_sva) return 0; algs = devm_kzalloc(dev, HZIP_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); if (!algs) return -ENOMEM; alg_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DEV_ALG_BITMAP, qm->cap_ver); for (i = 0; i < 
ARRAY_SIZE(zip_dev_algs); i++) if (alg_mask & zip_dev_algs[i].alg_msk) strcat(algs, zip_dev_algs[i].algs); ptr = strrchr(algs, '\n'); if (ptr) *ptr = '\0'; qm->uacce->algs = algs; return 0; } static void hisi_zip_open_sva_prefetch(struct hisi_qm *qm) { u32 val; int ret; if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; /* Enable prefetch */ val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG); val &= HZIP_PREFETCH_ENABLE; writel(val, qm->io_base + HZIP_PREFETCH_CFG); ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_PREFETCH_CFG, val, !(val & HZIP_SVA_PREFETCH_DISABLE), HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US); if (ret) pci_err(qm->pdev, "failed to open sva prefetch\n"); } static void hisi_zip_close_sva_prefetch(struct hisi_qm *qm) { u32 val; int ret; if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; val = readl_relaxed(qm->io_base + HZIP_PREFETCH_CFG); val |= HZIP_SVA_PREFETCH_DISABLE; writel(val, qm->io_base + HZIP_PREFETCH_CFG); ret = readl_relaxed_poll_timeout(qm->io_base + HZIP_SVA_TRANS, val, !(val & HZIP_SVA_DISABLE_READY), HZIP_DELAY_1_US, HZIP_POLL_TIMEOUT_US); if (ret) pci_err(qm->pdev, "failed to close sva prefetch\n"); } static void hisi_zip_enable_clock_gate(struct hisi_qm *qm) { u32 val; if (qm->ver < QM_HW_V3) return; val = readl(qm->io_base + HZIP_CLOCK_GATE_CTRL); val |= HZIP_CLOCK_GATED_EN; writel(val, qm->io_base + HZIP_CLOCK_GATE_CTRL); val = readl(qm->io_base + HZIP_PEH_CFG_AUTO_GATE); val |= HZIP_PEH_CFG_AUTO_GATE_EN; writel(val, qm->io_base + HZIP_PEH_CFG_AUTO_GATE); } static int hisi_zip_set_user_domain_and_cache(struct hisi_qm *qm) { void __iomem *base = qm->io_base; u32 dcomp_bm, comp_bm; /* qm user domain */ writel(AXUSER_BASE, base + QM_ARUSER_M_CFG_1); writel(ARUSER_M_CFG_ENABLE, base + QM_ARUSER_M_CFG_ENABLE); writel(AXUSER_BASE, base + QM_AWUSER_M_CFG_1); writel(AWUSER_M_CFG_ENABLE, base + QM_AWUSER_M_CFG_ENABLE); writel(WUSER_M_CFG_ENABLE, base + QM_WUSER_M_CFG_ENABLE); /* qm cache */ writel(AXI_M_CFG, base + QM_AXI_M_CFG); writel(AXI_M_CFG_ENABLE, base + QM_AXI_M_CFG_ENABLE); /* disable FLR triggered by BME(bus master enable) */ writel(PEH_AXUSER_CFG, base + QM_PEH_AXUSER_CFG); writel(PEH_AXUSER_CFG_ENABLE, base + QM_PEH_AXUSER_CFG_ENABLE); /* cache */ writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_0); writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_ARCA_CHE_1); writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_0); writel(HZIP_CACHE_ALL_EN, base + HZIP_PORT_AWCA_CHE_1); /* user domain configurations */ writel(AXUSER_BASE, base + HZIP_BD_RUSER_32_63); writel(AXUSER_BASE, base + HZIP_BD_WUSER_32_63); if (qm->use_sva && qm->ver == QM_HW_V2) { writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_RUSER_32_63); writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_DATA_WUSER_32_63); writel(AXUSER_BASE | AXUSER_SSV, base + HZIP_SGL_RUSER_32_63); } else { writel(AXUSER_BASE, base + HZIP_DATA_RUSER_32_63); writel(AXUSER_BASE, base + HZIP_DATA_WUSER_32_63); writel(AXUSER_BASE, base + HZIP_SGL_RUSER_32_63); } /* let's open all compression/decompression cores */ dcomp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_DECOMP_ENABLE_BITMAP, qm->cap_ver); comp_bm = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_COMP_ENABLE_BITMAP, qm->cap_ver); writel(HZIP_DECOMP_CHECK_ENABLE | dcomp_bm | comp_bm, base + HZIP_CLOCK_GATE_CTRL); /* enable sqc,cqc writeback */ writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | FIELD_PREP(CQC_CACHE_WB_THRD, 1), base + QM_CACHE_CTL); 
hisi_zip_enable_clock_gate(qm); return 0; } static void hisi_zip_master_ooo_ctrl(struct hisi_qm *qm, bool enable) { u32 val1, val2; val1 = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); if (enable) { val1 |= HZIP_AXI_SHUTDOWN_ENABLE; val2 = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); } else { val1 &= ~HZIP_AXI_SHUTDOWN_ENABLE; val2 = 0x0; } if (qm->ver > QM_HW_V2) writel(val2, qm->io_base + HZIP_OOO_SHUTDOWN_SEL); writel(val1, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); } static void hisi_zip_hw_error_enable(struct hisi_qm *qm) { u32 nfe, ce; if (qm->ver == QM_HW_V1) { writel(HZIP_CORE_INT_MASK_ALL, qm->io_base + HZIP_CORE_INT_MASK_REG); dev_info(&qm->pdev->dev, "Does not support hw error handle\n"); return; } nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver); /* clear ZIP hw error source if having */ writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_SOURCE); /* configure error type */ writel(ce, qm->io_base + HZIP_CORE_INT_RAS_CE_ENB); writel(HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_RAS_FE_ENB); writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); hisi_zip_master_ooo_ctrl(qm, true); /* enable ZIP hw error interrupts */ writel(0, qm->io_base + HZIP_CORE_INT_MASK_REG); } static void hisi_zip_hw_error_disable(struct hisi_qm *qm) { u32 nfe, ce; /* disable ZIP hw error interrupts */ nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CE_MASK_CAP, qm->cap_ver); writel(ce | nfe | HZIP_CORE_INT_RAS_FE_ENB_MASK, qm->io_base + HZIP_CORE_INT_MASK_REG); hisi_zip_master_ooo_ctrl(qm, false); } static inline struct hisi_qm *file_to_qm(struct ctrl_debug_file *file) { struct hisi_zip *hisi_zip = file->ctrl->hisi_zip; return &hisi_zip->qm; } static u32 clear_enable_read(struct hisi_qm *qm) { return readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) & HZIP_SOFT_CTRL_CNT_CLR_CE_BIT; } static int clear_enable_write(struct hisi_qm *qm, u32 val) { u32 tmp; if (val != 1 && val != 0) return -EINVAL; tmp = (readl(qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE) & ~HZIP_SOFT_CTRL_CNT_CLR_CE_BIT) | val; writel(tmp, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); return 0; } static ssize_t hisi_zip_ctrl_debug_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct ctrl_debug_file *file = filp->private_data; struct hisi_qm *qm = file_to_qm(file); char tbuf[HZIP_BUF_SIZE]; u32 val; int ret; ret = hisi_qm_get_dfx_access(qm); if (ret) return ret; spin_lock_irq(&file->lock); switch (file->index) { case HZIP_CLEAR_ENABLE: val = clear_enable_read(qm); break; default: goto err_input; } spin_unlock_irq(&file->lock); hisi_qm_put_dfx_access(qm); ret = scnprintf(tbuf, sizeof(tbuf), "%u\n", val); return simple_read_from_buffer(buf, count, pos, tbuf, ret); err_input: spin_unlock_irq(&file->lock); hisi_qm_put_dfx_access(qm); return -EINVAL; } static ssize_t hisi_zip_ctrl_debug_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct ctrl_debug_file *file = filp->private_data; struct hisi_qm *qm = file_to_qm(file); char tbuf[HZIP_BUF_SIZE]; unsigned long val; int len, ret; if (*pos != 0) return 0; if (count >= HZIP_BUF_SIZE) return -ENOSPC; len = simple_write_to_buffer(tbuf, HZIP_BUF_SIZE - 1, pos, buf, count); if (len < 0) return len; tbuf[len] = '\0'; ret = kstrtoul(tbuf, 0, &val); if (ret) return ret; ret = 
hisi_qm_get_dfx_access(qm); if (ret) return ret; spin_lock_irq(&file->lock); switch (file->index) { case HZIP_CLEAR_ENABLE: ret = clear_enable_write(qm, val); if (ret) goto err_input; break; default: ret = -EINVAL; goto err_input; } ret = count; err_input: spin_unlock_irq(&file->lock); hisi_qm_put_dfx_access(qm); return ret; } static const struct file_operations ctrl_debug_fops = { .owner = THIS_MODULE, .open = simple_open, .read = hisi_zip_ctrl_debug_read, .write = hisi_zip_ctrl_debug_write, }; static int zip_debugfs_atomic64_set(void *data, u64 val) { if (val) return -EINVAL; atomic64_set((atomic64_t *)data, 0); return 0; } static int zip_debugfs_atomic64_get(void *data, u64 *val) { *val = atomic64_read((atomic64_t *)data); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(zip_atomic64_ops, zip_debugfs_atomic64_get, zip_debugfs_atomic64_set, "%llu\n"); static int hisi_zip_regs_show(struct seq_file *s, void *unused) { hisi_qm_regs_dump(s, s->private); return 0; } DEFINE_SHOW_ATTRIBUTE(hisi_zip_regs); static int hisi_zip_core_debug_init(struct hisi_qm *qm) { u32 zip_core_num, zip_comp_core_num; struct device *dev = &qm->pdev->dev; struct debugfs_regset32 *regset; struct dentry *tmp_d; char buf[HZIP_BUF_SIZE]; int i; zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP, qm->cap_ver); for (i = 0; i < zip_core_num; i++) { if (i < zip_comp_core_num) scnprintf(buf, sizeof(buf), "comp_core%d", i); else scnprintf(buf, sizeof(buf), "decomp_core%d", i - zip_comp_core_num); regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) return -ENOENT; regset->regs = hzip_dfx_regs; regset->nregs = ARRAY_SIZE(hzip_dfx_regs); regset->base = qm->io_base + core_offsets[i]; regset->dev = dev; tmp_d = debugfs_create_dir(buf, qm->debug.debug_root); debugfs_create_file("regs", 0444, tmp_d, regset, &hisi_zip_regs_fops); } return 0; } static void hisi_zip_dfx_debug_init(struct hisi_qm *qm) { struct dfx_diff_registers *hzip_regs = qm->debug.acc_diff_regs; struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm); struct hisi_zip_dfx *dfx = &zip->dfx; struct dentry *tmp_dir; void *data; int i; tmp_dir = debugfs_create_dir("zip_dfx", qm->debug.debug_root); for (i = 0; i < ARRAY_SIZE(zip_dfx_files); i++) { data = (atomic64_t *)((uintptr_t)dfx + zip_dfx_files[i].offset); debugfs_create_file(zip_dfx_files[i].name, 0644, tmp_dir, data, &zip_atomic64_ops); } if (qm->fun_type == QM_HW_PF && hzip_regs) debugfs_create_file("diff_regs", 0444, tmp_dir, qm, &hzip_diff_regs_fops); } static int hisi_zip_ctrl_debug_init(struct hisi_qm *qm) { struct hisi_zip *zip = container_of(qm, struct hisi_zip, qm); int i; for (i = HZIP_CLEAR_ENABLE; i < HZIP_DEBUG_FILE_NUM; i++) { spin_lock_init(&zip->ctrl->files[i].lock); zip->ctrl->files[i].ctrl = zip->ctrl; zip->ctrl->files[i].index = i; debugfs_create_file(ctrl_debug_file_name[i], 0600, qm->debug.debug_root, zip->ctrl->files + i, &ctrl_debug_fops); } return hisi_zip_core_debug_init(qm); } static int hisi_zip_debugfs_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; struct dentry *dev_d; int ret; dev_d = debugfs_create_dir(dev_name(dev), hzip_debugfs_root); qm->debug.sqe_mask_offset = HZIP_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = HZIP_SQE_MASK_LEN; qm->debug.debug_root = dev_d; ret = hisi_qm_regs_debugfs_init(qm, hzip_diff_regs, ARRAY_SIZE(hzip_diff_regs)); if (ret) { dev_warn(dev, "Failed to init ZIP diff regs!\n"); goto debugfs_remove; } 
hisi_qm_debug_init(qm); if (qm->fun_type == QM_HW_PF) { ret = hisi_zip_ctrl_debug_init(qm); if (ret) goto failed_to_create; } hisi_zip_dfx_debug_init(qm); return 0; failed_to_create: hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); debugfs_remove: debugfs_remove_recursive(hzip_debugfs_root); return ret; } /* hisi_zip_debug_regs_clear() - clear the zip debug regs */ static void hisi_zip_debug_regs_clear(struct hisi_qm *qm) { int i, j; /* enable register read_clear bit */ writel(HZIP_RD_CNT_CLR_CE_EN, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); for (i = 0; i < ARRAY_SIZE(core_offsets); i++) for (j = 0; j < ARRAY_SIZE(hzip_dfx_regs); j++) readl(qm->io_base + core_offsets[i] + hzip_dfx_regs[j].offset); /* disable register read_clear bit */ writel(0x0, qm->io_base + HZIP_SOFT_CTRL_CNT_CLR_CE); hisi_qm_debug_regs_clear(qm); } static void hisi_zip_debugfs_exit(struct hisi_qm *qm) { hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(hzip_diff_regs)); debugfs_remove_recursive(qm->debug.debug_root); if (qm->fun_type == QM_HW_PF) { hisi_zip_debug_regs_clear(qm); qm->debug.curr_qm_qp_num = 0; } } static int hisi_zip_show_last_regs_init(struct hisi_qm *qm) { int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs); int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs); struct qm_debug *debug = &qm->debug; void __iomem *io_base; u32 zip_core_num; int i, j, idx; zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); debug->last_words = kcalloc(core_dfx_regs_num * zip_core_num + com_dfx_regs_num, sizeof(unsigned int), GFP_KERNEL); if (!debug->last_words) return -ENOMEM; for (i = 0; i < com_dfx_regs_num; i++) { io_base = qm->io_base + hzip_com_dfx_regs[i].offset; debug->last_words[i] = readl_relaxed(io_base); } for (i = 0; i < zip_core_num; i++) { io_base = qm->io_base + core_offsets[i]; for (j = 0; j < core_dfx_regs_num; j++) { idx = com_dfx_regs_num + i * core_dfx_regs_num + j; debug->last_words[idx] = readl_relaxed( io_base + hzip_dump_dfx_regs[j].offset); } } return 0; } static void hisi_zip_show_last_regs_uninit(struct hisi_qm *qm) { struct qm_debug *debug = &qm->debug; if (qm->fun_type == QM_HW_VF || !debug->last_words) return; kfree(debug->last_words); debug->last_words = NULL; } static void hisi_zip_show_last_dfx_regs(struct hisi_qm *qm) { int core_dfx_regs_num = ARRAY_SIZE(hzip_dump_dfx_regs); int com_dfx_regs_num = ARRAY_SIZE(hzip_com_dfx_regs); u32 zip_core_num, zip_comp_core_num; struct qm_debug *debug = &qm->debug; char buf[HZIP_BUF_SIZE]; void __iomem *base; int i, j, idx; u32 val; if (qm->fun_type == QM_HW_VF || !debug->last_words) return; for (i = 0; i < com_dfx_regs_num; i++) { val = readl_relaxed(qm->io_base + hzip_com_dfx_regs[i].offset); if (debug->last_words[i] != val) pci_info(qm->pdev, "com_dfx: %s \t= 0x%08x => 0x%08x\n", hzip_com_dfx_regs[i].name, debug->last_words[i], val); } zip_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CORE_NUM_CAP, qm->cap_ver); zip_comp_core_num = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_CLUSTER_COMP_NUM_CAP, qm->cap_ver); for (i = 0; i < zip_core_num; i++) { if (i < zip_comp_core_num) scnprintf(buf, sizeof(buf), "Comp_core-%d", i); else scnprintf(buf, sizeof(buf), "Decomp_core-%d", i - zip_comp_core_num); base = qm->io_base + core_offsets[i]; pci_info(qm->pdev, "==>%s:\n", buf); /* dump last word for dfx regs during control resetting */ for (j = 0; j < core_dfx_regs_num; j++) { idx = com_dfx_regs_num + i * core_dfx_regs_num + j; val = readl_relaxed(base + hzip_dump_dfx_regs[j].offset); if 
(debug->last_words[idx] != val) pci_info(qm->pdev, "%s \t= 0x%08x => 0x%08x\n", hzip_dump_dfx_regs[j].name, debug->last_words[idx], val); } } } static void hisi_zip_log_hw_error(struct hisi_qm *qm, u32 err_sts) { const struct hisi_zip_hw_error *err = zip_hw_error; struct device *dev = &qm->pdev->dev; u32 err_val; while (err->msg) { if (err->int_msk & err_sts) { dev_err(dev, "%s [error status=0x%x] found\n", err->msg, err->int_msk); if (err->int_msk & HZIP_CORE_INT_STATUS_M_ECC) { err_val = readl(qm->io_base + HZIP_CORE_SRAM_ECC_ERR_INFO); dev_err(dev, "hisi-zip multi ecc sram num=0x%x\n", ((err_val >> HZIP_SRAM_ECC_ERR_NUM_SHIFT) & 0xFF)); } } err++; } } static u32 hisi_zip_get_hw_err_status(struct hisi_qm *qm) { return readl(qm->io_base + HZIP_CORE_INT_STATUS); } static void hisi_zip_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { u32 nfe; writel(err_sts, qm->io_base + HZIP_CORE_INT_SOURCE); nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_NFE_MASK_CAP, qm->cap_ver); writel(nfe, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); } static void hisi_zip_open_axi_master_ooo(struct hisi_qm *qm) { u32 val; val = readl(qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); writel(val & ~HZIP_AXI_SHUTDOWN_ENABLE, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); writel(val | HZIP_AXI_SHUTDOWN_ENABLE, qm->io_base + HZIP_SOFT_CTRL_ZIP_CONTROL); } static void hisi_zip_close_axi_master_ooo(struct hisi_qm *qm) { u32 nfe_enb; /* Disable ECC Mbit error report. */ nfe_enb = readl(qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); writel(nfe_enb & ~HZIP_CORE_INT_STATUS_M_ECC, qm->io_base + HZIP_CORE_INT_RAS_NFE_ENB); /* Inject zip ECC Mbit error to block master ooo. */ writel(HZIP_CORE_INT_STATUS_M_ECC, qm->io_base + HZIP_CORE_INT_SET); } static void hisi_zip_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; err_info->fe = HZIP_CORE_INT_RAS_FE_ENB_MASK; err_info->ce = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_CE_MASK_CAP, qm->cap_ver); err_info->nfe = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_NFE_MASK_CAP, qm->cap_ver); err_info->ecc_2bits_mask = HZIP_CORE_INT_STATUS_M_ECC; err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_QM_RESET_MASK_CAP, qm->cap_ver); err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, zip_basic_cap_info, ZIP_RESET_MASK_CAP, qm->cap_ver); err_info->msi_wr_port = HZIP_WR_PORT; err_info->acpi_rst = "ZRST"; } static const struct hisi_qm_err_ini hisi_zip_err_ini = { .hw_init = hisi_zip_set_user_domain_and_cache, .hw_err_enable = hisi_zip_hw_error_enable, .hw_err_disable = hisi_zip_hw_error_disable, .get_dev_hw_err_status = hisi_zip_get_hw_err_status, .clear_dev_hw_err_status = hisi_zip_clear_hw_err_status, .log_dev_hw_err = hisi_zip_log_hw_error, .open_axi_master_ooo = hisi_zip_open_axi_master_ooo, .close_axi_master_ooo = hisi_zip_close_axi_master_ooo, .open_sva_prefetch = hisi_zip_open_sva_prefetch, .close_sva_prefetch = hisi_zip_close_sva_prefetch, .show_last_dfx_regs = hisi_zip_show_last_dfx_regs, .err_info_init = hisi_zip_err_info_init, }; static int hisi_zip_pf_probe_init(struct hisi_zip *hisi_zip) { struct hisi_qm *qm = &hisi_zip->qm; struct hisi_zip_ctrl *ctrl; int ret; ctrl = devm_kzalloc(&qm->pdev->dev, sizeof(*ctrl), GFP_KERNEL); if (!ctrl) return -ENOMEM; hisi_zip->ctrl = ctrl; ctrl->hisi_zip = 
hisi_zip; qm->err_ini = &hisi_zip_err_ini; qm->err_ini->err_info_init(qm); ret = hisi_zip_set_user_domain_and_cache(qm); if (ret) return ret; hisi_zip_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); hisi_zip_debug_regs_clear(qm); ret = hisi_zip_show_last_regs_init(qm); if (ret) pci_err(qm->pdev, "Failed to init last word regs!\n"); return ret; } static int hisi_zip_qm_init(struct hisi_qm *qm, struct pci_dev *pdev) { int ret; qm->pdev = pdev; qm->ver = pdev->revision; qm->mode = uacce_mode; qm->sqe_size = HZIP_SQE_SIZE; qm->dev_name = hisi_zip_name; qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_ZIP_PF) ? QM_HW_PF : QM_HW_VF; if (qm->fun_type == QM_HW_PF) { qm->qp_base = HZIP_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &zip_devices; } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { /* * have no way to get qm configure in VM in v1 hardware, * so currently force PF to uses HZIP_PF_DEF_Q_NUM, and force * to trigger only one VF in v1 hardware. * * v2 hardware has no such problem. */ qm->qp_base = HZIP_PF_DEF_Q_NUM; qm->qp_num = HZIP_QUEUE_NUM_V1 - HZIP_PF_DEF_Q_NUM; } ret = hisi_qm_init(qm); if (ret) { pci_err(qm->pdev, "Failed to init zip qm configures!\n"); return ret; } ret = hisi_zip_set_qm_algs(qm); if (ret) { pci_err(qm->pdev, "Failed to set zip algs!\n"); hisi_qm_uninit(qm); } return ret; } static void hisi_zip_qm_uninit(struct hisi_qm *qm) { hisi_qm_uninit(qm); } static int hisi_zip_probe_init(struct hisi_zip *hisi_zip) { u32 type_rate = HZIP_SHAPER_RATE_COMPRESS; struct hisi_qm *qm = &hisi_zip->qm; int ret; if (qm->fun_type == QM_HW_PF) { ret = hisi_zip_pf_probe_init(hisi_zip); if (ret) return ret; /* enable shaper type 0 */ if (qm->ver >= QM_HW_V3) { type_rate |= QM_SHAPER_ENABLE; /* ZIP need to enable shaper type 1 */ type_rate |= HZIP_SHAPER_RATE_DECOMPRESS << QM_SHAPER_TYPE1_OFFSET; qm->type_rate = type_rate; } } return 0; } static int hisi_zip_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct hisi_zip *hisi_zip; struct hisi_qm *qm; int ret; hisi_zip = devm_kzalloc(&pdev->dev, sizeof(*hisi_zip), GFP_KERNEL); if (!hisi_zip) return -ENOMEM; qm = &hisi_zip->qm; ret = hisi_zip_qm_init(qm, pdev); if (ret) { pci_err(pdev, "Failed to init ZIP QM (%d)!\n", ret); return ret; } ret = hisi_zip_probe_init(hisi_zip); if (ret) { pci_err(pdev, "Failed to probe (%d)!\n", ret); goto err_qm_uninit; } ret = hisi_qm_start(qm); if (ret) goto err_dev_err_uninit; ret = hisi_zip_debugfs_init(qm); if (ret) pci_err(pdev, "failed to init debugfs (%d)!\n", ret); ret = hisi_qm_alg_register(qm, &zip_devices); if (ret < 0) { pci_err(pdev, "failed to register driver to crypto!\n"); goto err_qm_stop; } if (qm->uacce) { ret = uacce_register(qm->uacce); if (ret) { pci_err(pdev, "failed to register uacce (%d)!\n", ret); goto err_qm_alg_unregister; } } if (qm->fun_type == QM_HW_PF && vfs_num > 0) { ret = hisi_qm_sriov_enable(pdev, vfs_num); if (ret < 0) goto err_qm_alg_unregister; } hisi_qm_pm_init(qm); return 0; err_qm_alg_unregister: hisi_qm_alg_unregister(qm, &zip_devices); err_qm_stop: hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_dev_err_uninit: hisi_zip_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); err_qm_uninit: hisi_zip_qm_uninit(qm); return ret; } static void hisi_zip_remove(struct pci_dev *pdev) { struct hisi_qm *qm = pci_get_drvdata(pdev); hisi_qm_pm_uninit(qm); hisi_qm_wait_task_finish(qm, &zip_devices); hisi_qm_alg_unregister(qm, &zip_devices); if (qm->fun_type == QM_HW_PF && qm->vfs_num) 
hisi_qm_sriov_disable(pdev, true); hisi_zip_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); hisi_zip_show_last_regs_uninit(qm); hisi_qm_dev_err_uninit(qm); hisi_zip_qm_uninit(qm); } static const struct dev_pm_ops hisi_zip_pm_ops = { SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL) }; static const struct pci_error_handlers hisi_zip_err_handler = { .error_detected = hisi_qm_dev_err_detected, .slot_reset = hisi_qm_dev_slot_reset, .reset_prepare = hisi_qm_reset_prepare, .reset_done = hisi_qm_reset_done, }; static struct pci_driver hisi_zip_pci_driver = { .name = "hisi_zip", .id_table = hisi_zip_dev_ids, .probe = hisi_zip_probe, .remove = hisi_zip_remove, .sriov_configure = IS_ENABLED(CONFIG_PCI_IOV) ? hisi_qm_sriov_configure : NULL, .err_handler = &hisi_zip_err_handler, .shutdown = hisi_qm_dev_shutdown, .driver.pm = &hisi_zip_pm_ops, }; struct pci_driver *hisi_zip_get_pf_driver(void) { return &hisi_zip_pci_driver; } EXPORT_SYMBOL_GPL(hisi_zip_get_pf_driver); static void hisi_zip_register_debugfs(void) { if (!debugfs_initialized()) return; hzip_debugfs_root = debugfs_create_dir("hisi_zip", NULL); } static void hisi_zip_unregister_debugfs(void) { debugfs_remove_recursive(hzip_debugfs_root); } static int __init hisi_zip_init(void) { int ret; hisi_qm_init_list(&zip_devices); hisi_zip_register_debugfs(); ret = pci_register_driver(&hisi_zip_pci_driver); if (ret < 0) { hisi_zip_unregister_debugfs(); pr_err("Failed to register pci driver.\n"); } return ret; } static void __exit hisi_zip_exit(void) { pci_unregister_driver(&hisi_zip_pci_driver); hisi_zip_unregister_debugfs(); } module_init(hisi_zip_init); module_exit(hisi_zip_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zhou Wang <[email protected]>"); MODULE_DESCRIPTION("Driver for HiSilicon ZIP accelerator");
linux-master
drivers/crypto/hisilicon/zip/zip_main.c
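For context, a hedged sketch of the queue-allocation interface this file exposes to zip_crypto.c: zip_create_qps() picks a device near the given NUMA node and hisi_zip_alg_support() checks the ZIP_DRV_ALG_BITMAP capability. The EXAMPLE_* macros and the "zip.h" include are assumptions that mirror what zip_crypto.c does; this is not new driver API.

#include <linux/bits.h>
#include <linux/numa.h>
#include "zip.h"			/* assumed to declare zip_create_qps() and hisi_zip_alg_support() */

#define EXAMPLE_Q_NUM		2		/* one compress qp + one decompress qp */
#define EXAMPLE_ALG_ZLIB	GENMASK(1, 0)	/* same capability bit zip_crypto.c uses for zlib */

/*
 * Grab a compress/decompress queue pair on the local NUMA node and check
 * that the selected device advertises zlib support, roughly what
 * hisi_zip_ctx_init() does at tfm init time.
 */
static int example_grab_zip_queues(struct hisi_qp *qps[EXAMPLE_Q_NUM])
{
	int ret;

	ret = zip_create_qps(qps, EXAMPLE_Q_NUM, NUMA_NO_NODE);
	if (ret)
		return ret;

	if (!hisi_zip_alg_support(qps[0]->qm, EXAMPLE_ALG_ZLIB)) {
		hisi_qm_free_qps(qps, EXAMPLE_Q_NUM);
		return -EOPNOTSUPP;
	}

	return 0;
}

zip_crypto.c then binds each qp to a qp_ctx with hisi_zip_start_qp() before building its request queue and SGL pool.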
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include <crypto/aes.h> #include <crypto/aead.h> #include <crypto/algapi.h> #include <crypto/authenc.h> #include <crypto/des.h> #include <crypto/hash.h> #include <crypto/internal/aead.h> #include <crypto/internal/des.h> #include <crypto/sha1.h> #include <crypto/sha2.h> #include <crypto/skcipher.h> #include <crypto/xts.h> #include <linux/crypto.h> #include <linux/dma-mapping.h> #include <linux/idr.h> #include "sec.h" #include "sec_crypto.h" #define SEC_PRIORITY 4001 #define SEC_XTS_MIN_KEY_SIZE (2 * AES_MIN_KEY_SIZE) #define SEC_XTS_MID_KEY_SIZE (3 * AES_MIN_KEY_SIZE) #define SEC_XTS_MAX_KEY_SIZE (2 * AES_MAX_KEY_SIZE) #define SEC_DES3_2KEY_SIZE (2 * DES_KEY_SIZE) #define SEC_DES3_3KEY_SIZE (3 * DES_KEY_SIZE) /* SEC sqe(bd) bit operational relative MACRO */ #define SEC_DE_OFFSET 1 #define SEC_CIPHER_OFFSET 4 #define SEC_SCENE_OFFSET 3 #define SEC_DST_SGL_OFFSET 2 #define SEC_SRC_SGL_OFFSET 7 #define SEC_CKEY_OFFSET 9 #define SEC_CMODE_OFFSET 12 #define SEC_AKEY_OFFSET 5 #define SEC_AEAD_ALG_OFFSET 11 #define SEC_AUTH_OFFSET 6 #define SEC_DE_OFFSET_V3 9 #define SEC_SCENE_OFFSET_V3 5 #define SEC_CKEY_OFFSET_V3 13 #define SEC_CTR_CNT_OFFSET 25 #define SEC_CTR_CNT_ROLLOVER 2 #define SEC_SRC_SGL_OFFSET_V3 11 #define SEC_DST_SGL_OFFSET_V3 14 #define SEC_CALG_OFFSET_V3 4 #define SEC_AKEY_OFFSET_V3 9 #define SEC_MAC_OFFSET_V3 4 #define SEC_AUTH_ALG_OFFSET_V3 15 #define SEC_CIPHER_AUTH_V3 0xbf #define SEC_AUTH_CIPHER_V3 0x40 #define SEC_FLAG_OFFSET 7 #define SEC_FLAG_MASK 0x0780 #define SEC_TYPE_MASK 0x0F #define SEC_DONE_MASK 0x0001 #define SEC_ICV_MASK 0x000E #define SEC_SQE_LEN_RATE_MASK 0x3 #define SEC_TOTAL_IV_SZ(depth) (SEC_IV_SIZE * (depth)) #define SEC_SGL_SGE_NR 128 #define SEC_CIPHER_AUTH 0xfe #define SEC_AUTH_CIPHER 0x1 #define SEC_MAX_MAC_LEN 64 #define SEC_MAX_AAD_LEN 65535 #define SEC_MAX_CCM_AAD_LEN 65279 #define SEC_TOTAL_MAC_SZ(depth) (SEC_MAX_MAC_LEN * (depth)) #define SEC_PBUF_SZ 512 #define SEC_PBUF_IV_OFFSET SEC_PBUF_SZ #define SEC_PBUF_MAC_OFFSET (SEC_PBUF_SZ + SEC_IV_SIZE) #define SEC_PBUF_PKG (SEC_PBUF_SZ + SEC_IV_SIZE + \ SEC_MAX_MAC_LEN * 2) #define SEC_PBUF_NUM (PAGE_SIZE / SEC_PBUF_PKG) #define SEC_PBUF_PAGE_NUM(depth) ((depth) / SEC_PBUF_NUM) #define SEC_PBUF_LEFT_SZ(depth) (SEC_PBUF_PKG * ((depth) - \ SEC_PBUF_PAGE_NUM(depth) * SEC_PBUF_NUM)) #define SEC_TOTAL_PBUF_SZ(depth) (PAGE_SIZE * SEC_PBUF_PAGE_NUM(depth) + \ SEC_PBUF_LEFT_SZ(depth)) #define SEC_SQE_LEN_RATE 4 #define SEC_SQE_CFLAG 2 #define SEC_SQE_AEAD_FLAG 3 #define SEC_SQE_DONE 0x1 #define SEC_ICV_ERR 0x2 #define MIN_MAC_LEN 4 #define MAC_LEN_MASK 0x1U #define MAX_INPUT_DATA_LEN 0xFFFE00 #define BITS_MASK 0xFF #define BYTE_BITS 0x8 #define SEC_XTS_NAME_SZ 0x3 #define IV_CM_CAL_NUM 2 #define IV_CL_MASK 0x7 #define IV_CL_MIN 2 #define IV_CL_MID 4 #define IV_CL_MAX 8 #define IV_FLAGS_OFFSET 0x6 #define IV_CM_OFFSET 0x3 #define IV_LAST_BYTE1 1 #define IV_LAST_BYTE2 2 #define IV_LAST_BYTE_MASK 0xFF #define IV_CTR_INIT 0x1 #define IV_BYTE_OFFSET 0x8 struct sec_skcipher { u64 alg_msk; struct skcipher_alg alg; }; struct sec_aead { u64 alg_msk; struct aead_alg alg; }; /* Get an en/de-cipher queue cyclically to balance load over queues of TFM */ static inline int sec_alloc_queue_id(struct sec_ctx *ctx, struct sec_req *req) { if (req->c_req.encrypt) return (u32)atomic_inc_return(&ctx->enc_qcyclic) % ctx->hlf_q_num; return (u32)atomic_inc_return(&ctx->dec_qcyclic) % ctx->hlf_q_num + ctx->hlf_q_num; } static inline void 
sec_free_queue_id(struct sec_ctx *ctx, struct sec_req *req) { if (req->c_req.encrypt) atomic_dec(&ctx->enc_qcyclic); else atomic_dec(&ctx->dec_qcyclic); } static int sec_alloc_req_id(struct sec_req *req, struct sec_qp_ctx *qp_ctx) { int req_id; spin_lock_bh(&qp_ctx->req_lock); req_id = idr_alloc_cyclic(&qp_ctx->req_idr, NULL, 0, qp_ctx->qp->sq_depth, GFP_ATOMIC); spin_unlock_bh(&qp_ctx->req_lock); if (unlikely(req_id < 0)) { dev_err(req->ctx->dev, "alloc req id fail!\n"); return req_id; } req->qp_ctx = qp_ctx; qp_ctx->req_list[req_id] = req; return req_id; } static void sec_free_req_id(struct sec_req *req) { struct sec_qp_ctx *qp_ctx = req->qp_ctx; int req_id = req->req_id; if (unlikely(req_id < 0 || req_id >= qp_ctx->qp->sq_depth)) { dev_err(req->ctx->dev, "free request id invalid!\n"); return; } qp_ctx->req_list[req_id] = NULL; req->qp_ctx = NULL; spin_lock_bh(&qp_ctx->req_lock); idr_remove(&qp_ctx->req_idr, req_id); spin_unlock_bh(&qp_ctx->req_lock); } static u8 pre_parse_finished_bd(struct bd_status *status, void *resp) { struct sec_sqe *bd = resp; status->done = le16_to_cpu(bd->type2.done_flag) & SEC_DONE_MASK; status->icv = (le16_to_cpu(bd->type2.done_flag) & SEC_ICV_MASK) >> 1; status->flag = (le16_to_cpu(bd->type2.done_flag) & SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; status->tag = le16_to_cpu(bd->type2.tag); status->err_type = bd->type2.error_type; return bd->type_cipher_auth & SEC_TYPE_MASK; } static u8 pre_parse_finished_bd3(struct bd_status *status, void *resp) { struct sec_sqe3 *bd3 = resp; status->done = le16_to_cpu(bd3->done_flag) & SEC_DONE_MASK; status->icv = (le16_to_cpu(bd3->done_flag) & SEC_ICV_MASK) >> 1; status->flag = (le16_to_cpu(bd3->done_flag) & SEC_FLAG_MASK) >> SEC_FLAG_OFFSET; status->tag = le64_to_cpu(bd3->tag); status->err_type = bd3->error_type; return le32_to_cpu(bd3->bd_param) & SEC_TYPE_MASK; } static int sec_cb_status_check(struct sec_req *req, struct bd_status *status) { struct sec_ctx *ctx = req->ctx; if (unlikely(req->err_type || status->done != SEC_SQE_DONE)) { dev_err_ratelimited(ctx->dev, "err_type[%d], done[%u]\n", req->err_type, status->done); return -EIO; } if (unlikely(ctx->alg_type == SEC_SKCIPHER)) { if (unlikely(status->flag != SEC_SQE_CFLAG)) { dev_err_ratelimited(ctx->dev, "flag[%u]\n", status->flag); return -EIO; } } else if (unlikely(ctx->alg_type == SEC_AEAD)) { if (unlikely(status->flag != SEC_SQE_AEAD_FLAG || status->icv == SEC_ICV_ERR)) { dev_err_ratelimited(ctx->dev, "flag[%u], icv[%u]\n", status->flag, status->icv); return -EBADMSG; } } return 0; } static void sec_req_cb(struct hisi_qp *qp, void *resp) { struct sec_qp_ctx *qp_ctx = qp->qp_ctx; struct sec_dfx *dfx = &qp_ctx->ctx->sec->debug.dfx; u8 type_supported = qp_ctx->ctx->type_supported; struct bd_status status; struct sec_ctx *ctx; struct sec_req *req; int err; u8 type; if (type_supported == SEC_BD_TYPE2) { type = pre_parse_finished_bd(&status, resp); req = qp_ctx->req_list[status.tag]; } else { type = pre_parse_finished_bd3(&status, resp); req = (void *)(uintptr_t)status.tag; } if (unlikely(type != type_supported)) { atomic64_inc(&dfx->err_bd_cnt); pr_err("err bd type [%u]\n", type); return; } if (unlikely(!req)) { atomic64_inc(&dfx->invalid_req_cnt); atomic_inc(&qp->qp_status.used); return; } req->err_type = status.err_type; ctx = req->ctx; err = sec_cb_status_check(req, &status); if (err) atomic64_inc(&dfx->done_flag_cnt); atomic64_inc(&dfx->recv_cnt); ctx->req_op->buf_unmap(ctx, req); ctx->req_op->callback(ctx, req, err); } static int sec_bd_send(struct sec_ctx *ctx, struct 
sec_req *req) { struct sec_qp_ctx *qp_ctx = req->qp_ctx; int ret; if (ctx->fake_req_limit <= atomic_read(&qp_ctx->qp->qp_status.used) && !(req->flag & CRYPTO_TFM_REQ_MAY_BACKLOG)) return -EBUSY; spin_lock_bh(&qp_ctx->req_lock); ret = hisi_qp_send(qp_ctx->qp, &req->sec_sqe); if (ctx->fake_req_limit <= atomic_read(&qp_ctx->qp->qp_status.used) && !ret) { list_add_tail(&req->backlog_head, &qp_ctx->backlog); atomic64_inc(&ctx->sec->debug.dfx.send_cnt); atomic64_inc(&ctx->sec->debug.dfx.send_busy_cnt); spin_unlock_bh(&qp_ctx->req_lock); return -EBUSY; } spin_unlock_bh(&qp_ctx->req_lock); if (unlikely(ret == -EBUSY)) return -ENOBUFS; if (likely(!ret)) { ret = -EINPROGRESS; atomic64_inc(&ctx->sec->debug.dfx.send_cnt); } return ret; } /* Get DMA memory resources */ static int sec_alloc_civ_resource(struct device *dev, struct sec_alg_res *res) { u16 q_depth = res->depth; int i; res->c_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), &res->c_ivin_dma, GFP_KERNEL); if (!res->c_ivin) return -ENOMEM; for (i = 1; i < q_depth; i++) { res[i].c_ivin_dma = res->c_ivin_dma + i * SEC_IV_SIZE; res[i].c_ivin = res->c_ivin + i * SEC_IV_SIZE; } return 0; } static void sec_free_civ_resource(struct device *dev, struct sec_alg_res *res) { if (res->c_ivin) dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), res->c_ivin, res->c_ivin_dma); } static int sec_alloc_aiv_resource(struct device *dev, struct sec_alg_res *res) { u16 q_depth = res->depth; int i; res->a_ivin = dma_alloc_coherent(dev, SEC_TOTAL_IV_SZ(q_depth), &res->a_ivin_dma, GFP_KERNEL); if (!res->a_ivin) return -ENOMEM; for (i = 1; i < q_depth; i++) { res[i].a_ivin_dma = res->a_ivin_dma + i * SEC_IV_SIZE; res[i].a_ivin = res->a_ivin + i * SEC_IV_SIZE; } return 0; } static void sec_free_aiv_resource(struct device *dev, struct sec_alg_res *res) { if (res->a_ivin) dma_free_coherent(dev, SEC_TOTAL_IV_SZ(res->depth), res->a_ivin, res->a_ivin_dma); } static int sec_alloc_mac_resource(struct device *dev, struct sec_alg_res *res) { u16 q_depth = res->depth; int i; res->out_mac = dma_alloc_coherent(dev, SEC_TOTAL_MAC_SZ(q_depth) << 1, &res->out_mac_dma, GFP_KERNEL); if (!res->out_mac) return -ENOMEM; for (i = 1; i < q_depth; i++) { res[i].out_mac_dma = res->out_mac_dma + i * (SEC_MAX_MAC_LEN << 1); res[i].out_mac = res->out_mac + i * (SEC_MAX_MAC_LEN << 1); } return 0; } static void sec_free_mac_resource(struct device *dev, struct sec_alg_res *res) { if (res->out_mac) dma_free_coherent(dev, SEC_TOTAL_MAC_SZ(res->depth) << 1, res->out_mac, res->out_mac_dma); } static void sec_free_pbuf_resource(struct device *dev, struct sec_alg_res *res) { if (res->pbuf) dma_free_coherent(dev, SEC_TOTAL_PBUF_SZ(res->depth), res->pbuf, res->pbuf_dma); } /* * To improve performance, pbuffer is used for * small packets (< 512Bytes) as IOMMU translation using. 
*/ static int sec_alloc_pbuf_resource(struct device *dev, struct sec_alg_res *res) { u16 q_depth = res->depth; int size = SEC_PBUF_PAGE_NUM(q_depth); int pbuf_page_offset; int i, j, k; res->pbuf = dma_alloc_coherent(dev, SEC_TOTAL_PBUF_SZ(q_depth), &res->pbuf_dma, GFP_KERNEL); if (!res->pbuf) return -ENOMEM; /* * SEC_PBUF_PKG contains data pbuf, iv and * out_mac : <SEC_PBUF|SEC_IV|SEC_MAC> * Every PAGE contains six SEC_PBUF_PKG * The sec_qp_ctx contains QM_Q_DEPTH numbers of SEC_PBUF_PKG * So we need SEC_PBUF_PAGE_NUM numbers of PAGE * for the SEC_TOTAL_PBUF_SZ */ for (i = 0; i <= size; i++) { pbuf_page_offset = PAGE_SIZE * i; for (j = 0; j < SEC_PBUF_NUM; j++) { k = i * SEC_PBUF_NUM + j; if (k == q_depth) break; res[k].pbuf = res->pbuf + j * SEC_PBUF_PKG + pbuf_page_offset; res[k].pbuf_dma = res->pbuf_dma + j * SEC_PBUF_PKG + pbuf_page_offset; } } return 0; } static int sec_alg_resource_alloc(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { struct sec_alg_res *res = qp_ctx->res; struct device *dev = ctx->dev; int ret; ret = sec_alloc_civ_resource(dev, res); if (ret) return ret; if (ctx->alg_type == SEC_AEAD) { ret = sec_alloc_aiv_resource(dev, res); if (ret) goto alloc_aiv_fail; ret = sec_alloc_mac_resource(dev, res); if (ret) goto alloc_mac_fail; } if (ctx->pbuf_supported) { ret = sec_alloc_pbuf_resource(dev, res); if (ret) { dev_err(dev, "fail to alloc pbuf dma resource!\n"); goto alloc_pbuf_fail; } } return 0; alloc_pbuf_fail: if (ctx->alg_type == SEC_AEAD) sec_free_mac_resource(dev, qp_ctx->res); alloc_mac_fail: if (ctx->alg_type == SEC_AEAD) sec_free_aiv_resource(dev, res); alloc_aiv_fail: sec_free_civ_resource(dev, res); return ret; } static void sec_alg_resource_free(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { struct device *dev = ctx->dev; sec_free_civ_resource(dev, qp_ctx->res); if (ctx->pbuf_supported) sec_free_pbuf_resource(dev, qp_ctx->res); if (ctx->alg_type == SEC_AEAD) sec_free_mac_resource(dev, qp_ctx->res); } static int sec_alloc_qp_ctx_resource(struct hisi_qm *qm, struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { u16 q_depth = qp_ctx->qp->sq_depth; struct device *dev = ctx->dev; int ret = -ENOMEM; qp_ctx->req_list = kcalloc(q_depth, sizeof(struct sec_req *), GFP_KERNEL); if (!qp_ctx->req_list) return ret; qp_ctx->res = kcalloc(q_depth, sizeof(struct sec_alg_res), GFP_KERNEL); if (!qp_ctx->res) goto err_free_req_list; qp_ctx->res->depth = q_depth; qp_ctx->c_in_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); if (IS_ERR(qp_ctx->c_in_pool)) { dev_err(dev, "fail to create sgl pool for input!\n"); goto err_free_res; } qp_ctx->c_out_pool = hisi_acc_create_sgl_pool(dev, q_depth, SEC_SGL_SGE_NR); if (IS_ERR(qp_ctx->c_out_pool)) { dev_err(dev, "fail to create sgl pool for output!\n"); goto err_free_c_in_pool; } ret = sec_alg_resource_alloc(ctx, qp_ctx); if (ret) goto err_free_c_out_pool; return 0; err_free_c_out_pool: hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); err_free_c_in_pool: hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); err_free_res: kfree(qp_ctx->res); err_free_req_list: kfree(qp_ctx->req_list); return ret; } static void sec_free_qp_ctx_resource(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { struct device *dev = ctx->dev; sec_alg_resource_free(ctx, qp_ctx); hisi_acc_free_sgl_pool(dev, qp_ctx->c_out_pool); hisi_acc_free_sgl_pool(dev, qp_ctx->c_in_pool); kfree(qp_ctx->res); kfree(qp_ctx->req_list); } static int sec_create_qp_ctx(struct hisi_qm *qm, struct sec_ctx *ctx, int qp_ctx_id, int alg_type) { struct sec_qp_ctx *qp_ctx; struct hisi_qp 
*qp; int ret; qp_ctx = &ctx->qp_ctx[qp_ctx_id]; qp = ctx->qps[qp_ctx_id]; qp->req_type = 0; qp->qp_ctx = qp_ctx; qp_ctx->qp = qp; qp_ctx->ctx = ctx; qp->req_cb = sec_req_cb; spin_lock_init(&qp_ctx->req_lock); idr_init(&qp_ctx->req_idr); INIT_LIST_HEAD(&qp_ctx->backlog); ret = sec_alloc_qp_ctx_resource(qm, ctx, qp_ctx); if (ret) goto err_destroy_idr; ret = hisi_qm_start_qp(qp, 0); if (ret < 0) goto err_resource_free; return 0; err_resource_free: sec_free_qp_ctx_resource(ctx, qp_ctx); err_destroy_idr: idr_destroy(&qp_ctx->req_idr); return ret; } static void sec_release_qp_ctx(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { hisi_qm_stop_qp(qp_ctx->qp); sec_free_qp_ctx_resource(ctx, qp_ctx); idr_destroy(&qp_ctx->req_idr); } static int sec_ctx_base_init(struct sec_ctx *ctx) { struct sec_dev *sec; int i, ret; ctx->qps = sec_create_qps(); if (!ctx->qps) { pr_err("Can not create sec qps!\n"); return -ENODEV; } sec = container_of(ctx->qps[0]->qm, struct sec_dev, qm); ctx->sec = sec; ctx->dev = &sec->qm.pdev->dev; ctx->hlf_q_num = sec->ctx_q_num >> 1; ctx->pbuf_supported = ctx->sec->iommu_used; /* Half of queue depth is taken as fake requests limit in the queue. */ ctx->fake_req_limit = ctx->qps[0]->sq_depth >> 1; ctx->qp_ctx = kcalloc(sec->ctx_q_num, sizeof(struct sec_qp_ctx), GFP_KERNEL); if (!ctx->qp_ctx) { ret = -ENOMEM; goto err_destroy_qps; } for (i = 0; i < sec->ctx_q_num; i++) { ret = sec_create_qp_ctx(&sec->qm, ctx, i, 0); if (ret) goto err_sec_release_qp_ctx; } return 0; err_sec_release_qp_ctx: for (i = i - 1; i >= 0; i--) sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); kfree(ctx->qp_ctx); err_destroy_qps: sec_destroy_qps(ctx->qps, sec->ctx_q_num); return ret; } static void sec_ctx_base_uninit(struct sec_ctx *ctx) { int i; for (i = 0; i < ctx->sec->ctx_q_num; i++) sec_release_qp_ctx(ctx, &ctx->qp_ctx[i]); sec_destroy_qps(ctx->qps, ctx->sec->ctx_q_num); kfree(ctx->qp_ctx); } static int sec_cipher_init(struct sec_ctx *ctx) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; c_ctx->c_key = dma_alloc_coherent(ctx->dev, SEC_MAX_KEY_SIZE, &c_ctx->c_key_dma, GFP_KERNEL); if (!c_ctx->c_key) return -ENOMEM; return 0; } static void sec_cipher_uninit(struct sec_ctx *ctx) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; memzero_explicit(c_ctx->c_key, SEC_MAX_KEY_SIZE); dma_free_coherent(ctx->dev, SEC_MAX_KEY_SIZE, c_ctx->c_key, c_ctx->c_key_dma); } static int sec_auth_init(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; a_ctx->a_key = dma_alloc_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, &a_ctx->a_key_dma, GFP_KERNEL); if (!a_ctx->a_key) return -ENOMEM; return 0; } static void sec_auth_uninit(struct sec_ctx *ctx) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; memzero_explicit(a_ctx->a_key, SEC_MAX_AKEY_SIZE); dma_free_coherent(ctx->dev, SEC_MAX_AKEY_SIZE, a_ctx->a_key, a_ctx->a_key_dma); } static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm) { const char *alg = crypto_tfm_alg_name(&tfm->base); struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; c_ctx->fallback = false; /* Currently, only XTS mode need fallback tfm when using 192bit key */ if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ))) return 0; c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(c_ctx->fbtfm)) { pr_err("failed to alloc xts mode fallback tfm!\n"); return PTR_ERR(c_ctx->fbtfm); } return 0; } static int sec_skcipher_init(struct crypto_skcipher *tfm) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); int ret; ctx->alg_type = SEC_SKCIPHER; 
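/* Reserve per-request driver state (struct sec_req) in every skcipher request, then sanity-check the algorithm's IV size against the hardware IV buffer limit. */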
crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_req)); ctx->c_ctx.ivsize = crypto_skcipher_ivsize(tfm); if (ctx->c_ctx.ivsize > SEC_IV_SIZE) { pr_err("get error skcipher iv size!\n"); return -EINVAL; } ret = sec_ctx_base_init(ctx); if (ret) return ret; ret = sec_cipher_init(ctx); if (ret) goto err_cipher_init; ret = sec_skcipher_fbtfm_init(tfm); if (ret) goto err_fbtfm_init; return 0; err_fbtfm_init: sec_cipher_uninit(ctx); err_cipher_init: sec_ctx_base_uninit(ctx); return ret; } static void sec_skcipher_uninit(struct crypto_skcipher *tfm) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); if (ctx->c_ctx.fbtfm) crypto_free_sync_skcipher(ctx->c_ctx.fbtfm); sec_cipher_uninit(ctx); sec_ctx_base_uninit(ctx); } static int sec_skcipher_3des_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen, const enum sec_cmode c_mode) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; int ret; ret = verify_skcipher_des3_key(tfm, key); if (ret) return ret; switch (keylen) { case SEC_DES3_2KEY_SIZE: c_ctx->c_key_len = SEC_CKEY_3DES_2KEY; break; case SEC_DES3_3KEY_SIZE: c_ctx->c_key_len = SEC_CKEY_3DES_3KEY; break; default: return -EINVAL; } return 0; } static int sec_skcipher_aes_sm4_setkey(struct sec_cipher_ctx *c_ctx, const u32 keylen, const enum sec_cmode c_mode) { if (c_mode == SEC_CMODE_XTS) { switch (keylen) { case SEC_XTS_MIN_KEY_SIZE: c_ctx->c_key_len = SEC_CKEY_128BIT; break; case SEC_XTS_MID_KEY_SIZE: c_ctx->fallback = true; break; case SEC_XTS_MAX_KEY_SIZE: c_ctx->c_key_len = SEC_CKEY_256BIT; break; default: pr_err("hisi_sec2: xts mode key error!\n"); return -EINVAL; } } else { if (c_ctx->c_alg == SEC_CALG_SM4 && keylen != AES_KEYSIZE_128) { pr_err("hisi_sec2: sm4 key error!\n"); return -EINVAL; } else { switch (keylen) { case AES_KEYSIZE_128: c_ctx->c_key_len = SEC_CKEY_128BIT; break; case AES_KEYSIZE_192: c_ctx->c_key_len = SEC_CKEY_192BIT; break; case AES_KEYSIZE_256: c_ctx->c_key_len = SEC_CKEY_256BIT; break; default: pr_err("hisi_sec2: aes key error!\n"); return -EINVAL; } } } return 0; } static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key, const u32 keylen, const enum sec_calg c_alg, const enum sec_cmode c_mode) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; struct device *dev = ctx->dev; int ret; if (c_mode == SEC_CMODE_XTS) { ret = xts_verify_key(tfm, key, keylen); if (ret) { dev_err(dev, "xts mode key err!\n"); return ret; } } c_ctx->c_alg = c_alg; c_ctx->c_mode = c_mode; switch (c_alg) { case SEC_CALG_3DES: ret = sec_skcipher_3des_setkey(tfm, key, keylen, c_mode); break; case SEC_CALG_AES: case SEC_CALG_SM4: ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode); break; default: return -EINVAL; } if (ret) { dev_err(dev, "set sec key err!\n"); return ret; } memcpy(c_ctx->c_key, key, keylen); if (c_ctx->fallback && c_ctx->fbtfm) { ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen); if (ret) { dev_err(dev, "failed to set fallback skcipher key!\n"); return ret; } } return 0; } #define GEN_SEC_SETKEY_FUNC(name, c_alg, c_mode) \ static int sec_setkey_##name(struct crypto_skcipher *tfm, const u8 *key,\ u32 keylen) \ { \ return sec_skcipher_setkey(tfm, key, keylen, c_alg, c_mode); \ } GEN_SEC_SETKEY_FUNC(aes_ecb, SEC_CALG_AES, SEC_CMODE_ECB) GEN_SEC_SETKEY_FUNC(aes_cbc, SEC_CALG_AES, SEC_CMODE_CBC) GEN_SEC_SETKEY_FUNC(aes_xts, SEC_CALG_AES, SEC_CMODE_XTS) GEN_SEC_SETKEY_FUNC(aes_ofb, SEC_CALG_AES, SEC_CMODE_OFB) GEN_SEC_SETKEY_FUNC(aes_cfb, SEC_CALG_AES, 
SEC_CMODE_CFB) GEN_SEC_SETKEY_FUNC(aes_ctr, SEC_CALG_AES, SEC_CMODE_CTR) GEN_SEC_SETKEY_FUNC(3des_ecb, SEC_CALG_3DES, SEC_CMODE_ECB) GEN_SEC_SETKEY_FUNC(3des_cbc, SEC_CALG_3DES, SEC_CMODE_CBC) GEN_SEC_SETKEY_FUNC(sm4_xts, SEC_CALG_SM4, SEC_CMODE_XTS) GEN_SEC_SETKEY_FUNC(sm4_cbc, SEC_CALG_SM4, SEC_CMODE_CBC) GEN_SEC_SETKEY_FUNC(sm4_ofb, SEC_CALG_SM4, SEC_CMODE_OFB) GEN_SEC_SETKEY_FUNC(sm4_cfb, SEC_CALG_SM4, SEC_CMODE_CFB) GEN_SEC_SETKEY_FUNC(sm4_ctr, SEC_CALG_SM4, SEC_CMODE_CTR) static int sec_cipher_pbuf_map(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *src) { struct sec_aead_req *a_req = &req->aead_req; struct aead_request *aead_req = a_req->aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct device *dev = ctx->dev; int copy_size, pbuf_length; int req_id = req->req_id; struct crypto_aead *tfm; size_t authsize; u8 *mac_offset; if (ctx->alg_type == SEC_AEAD) copy_size = aead_req->cryptlen + aead_req->assoclen; else copy_size = c_req->c_len; pbuf_length = sg_copy_to_buffer(src, sg_nents(src), qp_ctx->res[req_id].pbuf, copy_size); if (unlikely(pbuf_length != copy_size)) { dev_err(dev, "copy src data to pbuf error!\n"); return -EINVAL; } if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { tfm = crypto_aead_reqtfm(aead_req); authsize = crypto_aead_authsize(tfm); mac_offset = qp_ctx->res[req_id].pbuf + copy_size - authsize; memcpy(a_req->out_mac, mac_offset, authsize); } req->in_dma = qp_ctx->res[req_id].pbuf_dma; c_req->c_out_dma = req->in_dma; return 0; } static void sec_cipher_pbuf_unmap(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *dst) { struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; int copy_size, pbuf_length; int req_id = req->req_id; if (ctx->alg_type == SEC_AEAD) copy_size = c_req->c_len + aead_req->assoclen; else copy_size = c_req->c_len; pbuf_length = sg_copy_from_buffer(dst, sg_nents(dst), qp_ctx->res[req_id].pbuf, copy_size); if (unlikely(pbuf_length != copy_size)) dev_err(ctx->dev, "copy pbuf data to dst error!\n"); } static int sec_aead_mac_init(struct sec_aead_req *req) { struct aead_request *aead_req = req->aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); size_t authsize = crypto_aead_authsize(tfm); u8 *mac_out = req->out_mac; struct scatterlist *sgl = aead_req->src; size_t copy_size; off_t skip_size; /* Copy input mac */ skip_size = aead_req->assoclen + aead_req->cryptlen - authsize; copy_size = sg_pcopy_to_buffer(sgl, sg_nents(sgl), mac_out, authsize, skip_size); if (unlikely(copy_size != authsize)) return -EINVAL; return 0; } static int sec_cipher_map(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *src, struct scatterlist *dst) { struct sec_cipher_req *c_req = &req->c_req; struct sec_aead_req *a_req = &req->aead_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct sec_alg_res *res = &qp_ctx->res[req->req_id]; struct device *dev = ctx->dev; int ret; if (req->use_pbuf) { c_req->c_ivin = res->pbuf + SEC_PBUF_IV_OFFSET; c_req->c_ivin_dma = res->pbuf_dma + SEC_PBUF_IV_OFFSET; if (ctx->alg_type == SEC_AEAD) { a_req->a_ivin = res->a_ivin; a_req->a_ivin_dma = res->a_ivin_dma; a_req->out_mac = res->pbuf + SEC_PBUF_MAC_OFFSET; a_req->out_mac_dma = res->pbuf_dma + SEC_PBUF_MAC_OFFSET; } ret = sec_cipher_pbuf_map(ctx, req, src); return ret; } c_req->c_ivin = res->c_ivin; c_req->c_ivin_dma = res->c_ivin_dma; if (ctx->alg_type == SEC_AEAD) { a_req->a_ivin = res->a_ivin; 
a_req->a_ivin_dma = res->a_ivin_dma; a_req->out_mac = res->out_mac; a_req->out_mac_dma = res->out_mac_dma; } req->in = hisi_acc_sg_buf_map_to_hw_sgl(dev, src, qp_ctx->c_in_pool, req->req_id, &req->in_dma); if (IS_ERR(req->in)) { dev_err(dev, "fail to dma map input sgl buffers!\n"); return PTR_ERR(req->in); } if (!c_req->encrypt && ctx->alg_type == SEC_AEAD) { ret = sec_aead_mac_init(a_req); if (unlikely(ret)) { dev_err(dev, "fail to init mac data for ICV!\n"); return ret; } } if (dst == src) { c_req->c_out = req->in; c_req->c_out_dma = req->in_dma; } else { c_req->c_out = hisi_acc_sg_buf_map_to_hw_sgl(dev, dst, qp_ctx->c_out_pool, req->req_id, &c_req->c_out_dma); if (IS_ERR(c_req->c_out)) { dev_err(dev, "fail to dma map output sgl buffers!\n"); hisi_acc_sg_buf_unmap(dev, src, req->in); return PTR_ERR(c_req->c_out); } } return 0; } static void sec_cipher_unmap(struct sec_ctx *ctx, struct sec_req *req, struct scatterlist *src, struct scatterlist *dst) { struct sec_cipher_req *c_req = &req->c_req; struct device *dev = ctx->dev; if (req->use_pbuf) { sec_cipher_pbuf_unmap(ctx, req, dst); } else { if (dst != src) hisi_acc_sg_buf_unmap(dev, src, req->in); hisi_acc_sg_buf_unmap(dev, dst, c_req->c_out); } } static int sec_skcipher_sgl_map(struct sec_ctx *ctx, struct sec_req *req) { struct skcipher_request *sq = req->c_req.sk_req; return sec_cipher_map(ctx, req, sq->src, sq->dst); } static void sec_skcipher_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req) { struct skcipher_request *sq = req->c_req.sk_req; sec_cipher_unmap(ctx, req, sq->src, sq->dst); } static int sec_aead_aes_set_key(struct sec_cipher_ctx *c_ctx, struct crypto_authenc_keys *keys) { switch (keys->enckeylen) { case AES_KEYSIZE_128: c_ctx->c_key_len = SEC_CKEY_128BIT; break; case AES_KEYSIZE_192: c_ctx->c_key_len = SEC_CKEY_192BIT; break; case AES_KEYSIZE_256: c_ctx->c_key_len = SEC_CKEY_256BIT; break; default: pr_err("hisi_sec2: aead aes key error!\n"); return -EINVAL; } memcpy(c_ctx->c_key, keys->enckey, keys->enckeylen); return 0; } static int sec_aead_auth_set_key(struct sec_auth_ctx *ctx, struct crypto_authenc_keys *keys) { struct crypto_shash *hash_tfm = ctx->hash_tfm; int blocksize, digestsize, ret; if (!keys->authkeylen) { pr_err("hisi_sec2: aead auth key error!\n"); return -EINVAL; } blocksize = crypto_shash_blocksize(hash_tfm); digestsize = crypto_shash_digestsize(hash_tfm); if (keys->authkeylen > blocksize) { ret = crypto_shash_tfm_digest(hash_tfm, keys->authkey, keys->authkeylen, ctx->a_key); if (ret) { pr_err("hisi_sec2: aead auth digest error!\n"); return -EINVAL; } ctx->a_key_len = digestsize; } else { memcpy(ctx->a_key, keys->authkey, keys->authkeylen); ctx->a_key_len = keys->authkeylen; } return 0; } static int sec_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize) { struct crypto_tfm *tfm = crypto_aead_tfm(aead); struct sec_ctx *ctx = crypto_tfm_ctx(tfm); struct sec_auth_ctx *a_ctx = &ctx->a_ctx; if (unlikely(a_ctx->fallback_aead_tfm)) return crypto_aead_setauthsize(a_ctx->fallback_aead_tfm, authsize); return 0; } static int sec_aead_fallback_setkey(struct sec_auth_ctx *a_ctx, struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { crypto_aead_clear_flags(a_ctx->fallback_aead_tfm, CRYPTO_TFM_REQ_MASK); crypto_aead_set_flags(a_ctx->fallback_aead_tfm, crypto_aead_get_flags(tfm) & CRYPTO_TFM_REQ_MASK); return crypto_aead_setkey(a_ctx->fallback_aead_tfm, key, keylen); } static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key, const u32 keylen, const enum sec_hash_alg a_alg, const enum 
sec_calg c_alg, const enum sec_mac_len mac_len, const enum sec_cmode c_mode) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; struct sec_auth_ctx *a_ctx = &ctx->a_ctx; struct device *dev = ctx->dev; struct crypto_authenc_keys keys; int ret; ctx->a_ctx.a_alg = a_alg; ctx->c_ctx.c_alg = c_alg; ctx->a_ctx.mac_len = mac_len; c_ctx->c_mode = c_mode; if (c_mode == SEC_CMODE_CCM || c_mode == SEC_CMODE_GCM) { ret = sec_skcipher_aes_sm4_setkey(c_ctx, keylen, c_mode); if (ret) { dev_err(dev, "set sec aes ccm cipher key err!\n"); return ret; } memcpy(c_ctx->c_key, key, keylen); if (unlikely(a_ctx->fallback_aead_tfm)) { ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen); if (ret) return ret; } return 0; } if (crypto_authenc_extractkeys(&keys, key, keylen)) goto bad_key; ret = sec_aead_aes_set_key(c_ctx, &keys); if (ret) { dev_err(dev, "set sec cipher key err!\n"); goto bad_key; } ret = sec_aead_auth_set_key(&ctx->a_ctx, &keys); if (ret) { dev_err(dev, "set sec auth key err!\n"); goto bad_key; } if ((ctx->a_ctx.mac_len & SEC_SQE_LEN_RATE_MASK) || (ctx->a_ctx.a_key_len & SEC_SQE_LEN_RATE_MASK)) { dev_err(dev, "MAC or AUTH key length error!\n"); goto bad_key; } return 0; bad_key: memzero_explicit(&keys, sizeof(struct crypto_authenc_keys)); return -EINVAL; } #define GEN_SEC_AEAD_SETKEY_FUNC(name, aalg, calg, maclen, cmode) \ static int sec_setkey_##name(struct crypto_aead *tfm, const u8 *key, \ u32 keylen) \ { \ return sec_aead_setkey(tfm, key, keylen, aalg, calg, maclen, cmode);\ } GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha1, SEC_A_HMAC_SHA1, SEC_CALG_AES, SEC_HMAC_SHA1_MAC, SEC_CMODE_CBC) GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha256, SEC_A_HMAC_SHA256, SEC_CALG_AES, SEC_HMAC_SHA256_MAC, SEC_CMODE_CBC) GEN_SEC_AEAD_SETKEY_FUNC(aes_cbc_sha512, SEC_A_HMAC_SHA512, SEC_CALG_AES, SEC_HMAC_SHA512_MAC, SEC_CMODE_CBC) GEN_SEC_AEAD_SETKEY_FUNC(aes_ccm, 0, SEC_CALG_AES, SEC_HMAC_CCM_MAC, SEC_CMODE_CCM) GEN_SEC_AEAD_SETKEY_FUNC(aes_gcm, 0, SEC_CALG_AES, SEC_HMAC_GCM_MAC, SEC_CMODE_GCM) GEN_SEC_AEAD_SETKEY_FUNC(sm4_ccm, 0, SEC_CALG_SM4, SEC_HMAC_CCM_MAC, SEC_CMODE_CCM) GEN_SEC_AEAD_SETKEY_FUNC(sm4_gcm, 0, SEC_CALG_SM4, SEC_HMAC_GCM_MAC, SEC_CMODE_GCM) static int sec_aead_sgl_map(struct sec_ctx *ctx, struct sec_req *req) { struct aead_request *aq = req->aead_req.aead_req; return sec_cipher_map(ctx, req, aq->src, aq->dst); } static void sec_aead_sgl_unmap(struct sec_ctx *ctx, struct sec_req *req) { struct aead_request *aq = req->aead_req.aead_req; sec_cipher_unmap(ctx, req, aq->src, aq->dst); } static int sec_request_transfer(struct sec_ctx *ctx, struct sec_req *req) { int ret; ret = ctx->req_op->buf_map(ctx, req); if (unlikely(ret)) return ret; ctx->req_op->do_transfer(ctx, req); ret = ctx->req_op->bd_fill(ctx, req); if (unlikely(ret)) goto unmap_req_buf; return ret; unmap_req_buf: ctx->req_op->buf_unmap(ctx, req); return ret; } static void sec_request_untransfer(struct sec_ctx *ctx, struct sec_req *req) { ctx->req_op->buf_unmap(ctx, req); } static void sec_skcipher_copy_iv(struct sec_ctx *ctx, struct sec_req *req) { struct skcipher_request *sk_req = req->c_req.sk_req; struct sec_cipher_req *c_req = &req->c_req; memcpy(c_req->c_ivin, sk_req->iv, ctx->c_ctx.ivsize); } static int sec_skcipher_bd_fill(struct sec_ctx *ctx, struct sec_req *req) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; struct sec_cipher_req *c_req = &req->c_req; struct sec_sqe *sec_sqe = &req->sec_sqe; u8 scene, sa_type, da_type; u8 bd_type, cipher; u8 de = 0; memset(sec_sqe, 0, sizeof(struct sec_sqe)); 
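/* Program the type2 BD with the DMA addresses of the cipher key, IV, source and destination buffers, followed by the cipher mode, algorithm and key-length fields. */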
sec_sqe->type2.c_key_addr = cpu_to_le64(c_ctx->c_key_dma); sec_sqe->type2.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); sec_sqe->type2.data_src_addr = cpu_to_le64(req->in_dma); sec_sqe->type2.data_dst_addr = cpu_to_le64(c_req->c_out_dma); sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_mode) << SEC_CMODE_OFFSET); sec_sqe->type2.c_alg = c_ctx->c_alg; sec_sqe->type2.icvw_kmode |= cpu_to_le16(((u16)c_ctx->c_key_len) << SEC_CKEY_OFFSET); bd_type = SEC_BD_TYPE2; if (c_req->encrypt) cipher = SEC_CIPHER_ENC << SEC_CIPHER_OFFSET; else cipher = SEC_CIPHER_DEC << SEC_CIPHER_OFFSET; sec_sqe->type_cipher_auth = bd_type | cipher; /* Set destination and source address type */ if (req->use_pbuf) { sa_type = SEC_PBUF << SEC_SRC_SGL_OFFSET; da_type = SEC_PBUF << SEC_DST_SGL_OFFSET; } else { sa_type = SEC_SGL << SEC_SRC_SGL_OFFSET; da_type = SEC_SGL << SEC_DST_SGL_OFFSET; } sec_sqe->sdm_addr_type |= da_type; scene = SEC_COMM_SCENE << SEC_SCENE_OFFSET; if (req->in_dma != c_req->c_out_dma) de = 0x1 << SEC_DE_OFFSET; sec_sqe->sds_sa_type = (de | scene | sa_type); sec_sqe->type2.clen_ivhlen |= cpu_to_le32(c_req->c_len); sec_sqe->type2.tag = cpu_to_le16((u16)req->req_id); return 0; } static int sec_skcipher_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) { struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; struct sec_cipher_req *c_req = &req->c_req; u32 bd_param = 0; u16 cipher; memset(sec_sqe3, 0, sizeof(struct sec_sqe3)); sec_sqe3->c_key_addr = cpu_to_le64(c_ctx->c_key_dma); sec_sqe3->no_scene.c_ivin_addr = cpu_to_le64(c_req->c_ivin_dma); sec_sqe3->data_src_addr = cpu_to_le64(req->in_dma); sec_sqe3->data_dst_addr = cpu_to_le64(c_req->c_out_dma); sec_sqe3->c_mode_alg = ((u8)c_ctx->c_alg << SEC_CALG_OFFSET_V3) | c_ctx->c_mode; sec_sqe3->c_icv_key |= cpu_to_le16(((u16)c_ctx->c_key_len) << SEC_CKEY_OFFSET_V3); if (c_req->encrypt) cipher = SEC_CIPHER_ENC; else cipher = SEC_CIPHER_DEC; sec_sqe3->c_icv_key |= cpu_to_le16(cipher); /* Set the CTR counter mode is 128bit rollover */ sec_sqe3->auth_mac_key = cpu_to_le32((u32)SEC_CTR_CNT_ROLLOVER << SEC_CTR_CNT_OFFSET); if (req->use_pbuf) { bd_param |= SEC_PBUF << SEC_SRC_SGL_OFFSET_V3; bd_param |= SEC_PBUF << SEC_DST_SGL_OFFSET_V3; } else { bd_param |= SEC_SGL << SEC_SRC_SGL_OFFSET_V3; bd_param |= SEC_SGL << SEC_DST_SGL_OFFSET_V3; } bd_param |= SEC_COMM_SCENE << SEC_SCENE_OFFSET_V3; if (req->in_dma != c_req->c_out_dma) bd_param |= 0x1 << SEC_DE_OFFSET_V3; bd_param |= SEC_BD_TYPE3; sec_sqe3->bd_param = cpu_to_le32(bd_param); sec_sqe3->c_len_ivin |= cpu_to_le32(c_req->c_len); sec_sqe3->tag = cpu_to_le64(req); return 0; } /* increment counter (128-bit int) */ static void ctr_iv_inc(__u8 *counter, __u8 bits, __u32 nums) { do { --bits; nums += counter[bits]; counter[bits] = nums & BITS_MASK; nums >>= BYTE_BITS; } while (bits && nums); } static void sec_update_iv(struct sec_req *req, enum sec_alg_type alg_type) { struct aead_request *aead_req = req->aead_req.aead_req; struct skcipher_request *sk_req = req->c_req.sk_req; u32 iv_size = req->ctx->c_ctx.ivsize; struct scatterlist *sgl; unsigned int cryptlen; size_t sz; u8 *iv; if (req->c_req.encrypt) sgl = alg_type == SEC_SKCIPHER ? sk_req->dst : aead_req->dst; else sgl = alg_type == SEC_SKCIPHER ? 
sk_req->src : aead_req->src; if (alg_type == SEC_SKCIPHER) { iv = sk_req->iv; cryptlen = sk_req->cryptlen; } else { iv = aead_req->iv; cryptlen = aead_req->cryptlen; } if (req->ctx->c_ctx.c_mode == SEC_CMODE_CBC) { sz = sg_pcopy_to_buffer(sgl, sg_nents(sgl), iv, iv_size, cryptlen - iv_size); if (unlikely(sz != iv_size)) dev_err(req->ctx->dev, "copy output iv error!\n"); } else { sz = cryptlen / iv_size; if (cryptlen % iv_size) sz += 1; ctr_iv_inc(iv, iv_size, sz); } } static struct sec_req *sec_back_req_clear(struct sec_ctx *ctx, struct sec_qp_ctx *qp_ctx) { struct sec_req *backlog_req = NULL; spin_lock_bh(&qp_ctx->req_lock); if (ctx->fake_req_limit >= atomic_read(&qp_ctx->qp->qp_status.used) && !list_empty(&qp_ctx->backlog)) { backlog_req = list_first_entry(&qp_ctx->backlog, typeof(*backlog_req), backlog_head); list_del(&backlog_req->backlog_head); } spin_unlock_bh(&qp_ctx->req_lock); return backlog_req; } static void sec_skcipher_callback(struct sec_ctx *ctx, struct sec_req *req, int err) { struct skcipher_request *sk_req = req->c_req.sk_req; struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct skcipher_request *backlog_sk_req; struct sec_req *backlog_req; sec_free_req_id(req); /* IV output at encrypto of CBC/CTR mode */ if (!err && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || ctx->c_ctx.c_mode == SEC_CMODE_CTR) && req->c_req.encrypt) sec_update_iv(req, SEC_SKCIPHER); while (1) { backlog_req = sec_back_req_clear(ctx, qp_ctx); if (!backlog_req) break; backlog_sk_req = backlog_req->c_req.sk_req; skcipher_request_complete(backlog_sk_req, -EINPROGRESS); atomic64_inc(&ctx->sec->debug.dfx.recv_busy_cnt); } skcipher_request_complete(sk_req, err); } static void set_aead_auth_iv(struct sec_ctx *ctx, struct sec_req *req) { struct aead_request *aead_req = req->aead_req.aead_req; struct sec_cipher_req *c_req = &req->c_req; struct sec_aead_req *a_req = &req->aead_req; size_t authsize = ctx->a_ctx.mac_len; u32 data_size = aead_req->cryptlen; u8 flage = 0; u8 cm, cl; /* the specification has been checked in aead_iv_demension_check() */ cl = c_req->c_ivin[0] + 1; c_req->c_ivin[ctx->c_ctx.ivsize - cl] = 0x00; memset(&c_req->c_ivin[ctx->c_ctx.ivsize - cl], 0, cl); c_req->c_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = IV_CTR_INIT; /* the last 3bit is L' */ flage |= c_req->c_ivin[0] & IV_CL_MASK; /* the M' is bit3~bit5, the Flags is bit6 */ cm = (authsize - IV_CM_CAL_NUM) / IV_CM_CAL_NUM; flage |= cm << IV_CM_OFFSET; if (aead_req->assoclen) flage |= 0x01 << IV_FLAGS_OFFSET; memcpy(a_req->a_ivin, c_req->c_ivin, ctx->c_ctx.ivsize); a_req->a_ivin[0] = flage; /* * the last 32bit is counter's initial number, * but the nonce uses the first 16bit * the tail 16bit fill with the cipher length */ if (!c_req->encrypt) data_size = aead_req->cryptlen - authsize; a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE1] = data_size & IV_LAST_BYTE_MASK; data_size >>= IV_BYTE_OFFSET; a_req->a_ivin[ctx->c_ctx.ivsize - IV_LAST_BYTE2] = data_size & IV_LAST_BYTE_MASK; } static void sec_aead_set_iv(struct sec_ctx *ctx, struct sec_req *req) { struct aead_request *aead_req = req->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(aead_req); size_t authsize = crypto_aead_authsize(tfm); struct sec_cipher_req *c_req = &req->c_req; struct sec_aead_req *a_req = &req->aead_req; memcpy(c_req->c_ivin, aead_req->iv, ctx->c_ctx.ivsize); if (ctx->c_ctx.c_mode == SEC_CMODE_CCM) { /* * CCM 16Byte Cipher_IV: {1B_Flage,13B_IV,2B_counter}, * the counter must set to 0x01 */ ctx->a_ctx.mac_len = authsize; /* CCM 16Byte Auth_IV: 
{1B_AFlage,13B_IV,2B_Ptext_length} */ set_aead_auth_iv(ctx, req); } /* GCM 12Byte Cipher_IV == Auth_IV */ if (ctx->c_ctx.c_mode == SEC_CMODE_GCM) { ctx->a_ctx.mac_len = authsize; memcpy(a_req->a_ivin, c_req->c_ivin, SEC_AIV_SIZE); } } static void sec_auth_bd_fill_xcm(struct sec_auth_ctx *ctx, int dir, struct sec_req *req, struct sec_sqe *sec_sqe) { struct sec_aead_req *a_req = &req->aead_req; struct aead_request *aq = a_req->aead_req; /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ sec_sqe->type2.icvw_kmode |= cpu_to_le16((u16)ctx->mac_len); /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ sec_sqe->type2.a_key_addr = sec_sqe->type2.c_key_addr; sec_sqe->type2.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); sec_sqe->type_cipher_auth |= SEC_NO_AUTH << SEC_AUTH_OFFSET; if (dir) sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; else sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; sec_sqe->type2.alen_ivllen = cpu_to_le32(aq->assoclen); sec_sqe->type2.auth_src_offset = cpu_to_le16(0x0); sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma); } static void sec_auth_bd_fill_xcm_v3(struct sec_auth_ctx *ctx, int dir, struct sec_req *req, struct sec_sqe3 *sqe3) { struct sec_aead_req *a_req = &req->aead_req; struct aead_request *aq = a_req->aead_req; /* C_ICV_Len is MAC size, 0x4 ~ 0x10 */ sqe3->c_icv_key |= cpu_to_le16((u16)ctx->mac_len << SEC_MAC_OFFSET_V3); /* mode set to CCM/GCM, don't set {A_Alg, AKey_Len, MAC_Len} */ sqe3->a_key_addr = sqe3->c_key_addr; sqe3->auth_ivin.a_ivin_addr = cpu_to_le64(a_req->a_ivin_dma); sqe3->auth_mac_key |= SEC_NO_AUTH; if (dir) sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; else sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; sqe3->a_len_key = cpu_to_le32(aq->assoclen); sqe3->auth_src_offset = cpu_to_le16(0x0); sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); } static void sec_auth_bd_fill_ex(struct sec_auth_ctx *ctx, int dir, struct sec_req *req, struct sec_sqe *sec_sqe) { struct sec_aead_req *a_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; struct aead_request *aq = a_req->aead_req; sec_sqe->type2.a_key_addr = cpu_to_le64(ctx->a_key_dma); sec_sqe->type2.mac_key_alg = cpu_to_le32(ctx->mac_len / SEC_SQE_LEN_RATE); sec_sqe->type2.mac_key_alg |= cpu_to_le32((u32)((ctx->a_key_len) / SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET); sec_sqe->type2.mac_key_alg |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AEAD_ALG_OFFSET); if (dir) { sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE1 << SEC_AUTH_OFFSET; sec_sqe->sds_sa_type &= SEC_CIPHER_AUTH; } else { sec_sqe->type_cipher_auth |= SEC_AUTH_TYPE2 << SEC_AUTH_OFFSET; sec_sqe->sds_sa_type |= SEC_AUTH_CIPHER; } sec_sqe->type2.alen_ivllen = cpu_to_le32(c_req->c_len + aq->assoclen); sec_sqe->type2.cipher_src_offset = cpu_to_le16((u16)aq->assoclen); sec_sqe->type2.mac_addr = cpu_to_le64(a_req->out_mac_dma); } static int sec_aead_bd_fill(struct sec_ctx *ctx, struct sec_req *req) { struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; struct sec_sqe *sec_sqe = &req->sec_sqe; int ret; ret = sec_skcipher_bd_fill(ctx, req); if (unlikely(ret)) { dev_err(ctx->dev, "skcipher bd fill is error!\n"); return ret; } if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || ctx->c_ctx.c_mode == SEC_CMODE_GCM) sec_auth_bd_fill_xcm(auth_ctx, req->c_req.encrypt, req, sec_sqe); else sec_auth_bd_fill_ex(auth_ctx, req->c_req.encrypt, req, sec_sqe); return 0; } static void sec_auth_bd_fill_ex_v3(struct sec_auth_ctx *ctx, int dir, struct sec_req *req, struct sec_sqe3 *sqe3) { 
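/* Fill the authentication half of a type3 BD for the authenc() algorithms: auth key address, MAC and key lengths scaled by SEC_SQE_LEN_RATE, the auth algorithm, and the cipher/auth ordering for encrypt vs decrypt. */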
struct sec_aead_req *a_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; struct aead_request *aq = a_req->aead_req; sqe3->a_key_addr = cpu_to_le64(ctx->a_key_dma); sqe3->auth_mac_key |= cpu_to_le32((u32)(ctx->mac_len / SEC_SQE_LEN_RATE) << SEC_MAC_OFFSET_V3); sqe3->auth_mac_key |= cpu_to_le32((u32)(ctx->a_key_len / SEC_SQE_LEN_RATE) << SEC_AKEY_OFFSET_V3); sqe3->auth_mac_key |= cpu_to_le32((u32)(ctx->a_alg) << SEC_AUTH_ALG_OFFSET_V3); if (dir) { sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE1); sqe3->huk_iv_seq &= SEC_CIPHER_AUTH_V3; } else { sqe3->auth_mac_key |= cpu_to_le32((u32)SEC_AUTH_TYPE2); sqe3->huk_iv_seq |= SEC_AUTH_CIPHER_V3; } sqe3->a_len_key = cpu_to_le32(c_req->c_len + aq->assoclen); sqe3->cipher_src_offset = cpu_to_le16((u16)aq->assoclen); sqe3->mac_addr = cpu_to_le64(a_req->out_mac_dma); } static int sec_aead_bd_fill_v3(struct sec_ctx *ctx, struct sec_req *req) { struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; struct sec_sqe3 *sec_sqe3 = &req->sec_sqe3; int ret; ret = sec_skcipher_bd_fill_v3(ctx, req); if (unlikely(ret)) { dev_err(ctx->dev, "skcipher bd3 fill is error!\n"); return ret; } if (ctx->c_ctx.c_mode == SEC_CMODE_CCM || ctx->c_ctx.c_mode == SEC_CMODE_GCM) sec_auth_bd_fill_xcm_v3(auth_ctx, req->c_req.encrypt, req, sec_sqe3); else sec_auth_bd_fill_ex_v3(auth_ctx, req->c_req.encrypt, req, sec_sqe3); return 0; } static void sec_aead_callback(struct sec_ctx *c, struct sec_req *req, int err) { struct aead_request *a_req = req->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); struct sec_aead_req *aead_req = &req->aead_req; struct sec_cipher_req *c_req = &req->c_req; size_t authsize = crypto_aead_authsize(tfm); struct sec_qp_ctx *qp_ctx = req->qp_ctx; struct aead_request *backlog_aead_req; struct sec_req *backlog_req; size_t sz; if (!err && c->c_ctx.c_mode == SEC_CMODE_CBC && c_req->encrypt) sec_update_iv(req, SEC_AEAD); /* Copy output mac */ if (!err && c_req->encrypt) { struct scatterlist *sgl = a_req->dst; sz = sg_pcopy_from_buffer(sgl, sg_nents(sgl), aead_req->out_mac, authsize, a_req->cryptlen + a_req->assoclen); if (unlikely(sz != authsize)) { dev_err(c->dev, "copy out mac err!\n"); err = -EINVAL; } } sec_free_req_id(req); while (1) { backlog_req = sec_back_req_clear(c, qp_ctx); if (!backlog_req) break; backlog_aead_req = backlog_req->aead_req.aead_req; aead_request_complete(backlog_aead_req, -EINPROGRESS); atomic64_inc(&c->sec->debug.dfx.recv_busy_cnt); } aead_request_complete(a_req, err); } static void sec_request_uninit(struct sec_ctx *ctx, struct sec_req *req) { sec_free_req_id(req); sec_free_queue_id(ctx, req); } static int sec_request_init(struct sec_ctx *ctx, struct sec_req *req) { struct sec_qp_ctx *qp_ctx; int queue_id; /* To load balance */ queue_id = sec_alloc_queue_id(ctx, req); qp_ctx = &ctx->qp_ctx[queue_id]; req->req_id = sec_alloc_req_id(req, qp_ctx); if (unlikely(req->req_id < 0)) { sec_free_queue_id(ctx, req); return req->req_id; } return 0; } static int sec_process(struct sec_ctx *ctx, struct sec_req *req) { struct sec_cipher_req *c_req = &req->c_req; int ret; ret = sec_request_init(ctx, req); if (unlikely(ret)) return ret; ret = sec_request_transfer(ctx, req); if (unlikely(ret)) goto err_uninit_req; /* Output IV as decrypto */ if (!req->c_req.encrypt && (ctx->c_ctx.c_mode == SEC_CMODE_CBC || ctx->c_ctx.c_mode == SEC_CMODE_CTR)) sec_update_iv(req, ctx->alg_type); ret = ctx->req_op->bd_send(ctx, req); if (unlikely((ret != -EBUSY && ret != -EINPROGRESS) || (ret == -EBUSY && !(req->flag & 
CRYPTO_TFM_REQ_MAY_BACKLOG)))) { dev_err_ratelimited(ctx->dev, "send sec request failed!\n"); goto err_send_req; } return ret; err_send_req: /* As failing, restore the IV from user */ if (ctx->c_ctx.c_mode == SEC_CMODE_CBC && !req->c_req.encrypt) { if (ctx->alg_type == SEC_SKCIPHER) memcpy(req->c_req.sk_req->iv, c_req->c_ivin, ctx->c_ctx.ivsize); else memcpy(req->aead_req.aead_req->iv, c_req->c_ivin, ctx->c_ctx.ivsize); } sec_request_untransfer(ctx, req); err_uninit_req: sec_request_uninit(ctx, req); return ret; } static const struct sec_req_op sec_skcipher_req_ops = { .buf_map = sec_skcipher_sgl_map, .buf_unmap = sec_skcipher_sgl_unmap, .do_transfer = sec_skcipher_copy_iv, .bd_fill = sec_skcipher_bd_fill, .bd_send = sec_bd_send, .callback = sec_skcipher_callback, .process = sec_process, }; static const struct sec_req_op sec_aead_req_ops = { .buf_map = sec_aead_sgl_map, .buf_unmap = sec_aead_sgl_unmap, .do_transfer = sec_aead_set_iv, .bd_fill = sec_aead_bd_fill, .bd_send = sec_bd_send, .callback = sec_aead_callback, .process = sec_process, }; static const struct sec_req_op sec_skcipher_req_ops_v3 = { .buf_map = sec_skcipher_sgl_map, .buf_unmap = sec_skcipher_sgl_unmap, .do_transfer = sec_skcipher_copy_iv, .bd_fill = sec_skcipher_bd_fill_v3, .bd_send = sec_bd_send, .callback = sec_skcipher_callback, .process = sec_process, }; static const struct sec_req_op sec_aead_req_ops_v3 = { .buf_map = sec_aead_sgl_map, .buf_unmap = sec_aead_sgl_unmap, .do_transfer = sec_aead_set_iv, .bd_fill = sec_aead_bd_fill_v3, .bd_send = sec_bd_send, .callback = sec_aead_callback, .process = sec_process, }; static int sec_skcipher_ctx_init(struct crypto_skcipher *tfm) { struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); int ret; ret = sec_skcipher_init(tfm); if (ret) return ret; if (ctx->sec->qm.ver < QM_HW_V3) { ctx->type_supported = SEC_BD_TYPE2; ctx->req_op = &sec_skcipher_req_ops; } else { ctx->type_supported = SEC_BD_TYPE3; ctx->req_op = &sec_skcipher_req_ops_v3; } return ret; } static void sec_skcipher_ctx_exit(struct crypto_skcipher *tfm) { sec_skcipher_uninit(tfm); } static int sec_aead_init(struct crypto_aead *tfm) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); int ret; crypto_aead_set_reqsize(tfm, sizeof(struct sec_req)); ctx->alg_type = SEC_AEAD; ctx->c_ctx.ivsize = crypto_aead_ivsize(tfm); if (ctx->c_ctx.ivsize < SEC_AIV_SIZE || ctx->c_ctx.ivsize > SEC_IV_SIZE) { pr_err("get error aead iv size!\n"); return -EINVAL; } ret = sec_ctx_base_init(ctx); if (ret) return ret; if (ctx->sec->qm.ver < QM_HW_V3) { ctx->type_supported = SEC_BD_TYPE2; ctx->req_op = &sec_aead_req_ops; } else { ctx->type_supported = SEC_BD_TYPE3; ctx->req_op = &sec_aead_req_ops_v3; } ret = sec_auth_init(ctx); if (ret) goto err_auth_init; ret = sec_cipher_init(ctx); if (ret) goto err_cipher_init; return ret; err_cipher_init: sec_auth_uninit(ctx); err_auth_init: sec_ctx_base_uninit(ctx); return ret; } static void sec_aead_exit(struct crypto_aead *tfm) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); sec_cipher_uninit(ctx); sec_auth_uninit(ctx); sec_ctx_base_uninit(ctx); } static int sec_aead_ctx_init(struct crypto_aead *tfm, const char *hash_name) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); struct sec_auth_ctx *auth_ctx = &ctx->a_ctx; int ret; ret = sec_aead_init(tfm); if (ret) { pr_err("hisi_sec2: aead init error!\n"); return ret; } auth_ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); if (IS_ERR(auth_ctx->hash_tfm)) { dev_err(ctx->dev, "aead alloc shash error!\n"); sec_aead_exit(tfm); return PTR_ERR(auth_ctx->hash_tfm); } return 0; } 
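/* Undo sec_aead_ctx_init(): free the shash tfm used to condense over-long HMAC keys, then release the common AEAD context. */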
static void sec_aead_ctx_exit(struct crypto_aead *tfm) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_shash(ctx->a_ctx.hash_tfm); sec_aead_exit(tfm); } static int sec_aead_xcm_ctx_init(struct crypto_aead *tfm) { struct aead_alg *alg = crypto_aead_alg(tfm); struct sec_ctx *ctx = crypto_aead_ctx(tfm); struct sec_auth_ctx *a_ctx = &ctx->a_ctx; const char *aead_name = alg->base.cra_name; int ret; ret = sec_aead_init(tfm); if (ret) { dev_err(ctx->dev, "hisi_sec2: aead xcm init error!\n"); return ret; } a_ctx->fallback_aead_tfm = crypto_alloc_aead(aead_name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); if (IS_ERR(a_ctx->fallback_aead_tfm)) { dev_err(ctx->dev, "aead driver alloc fallback tfm error!\n"); sec_aead_exit(tfm); return PTR_ERR(a_ctx->fallback_aead_tfm); } a_ctx->fallback = false; return 0; } static void sec_aead_xcm_ctx_exit(struct crypto_aead *tfm) { struct sec_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->a_ctx.fallback_aead_tfm); sec_aead_exit(tfm); } static int sec_aead_sha1_ctx_init(struct crypto_aead *tfm) { return sec_aead_ctx_init(tfm, "sha1"); } static int sec_aead_sha256_ctx_init(struct crypto_aead *tfm) { return sec_aead_ctx_init(tfm, "sha256"); } static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm) { return sec_aead_ctx_init(tfm, "sha512"); } static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq) { u32 cryptlen = sreq->c_req.sk_req->cryptlen; struct device *dev = ctx->dev; u8 c_mode = ctx->c_ctx.c_mode; int ret = 0; switch (c_mode) { case SEC_CMODE_XTS: if (unlikely(cryptlen < AES_BLOCK_SIZE)) { dev_err(dev, "skcipher XTS mode input length error!\n"); ret = -EINVAL; } break; case SEC_CMODE_ECB: case SEC_CMODE_CBC: if (unlikely(cryptlen & (AES_BLOCK_SIZE - 1))) { dev_err(dev, "skcipher AES input length error!\n"); ret = -EINVAL; } break; case SEC_CMODE_CFB: case SEC_CMODE_OFB: case SEC_CMODE_CTR: if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) { dev_err(dev, "skcipher HW version error!\n"); ret = -EINVAL; } break; default: ret = -EINVAL; } return ret; } static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq) { struct skcipher_request *sk_req = sreq->c_req.sk_req; struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; if (unlikely(!sk_req->src || !sk_req->dst || sk_req->cryptlen > MAX_INPUT_DATA_LEN)) { dev_err(dev, "skcipher input param error!\n"); return -EINVAL; } sreq->c_req.c_len = sk_req->cryptlen; if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ) sreq->use_pbuf = true; else sreq->use_pbuf = false; if (c_alg == SEC_CALG_3DES) { if (unlikely(sk_req->cryptlen & (DES3_EDE_BLOCK_SIZE - 1))) { dev_err(dev, "skcipher 3des input length error!\n"); return -EINVAL; } return 0; } else if (c_alg == SEC_CALG_AES || c_alg == SEC_CALG_SM4) { return sec_skcipher_cryptlen_check(ctx, sreq); } dev_err(dev, "skcipher algorithm error!\n"); return -EINVAL; } static int sec_skcipher_soft_crypto(struct sec_ctx *ctx, struct skcipher_request *sreq, bool encrypt) { struct sec_cipher_ctx *c_ctx = &ctx->c_ctx; SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, c_ctx->fbtfm); struct device *dev = ctx->dev; int ret; if (!c_ctx->fbtfm) { dev_err_ratelimited(dev, "the soft tfm isn't supported in the current system.\n"); return -EINVAL; } skcipher_request_set_sync_tfm(subreq, c_ctx->fbtfm); /* software need sync mode to do crypto */ skcipher_request_set_callback(subreq, sreq->base.flags, NULL, NULL); skcipher_request_set_crypt(subreq, sreq->src, sreq->dst, sreq->cryptlen, sreq->iv); if (encrypt) ret = 
crypto_skcipher_encrypt(subreq); else ret = crypto_skcipher_decrypt(subreq); skcipher_request_zero(subreq); return ret; } static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req); struct sec_req *req = skcipher_request_ctx(sk_req); struct sec_ctx *ctx = crypto_skcipher_ctx(tfm); int ret; if (!sk_req->cryptlen) { if (ctx->c_ctx.c_mode == SEC_CMODE_XTS) return -EINVAL; return 0; } req->flag = sk_req->base.flags; req->c_req.sk_req = sk_req; req->c_req.encrypt = encrypt; req->ctx = ctx; ret = sec_skcipher_param_check(ctx, req); if (unlikely(ret)) return -EINVAL; if (unlikely(ctx->c_ctx.fallback)) return sec_skcipher_soft_crypto(ctx, sk_req, encrypt); return ctx->req_op->process(ctx, req); } static int sec_skcipher_encrypt(struct skcipher_request *sk_req) { return sec_skcipher_crypto(sk_req, true); } static int sec_skcipher_decrypt(struct skcipher_request *sk_req) { return sec_skcipher_crypto(sk_req, false); } #define SEC_SKCIPHER_GEN_ALG(sec_cra_name, sec_set_key, sec_min_key_size, \ sec_max_key_size, ctx_init, ctx_exit, blk_size, iv_size)\ {\ .base = {\ .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ .cra_flags = CRYPTO_ALG_ASYNC |\ CRYPTO_ALG_NEED_FALLBACK,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ },\ .init = ctx_init,\ .exit = ctx_exit,\ .setkey = sec_set_key,\ .decrypt = sec_skcipher_decrypt,\ .encrypt = sec_skcipher_encrypt,\ .min_keysize = sec_min_key_size,\ .max_keysize = sec_max_key_size,\ .ivsize = iv_size,\ } #define SEC_SKCIPHER_ALG(name, key_func, min_key_size, \ max_key_size, blk_size, iv_size) \ SEC_SKCIPHER_GEN_ALG(name, key_func, min_key_size, max_key_size, \ sec_skcipher_ctx_init, sec_skcipher_ctx_exit, blk_size, iv_size) static struct sec_skcipher sec_skciphers[] = { { .alg_msk = BIT(0), .alg = SEC_SKCIPHER_ALG("ecb(aes)", sec_setkey_aes_ecb, AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, 0), }, { .alg_msk = BIT(1), .alg = SEC_SKCIPHER_ALG("cbc(aes)", sec_setkey_aes_cbc, AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), }, { .alg_msk = BIT(2), .alg = SEC_SKCIPHER_ALG("ctr(aes)", sec_setkey_aes_ctr, AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), }, { .alg_msk = BIT(3), .alg = SEC_SKCIPHER_ALG("xts(aes)", sec_setkey_aes_xts, SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MAX_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), }, { .alg_msk = BIT(4), .alg = SEC_SKCIPHER_ALG("ofb(aes)", sec_setkey_aes_ofb, AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), }, { .alg_msk = BIT(5), .alg = SEC_SKCIPHER_ALG("cfb(aes)", sec_setkey_aes_cfb, AES_MIN_KEY_SIZE, AES_MAX_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), }, { .alg_msk = BIT(12), .alg = SEC_SKCIPHER_ALG("cbc(sm4)", sec_setkey_sm4_cbc, AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), }, { .alg_msk = BIT(13), .alg = SEC_SKCIPHER_ALG("ctr(sm4)", sec_setkey_sm4_ctr, AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), }, { .alg_msk = BIT(14), .alg = SEC_SKCIPHER_ALG("xts(sm4)", sec_setkey_sm4_xts, SEC_XTS_MIN_KEY_SIZE, SEC_XTS_MIN_KEY_SIZE, AES_BLOCK_SIZE, AES_BLOCK_SIZE), }, { .alg_msk = BIT(15), .alg = SEC_SKCIPHER_ALG("ofb(sm4)", sec_setkey_sm4_ofb, AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE), }, { .alg_msk = BIT(16), .alg = SEC_SKCIPHER_ALG("cfb(sm4)", sec_setkey_sm4_cfb, AES_MIN_KEY_SIZE, AES_MIN_KEY_SIZE, SEC_MIN_BLOCK_SZ, 
AES_BLOCK_SIZE), }, { .alg_msk = BIT(23), .alg = SEC_SKCIPHER_ALG("ecb(des3_ede)", sec_setkey_3des_ecb, SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, 0), }, { .alg_msk = BIT(24), .alg = SEC_SKCIPHER_ALG("cbc(des3_ede)", sec_setkey_3des_cbc, SEC_DES3_3KEY_SIZE, SEC_DES3_3KEY_SIZE, DES3_EDE_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE), }, }; static int aead_iv_demension_check(struct aead_request *aead_req) { u8 cl; cl = aead_req->iv[0] + 1; if (cl < IV_CL_MIN || cl > IV_CL_MAX) return -EINVAL; if (cl < IV_CL_MID && aead_req->cryptlen >> (BYTE_BITS * cl)) return -EOVERFLOW; return 0; } static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq) { struct aead_request *req = sreq->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(req); size_t authsize = crypto_aead_authsize(tfm); u8 c_mode = ctx->c_ctx.c_mode; struct device *dev = ctx->dev; int ret; if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN || req->assoclen > SEC_MAX_AAD_LEN)) { dev_err(dev, "aead input spec error!\n"); return -EINVAL; } if (unlikely((c_mode == SEC_CMODE_GCM && authsize < DES_BLOCK_SIZE) || (c_mode == SEC_CMODE_CCM && (authsize < MIN_MAC_LEN || authsize & MAC_LEN_MASK)))) { dev_err(dev, "aead input mac length error!\n"); return -EINVAL; } if (c_mode == SEC_CMODE_CCM) { if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) { dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n"); return -EINVAL; } ret = aead_iv_demension_check(req); if (ret) { dev_err(dev, "aead input iv param error!\n"); return ret; } } if (sreq->c_req.encrypt) sreq->c_req.c_len = req->cryptlen; else sreq->c_req.c_len = req->cryptlen - authsize; if (c_mode == SEC_CMODE_CBC) { if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) { dev_err(dev, "aead crypto length error!\n"); return -EINVAL; } } return 0; } static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq) { struct aead_request *req = sreq->aead_req.aead_req; struct crypto_aead *tfm = crypto_aead_reqtfm(req); size_t authsize = crypto_aead_authsize(tfm); struct device *dev = ctx->dev; u8 c_alg = ctx->c_ctx.c_alg; if (unlikely(!req->src || !req->dst)) { dev_err(dev, "aead input param error!\n"); return -EINVAL; } if (ctx->sec->qm.ver == QM_HW_V2) { if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt && req->cryptlen <= authsize))) { ctx->a_ctx.fallback = true; return -EINVAL; } } /* Support AES or SM4 */ if (unlikely(c_alg != SEC_CALG_AES && c_alg != SEC_CALG_SM4)) { dev_err(dev, "aead crypto alg error!\n"); return -EINVAL; } if (unlikely(sec_aead_spec_check(ctx, sreq))) return -EINVAL; if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <= SEC_PBUF_SZ) sreq->use_pbuf = true; else sreq->use_pbuf = false; return 0; } static int sec_aead_soft_crypto(struct sec_ctx *ctx, struct aead_request *aead_req, bool encrypt) { struct sec_auth_ctx *a_ctx = &ctx->a_ctx; struct device *dev = ctx->dev; struct aead_request *subreq; int ret; /* Kunpeng920 aead mode not support input 0 size */ if (!a_ctx->fallback_aead_tfm) { dev_err(dev, "aead fallback tfm is NULL!\n"); return -EINVAL; } subreq = aead_request_alloc(a_ctx->fallback_aead_tfm, GFP_KERNEL); if (!subreq) return -ENOMEM; aead_request_set_tfm(subreq, a_ctx->fallback_aead_tfm); aead_request_set_callback(subreq, aead_req->base.flags, aead_req->base.complete, aead_req->base.data); aead_request_set_crypt(subreq, aead_req->src, aead_req->dst, aead_req->cryptlen, aead_req->iv); aead_request_set_ad(subreq, aead_req->assoclen); if (encrypt) ret = crypto_aead_encrypt(subreq); else ret = 
crypto_aead_decrypt(subreq); aead_request_free(subreq); return ret; } static int sec_aead_crypto(struct aead_request *a_req, bool encrypt) { struct crypto_aead *tfm = crypto_aead_reqtfm(a_req); struct sec_req *req = aead_request_ctx(a_req); struct sec_ctx *ctx = crypto_aead_ctx(tfm); int ret; req->flag = a_req->base.flags; req->aead_req.aead_req = a_req; req->c_req.encrypt = encrypt; req->ctx = ctx; ret = sec_aead_param_check(ctx, req); if (unlikely(ret)) { if (ctx->a_ctx.fallback) return sec_aead_soft_crypto(ctx, a_req, encrypt); return -EINVAL; } return ctx->req_op->process(ctx, req); } static int sec_aead_encrypt(struct aead_request *a_req) { return sec_aead_crypto(a_req, true); } static int sec_aead_decrypt(struct aead_request *a_req) { return sec_aead_crypto(a_req, false); } #define SEC_AEAD_ALG(sec_cra_name, sec_set_key, ctx_init,\ ctx_exit, blk_size, iv_size, max_authsize)\ {\ .base = {\ .cra_name = sec_cra_name,\ .cra_driver_name = "hisi_sec_"sec_cra_name,\ .cra_priority = SEC_PRIORITY,\ .cra_flags = CRYPTO_ALG_ASYNC |\ CRYPTO_ALG_NEED_FALLBACK,\ .cra_blocksize = blk_size,\ .cra_ctxsize = sizeof(struct sec_ctx),\ .cra_module = THIS_MODULE,\ },\ .init = ctx_init,\ .exit = ctx_exit,\ .setkey = sec_set_key,\ .setauthsize = sec_aead_setauthsize,\ .decrypt = sec_aead_decrypt,\ .encrypt = sec_aead_encrypt,\ .ivsize = iv_size,\ .maxauthsize = max_authsize,\ } static struct sec_aead sec_aeads[] = { { .alg_msk = BIT(6), .alg = SEC_AEAD_ALG("ccm(aes)", sec_setkey_aes_ccm, sec_aead_xcm_ctx_init, sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE, AES_BLOCK_SIZE), }, { .alg_msk = BIT(7), .alg = SEC_AEAD_ALG("gcm(aes)", sec_setkey_aes_gcm, sec_aead_xcm_ctx_init, sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE, AES_BLOCK_SIZE), }, { .alg_msk = BIT(17), .alg = SEC_AEAD_ALG("ccm(sm4)", sec_setkey_sm4_ccm, sec_aead_xcm_ctx_init, sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, AES_BLOCK_SIZE, AES_BLOCK_SIZE), }, { .alg_msk = BIT(18), .alg = SEC_AEAD_ALG("gcm(sm4)", sec_setkey_sm4_gcm, sec_aead_xcm_ctx_init, sec_aead_xcm_ctx_exit, SEC_MIN_BLOCK_SZ, SEC_AIV_SIZE, AES_BLOCK_SIZE), }, { .alg_msk = BIT(43), .alg = SEC_AEAD_ALG("authenc(hmac(sha1),cbc(aes))", sec_setkey_aes_cbc_sha1, sec_aead_sha1_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA1_DIGEST_SIZE), }, { .alg_msk = BIT(44), .alg = SEC_AEAD_ALG("authenc(hmac(sha256),cbc(aes))", sec_setkey_aes_cbc_sha256, sec_aead_sha256_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA256_DIGEST_SIZE), }, { .alg_msk = BIT(45), .alg = SEC_AEAD_ALG("authenc(hmac(sha512),cbc(aes))", sec_setkey_aes_cbc_sha512, sec_aead_sha512_ctx_init, sec_aead_ctx_exit, AES_BLOCK_SIZE, AES_BLOCK_SIZE, SHA512_DIGEST_SIZE), }, }; static void sec_unregister_skcipher(u64 alg_mask, int end) { int i; for (i = 0; i < end; i++) if (sec_skciphers[i].alg_msk & alg_mask) crypto_unregister_skcipher(&sec_skciphers[i].alg); } static int sec_register_skcipher(u64 alg_mask) { int i, ret, count; count = ARRAY_SIZE(sec_skciphers); for (i = 0; i < count; i++) { if (!(sec_skciphers[i].alg_msk & alg_mask)) continue; ret = crypto_register_skcipher(&sec_skciphers[i].alg); if (ret) goto err; } return 0; err: sec_unregister_skcipher(alg_mask, i); return ret; } static void sec_unregister_aead(u64 alg_mask, int end) { int i; for (i = 0; i < end; i++) if (sec_aeads[i].alg_msk & alg_mask) crypto_unregister_aead(&sec_aeads[i].alg); } static int sec_register_aead(u64 alg_mask) { int i, ret, count; count = ARRAY_SIZE(sec_aeads); for (i = 0; i < count; i++) { if 
(!(sec_aeads[i].alg_msk & alg_mask)) continue; ret = crypto_register_aead(&sec_aeads[i].alg); if (ret) goto err; } return 0; err: sec_unregister_aead(alg_mask, i); return ret; } int sec_register_to_crypto(struct hisi_qm *qm) { u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW); int ret; ret = sec_register_skcipher(alg_mask); if (ret) return ret; ret = sec_register_aead(alg_mask); if (ret) sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); return ret; } void sec_unregister_from_crypto(struct hisi_qm *qm) { u64 alg_mask = sec_get_alg_bitmap(qm, SEC_DRV_ALG_BITMAP_HIGH, SEC_DRV_ALG_BITMAP_LOW); sec_unregister_aead(alg_mask, ARRAY_SIZE(sec_aeads)); sec_unregister_skcipher(alg_mask, ARRAY_SIZE(sec_skciphers)); }
linux-master
drivers/crypto/hisilicon/sec2/sec_crypto.c
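The registration helpers at the end of sec_crypto.c above (sec_register_skcipher/sec_register_aead and their unregister counterparts) share one pattern: every table entry carries an alg_msk bit, only entries whose bit is set in the device capability mask are registered, and a mid-loop failure unwinds exactly the entries registered so far. The following is a minimal standalone userspace sketch of that pattern, not kernel code; every name in it (demo_alg, demo_register, ...) is invented for illustration.

/*
 * Standalone sketch (not the driver's code) of the bitmap-gated
 * registration/unwind pattern used by sec_register_aead() above.
 * All identifiers here are hypothetical stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct demo_alg {
	uint64_t alg_msk;	/* capability bit that enables this entry */
	const char *name;
};

static const struct demo_alg demo_algs[] = {
	{ 1ULL << 6,  "ccm(aes)" },
	{ 1ULL << 7,  "gcm(aes)" },
	{ 1ULL << 43, "authenc(hmac(sha1),cbc(aes))" },
};

/* stand-ins for crypto_register_aead()/crypto_unregister_aead() */
static int demo_register(const struct demo_alg *a)
{
	printf("register   %s\n", a->name);
	return 0;		/* return non-zero here to exercise the unwind path */
}

static void demo_unregister(const struct demo_alg *a)
{
	printf("unregister %s\n", a->name);
}

/* mirrors sec_unregister_aead(): undo enabled entries in [0, end) */
static void demo_unregister_all(uint64_t alg_mask, size_t end)
{
	size_t i;

	for (i = 0; i < end; i++)
		if (demo_algs[i].alg_msk & alg_mask)
			demo_unregister(&demo_algs[i]);
}

/* mirrors sec_register_aead(): register enabled entries, unwind on error */
static int demo_register_all(uint64_t alg_mask)
{
	size_t i;
	int ret;

	for (i = 0; i < sizeof(demo_algs) / sizeof(demo_algs[0]); i++) {
		if (!(demo_algs[i].alg_msk & alg_mask))
			continue;
		ret = demo_register(&demo_algs[i]);
		if (ret)
			goto err;
	}
	return 0;
err:
	demo_unregister_all(alg_mask, i);
	return ret;
}

int main(void)
{
	/* pretend the device only advertises bits 6 and 7 */
	return demo_register_all((1ULL << 6) | (1ULL << 7));
}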
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 HiSilicon Limited. */ #include <linux/acpi.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/init.h> #include <linux/io.h> #include <linux/iommu.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pm_runtime.h> #include <linux/seq_file.h> #include <linux/topology.h> #include <linux/uacce.h> #include "sec.h" #define SEC_VF_NUM 63 #define SEC_QUEUE_NUM_V1 4096 #define PCI_DEVICE_ID_HUAWEI_SEC_PF 0xa255 #define SEC_BD_ERR_CHK_EN0 0xEFFFFFFF #define SEC_BD_ERR_CHK_EN1 0x7ffff7fd #define SEC_BD_ERR_CHK_EN3 0xffffbfff #define SEC_SQE_SIZE 128 #define SEC_PF_DEF_Q_NUM 256 #define SEC_PF_DEF_Q_BASE 0 #define SEC_CTX_Q_NUM_DEF 2 #define SEC_CTX_Q_NUM_MAX 32 #define SEC_CTRL_CNT_CLR_CE 0x301120 #define SEC_CTRL_CNT_CLR_CE_BIT BIT(0) #define SEC_CORE_INT_SOURCE 0x301010 #define SEC_CORE_INT_MASK 0x301000 #define SEC_CORE_INT_STATUS 0x301008 #define SEC_CORE_SRAM_ECC_ERR_INFO 0x301C14 #define SEC_ECC_NUM 16 #define SEC_ECC_MASH 0xFF #define SEC_CORE_INT_DISABLE 0x0 #define SEC_RAS_CE_REG 0x301050 #define SEC_RAS_FE_REG 0x301054 #define SEC_RAS_NFE_REG 0x301058 #define SEC_RAS_FE_ENB_MSK 0x0 #define SEC_OOO_SHUTDOWN_SEL 0x301014 #define SEC_RAS_DISABLE 0x0 #define SEC_MEM_START_INIT_REG 0x301100 #define SEC_MEM_INIT_DONE_REG 0x301104 /* clock gating */ #define SEC_CONTROL_REG 0x301200 #define SEC_DYNAMIC_GATE_REG 0x30121c #define SEC_CORE_AUTO_GATE 0x30212c #define SEC_DYNAMIC_GATE_EN 0x7fff #define SEC_CORE_AUTO_GATE_EN GENMASK(3, 0) #define SEC_CLK_GATE_ENABLE BIT(3) #define SEC_CLK_GATE_DISABLE (~BIT(3)) #define SEC_TRNG_EN_SHIFT 8 #define SEC_AXI_SHUTDOWN_ENABLE BIT(12) #define SEC_AXI_SHUTDOWN_DISABLE 0xFFFFEFFF #define SEC_INTERFACE_USER_CTRL0_REG 0x301220 #define SEC_INTERFACE_USER_CTRL1_REG 0x301224 #define SEC_SAA_EN_REG 0x301270 #define SEC_BD_ERR_CHK_EN_REG0 0x301380 #define SEC_BD_ERR_CHK_EN_REG1 0x301384 #define SEC_BD_ERR_CHK_EN_REG3 0x30138c #define SEC_USER0_SMMU_NORMAL (BIT(23) | BIT(15)) #define SEC_USER1_SMMU_NORMAL (BIT(31) | BIT(23) | BIT(15) | BIT(7)) #define SEC_USER1_ENABLE_CONTEXT_SSV BIT(24) #define SEC_USER1_ENABLE_DATA_SSV BIT(16) #define SEC_USER1_WB_CONTEXT_SSV BIT(8) #define SEC_USER1_WB_DATA_SSV BIT(0) #define SEC_USER1_SVA_SET (SEC_USER1_ENABLE_CONTEXT_SSV | \ SEC_USER1_ENABLE_DATA_SSV | \ SEC_USER1_WB_CONTEXT_SSV | \ SEC_USER1_WB_DATA_SSV) #define SEC_USER1_SMMU_SVA (SEC_USER1_SMMU_NORMAL | SEC_USER1_SVA_SET) #define SEC_USER1_SMMU_MASK (~SEC_USER1_SVA_SET) #define SEC_INTERFACE_USER_CTRL0_REG_V3 0x302220 #define SEC_INTERFACE_USER_CTRL1_REG_V3 0x302224 #define SEC_USER1_SMMU_NORMAL_V3 (BIT(23) | BIT(17) | BIT(11) | BIT(5)) #define SEC_USER1_SMMU_MASK_V3 0xFF79E79E #define SEC_CORE_INT_STATUS_M_ECC BIT(2) #define SEC_PREFETCH_CFG 0x301130 #define SEC_SVA_TRANS 0x301EC4 #define SEC_PREFETCH_ENABLE (~(BIT(0) | BIT(1) | BIT(11))) #define SEC_PREFETCH_DISABLE BIT(1) #define SEC_SVA_DISABLE_READY (BIT(7) | BIT(11)) #define SEC_DELAY_10_US 10 #define SEC_POLL_TIMEOUT_US 1000 #define SEC_DBGFS_VAL_MAX_LEN 20 #define SEC_SINGLE_PORT_MAX_TRANS 0x2060 #define SEC_SQE_MASK_OFFSET 64 #define SEC_SQE_MASK_LEN 48 #define SEC_SHAPER_TYPE_RATE 400 #define SEC_DFX_BASE 0x301000 #define SEC_DFX_CORE 0x302100 #define SEC_DFX_COMMON1 0x301600 #define SEC_DFX_COMMON2 0x301C00 #define SEC_DFX_BASE_LEN 0x9D #define SEC_DFX_CORE_LEN 0x32B #define SEC_DFX_COMMON1_LEN 0x45 #define SEC_DFX_COMMON2_LEN 0xBA #define SEC_ALG_BITMAP_SHIFT 32 #define SEC_CIPHER_BITMAP 
(GENMASK_ULL(5, 0) | GENMASK_ULL(16, 12) | \ GENMASK(24, 21)) #define SEC_DIGEST_BITMAP (GENMASK_ULL(11, 8) | GENMASK_ULL(20, 19) | \ GENMASK_ULL(42, 25)) #define SEC_AEAD_BITMAP (GENMASK_ULL(7, 6) | GENMASK_ULL(18, 17) | \ GENMASK_ULL(45, 43)) #define SEC_DEV_ALG_MAX_LEN 256 struct sec_hw_error { u32 int_msk; const char *msg; }; struct sec_dfx_item { const char *name; u32 offset; }; struct sec_dev_alg { u64 alg_msk; const char *algs; }; static const char sec_name[] = "hisi_sec2"; static struct dentry *sec_debugfs_root; static struct hisi_qm_list sec_devices = { .register_to_crypto = sec_register_to_crypto, .unregister_from_crypto = sec_unregister_from_crypto, }; static const struct hisi_qm_cap_info sec_basic_info[] = { {SEC_QM_NFE_MASK_CAP, 0x3124, 0, GENMASK(31, 0), 0x0, 0x1C77, 0x7C77}, {SEC_QM_RESET_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0xC77, 0x6C77}, {SEC_QM_OOO_SHUTDOWN_MASK_CAP, 0x3128, 0, GENMASK(31, 0), 0x0, 0x4, 0x6C77}, {SEC_QM_CE_MASK_CAP, 0x312C, 0, GENMASK(31, 0), 0x0, 0x8, 0x8}, {SEC_NFE_MASK_CAP, 0x3130, 0, GENMASK(31, 0), 0x0, 0x177, 0x60177}, {SEC_RESET_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x177, 0x177}, {SEC_OOO_SHUTDOWN_MASK_CAP, 0x3134, 0, GENMASK(31, 0), 0x0, 0x4, 0x177}, {SEC_CE_MASK_CAP, 0x3138, 0, GENMASK(31, 0), 0x0, 0x88, 0xC088}, {SEC_CLUSTER_NUM_CAP, 0x313c, 20, GENMASK(3, 0), 0x1, 0x1, 0x1}, {SEC_CORE_TYPE_NUM_CAP, 0x313c, 16, GENMASK(3, 0), 0x1, 0x1, 0x1}, {SEC_CORE_NUM_CAP, 0x313c, 8, GENMASK(7, 0), 0x4, 0x4, 0x4}, {SEC_CORES_PER_CLUSTER_NUM_CAP, 0x313c, 0, GENMASK(7, 0), 0x4, 0x4, 0x4}, {SEC_CORE_ENABLE_BITMAP, 0x3140, 32, GENMASK(31, 0), 0x17F, 0x17F, 0xF}, {SEC_DRV_ALG_BITMAP_LOW, 0x3144, 0, GENMASK(31, 0), 0x18050CB, 0x18050CB, 0x187F0FF}, {SEC_DRV_ALG_BITMAP_HIGH, 0x3148, 0, GENMASK(31, 0), 0x395C, 0x395C, 0x395C}, {SEC_DEV_ALG_BITMAP_LOW, 0x314c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, {SEC_DEV_ALG_BITMAP_HIGH, 0x3150, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, {SEC_CORE1_ALG_BITMAP_LOW, 0x3154, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, {SEC_CORE1_ALG_BITMAP_HIGH, 0x3158, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, {SEC_CORE2_ALG_BITMAP_LOW, 0x315c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, {SEC_CORE2_ALG_BITMAP_HIGH, 0x3160, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, {SEC_CORE3_ALG_BITMAP_LOW, 0x3164, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, {SEC_CORE3_ALG_BITMAP_HIGH, 0x3168, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, {SEC_CORE4_ALG_BITMAP_LOW, 0x316c, 0, GENMASK(31, 0), 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF}, {SEC_CORE4_ALG_BITMAP_HIGH, 0x3170, 0, GENMASK(31, 0), 0x3FFF, 0x3FFF, 0x3FFF}, }; static const struct sec_dev_alg sec_dev_algs[] = { { .alg_msk = SEC_CIPHER_BITMAP, .algs = "cipher\n", }, { .alg_msk = SEC_DIGEST_BITMAP, .algs = "digest\n", }, { .alg_msk = SEC_AEAD_BITMAP, .algs = "aead\n", }, }; static const struct sec_hw_error sec_hw_errors[] = { { .int_msk = BIT(0), .msg = "sec_axi_rresp_err_rint" }, { .int_msk = BIT(1), .msg = "sec_axi_bresp_err_rint" }, { .int_msk = BIT(2), .msg = "sec_ecc_2bit_err_rint" }, { .int_msk = BIT(3), .msg = "sec_ecc_1bit_err_rint" }, { .int_msk = BIT(4), .msg = "sec_req_trng_timeout_rint" }, { .int_msk = BIT(5), .msg = "sec_fsm_hbeat_rint" }, { .int_msk = BIT(6), .msg = "sec_channel_req_rng_timeout_rint" }, { .int_msk = BIT(7), .msg = "sec_bd_err_rint" }, { .int_msk = BIT(8), .msg = "sec_chain_buff_err_rint" }, { .int_msk = BIT(14), .msg = "sec_no_secure_access" }, { .int_msk = BIT(15), .msg = "sec_wrapping_key_auth_err" }, { .int_msk = 
BIT(16), .msg = "sec_km_key_crc_fail" }, { .int_msk = BIT(17), .msg = "sec_axi_poison_err" }, { .int_msk = BIT(18), .msg = "sec_sva_err" }, {} }; static const char * const sec_dbg_file_name[] = { [SEC_CLEAR_ENABLE] = "clear_enable", }; static struct sec_dfx_item sec_dfx_labels[] = { {"send_cnt", offsetof(struct sec_dfx, send_cnt)}, {"recv_cnt", offsetof(struct sec_dfx, recv_cnt)}, {"send_busy_cnt", offsetof(struct sec_dfx, send_busy_cnt)}, {"recv_busy_cnt", offsetof(struct sec_dfx, recv_busy_cnt)}, {"err_bd_cnt", offsetof(struct sec_dfx, err_bd_cnt)}, {"invalid_req_cnt", offsetof(struct sec_dfx, invalid_req_cnt)}, {"done_flag_cnt", offsetof(struct sec_dfx, done_flag_cnt)}, }; static const struct debugfs_reg32 sec_dfx_regs[] = { {"SEC_PF_ABNORMAL_INT_SOURCE ", 0x301010}, {"SEC_SAA_EN ", 0x301270}, {"SEC_BD_LATENCY_MIN ", 0x301600}, {"SEC_BD_LATENCY_MAX ", 0x301608}, {"SEC_BD_LATENCY_AVG ", 0x30160C}, {"SEC_BD_NUM_IN_SAA0 ", 0x301670}, {"SEC_BD_NUM_IN_SAA1 ", 0x301674}, {"SEC_BD_NUM_IN_SEC ", 0x301680}, {"SEC_ECC_1BIT_CNT ", 0x301C00}, {"SEC_ECC_1BIT_INFO ", 0x301C04}, {"SEC_ECC_2BIT_CNT ", 0x301C10}, {"SEC_ECC_2BIT_INFO ", 0x301C14}, {"SEC_BD_SAA0 ", 0x301C20}, {"SEC_BD_SAA1 ", 0x301C24}, {"SEC_BD_SAA2 ", 0x301C28}, {"SEC_BD_SAA3 ", 0x301C2C}, {"SEC_BD_SAA4 ", 0x301C30}, {"SEC_BD_SAA5 ", 0x301C34}, {"SEC_BD_SAA6 ", 0x301C38}, {"SEC_BD_SAA7 ", 0x301C3C}, {"SEC_BD_SAA8 ", 0x301C40}, }; /* define the SEC's dfx regs region and region length */ static struct dfx_diff_registers sec_diff_regs[] = { { .reg_offset = SEC_DFX_BASE, .reg_len = SEC_DFX_BASE_LEN, }, { .reg_offset = SEC_DFX_COMMON1, .reg_len = SEC_DFX_COMMON1_LEN, }, { .reg_offset = SEC_DFX_COMMON2, .reg_len = SEC_DFX_COMMON2_LEN, }, { .reg_offset = SEC_DFX_CORE, .reg_len = SEC_DFX_CORE_LEN, }, }; static int sec_diff_regs_show(struct seq_file *s, void *unused) { struct hisi_qm *qm = s->private; hisi_qm_acc_diff_regs_dump(qm, s, qm->debug.acc_diff_regs, ARRAY_SIZE(sec_diff_regs)); return 0; } DEFINE_SHOW_ATTRIBUTE(sec_diff_regs); static int sec_pf_q_num_set(const char *val, const struct kernel_param *kp) { return q_num_set(val, kp, PCI_DEVICE_ID_HUAWEI_SEC_PF); } static const struct kernel_param_ops sec_pf_q_num_ops = { .set = sec_pf_q_num_set, .get = param_get_int, }; static u32 pf_q_num = SEC_PF_DEF_Q_NUM; module_param_cb(pf_q_num, &sec_pf_q_num_ops, &pf_q_num, 0444); MODULE_PARM_DESC(pf_q_num, "Number of queues in PF(v1 2-4096, v2 2-1024)"); static int sec_ctx_q_num_set(const char *val, const struct kernel_param *kp) { u32 ctx_q_num; int ret; if (!val) return -EINVAL; ret = kstrtou32(val, 10, &ctx_q_num); if (ret) return -EINVAL; if (!ctx_q_num || ctx_q_num > SEC_CTX_Q_NUM_MAX || ctx_q_num & 0x1) { pr_err("ctx queue num[%u] is invalid!\n", ctx_q_num); return -EINVAL; } return param_set_int(val, kp); } static const struct kernel_param_ops sec_ctx_q_num_ops = { .set = sec_ctx_q_num_set, .get = param_get_int, }; static u32 ctx_q_num = SEC_CTX_Q_NUM_DEF; module_param_cb(ctx_q_num, &sec_ctx_q_num_ops, &ctx_q_num, 0444); MODULE_PARM_DESC(ctx_q_num, "Queue num in ctx (2 default, 2, 4, ..., 32)"); static const struct kernel_param_ops vfs_num_ops = { .set = vfs_num_set, .get = param_get_int, }; static u32 vfs_num; module_param_cb(vfs_num, &vfs_num_ops, &vfs_num, 0444); MODULE_PARM_DESC(vfs_num, "Number of VFs to enable(1-63), 0(default)"); void sec_destroy_qps(struct hisi_qp **qps, int qp_num) { hisi_qm_free_qps(qps, qp_num); kfree(qps); } struct hisi_qp **sec_create_qps(void) { int node = cpu_to_node(smp_processor_id()); u32 ctx_num = 
ctx_q_num; struct hisi_qp **qps; int ret; qps = kcalloc(ctx_num, sizeof(struct hisi_qp *), GFP_KERNEL); if (!qps) return NULL; ret = hisi_qm_alloc_qps_node(&sec_devices, ctx_num, 0, node, qps); if (!ret) return qps; kfree(qps); return NULL; } u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low) { u32 cap_val_h, cap_val_l; cap_val_h = hisi_qm_get_hw_info(qm, sec_basic_info, high, qm->cap_ver); cap_val_l = hisi_qm_get_hw_info(qm, sec_basic_info, low, qm->cap_ver); return ((u64)cap_val_h << SEC_ALG_BITMAP_SHIFT) | (u64)cap_val_l; } static const struct kernel_param_ops sec_uacce_mode_ops = { .set = uacce_mode_set, .get = param_get_int, }; /* * uacce_mode = 0 means sec only register to crypto, * uacce_mode = 1 means sec both register to crypto and uacce. */ static u32 uacce_mode = UACCE_MODE_NOUACCE; module_param_cb(uacce_mode, &sec_uacce_mode_ops, &uacce_mode, 0444); MODULE_PARM_DESC(uacce_mode, UACCE_MODE_DESC); static const struct pci_device_id sec_dev_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_PF) }, { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HUAWEI_SEC_VF) }, { 0, } }; MODULE_DEVICE_TABLE(pci, sec_dev_ids); static void sec_set_endian(struct hisi_qm *qm) { u32 reg; reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); reg &= ~(BIT(1) | BIT(0)); if (!IS_ENABLED(CONFIG_64BIT)) reg |= BIT(1); if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) reg |= BIT(0); writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); } static void sec_engine_sva_config(struct hisi_qm *qm) { u32 reg; if (qm->ver > QM_HW_V2) { reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG_V3); reg |= SEC_USER0_SMMU_NORMAL; writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG_V3); reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG_V3); reg &= SEC_USER1_SMMU_MASK_V3; reg |= SEC_USER1_SMMU_NORMAL_V3; writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG_V3); } else { reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); reg |= SEC_USER0_SMMU_NORMAL; writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL0_REG); reg = readl_relaxed(qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); reg &= SEC_USER1_SMMU_MASK; if (qm->use_sva) reg |= SEC_USER1_SMMU_SVA; else reg |= SEC_USER1_SMMU_NORMAL; writel_relaxed(reg, qm->io_base + SEC_INTERFACE_USER_CTRL1_REG); } } static void sec_open_sva_prefetch(struct hisi_qm *qm) { u32 val; int ret; if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; /* Enable prefetch */ val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); val &= SEC_PREFETCH_ENABLE; writel(val, qm->io_base + SEC_PREFETCH_CFG); ret = readl_relaxed_poll_timeout(qm->io_base + SEC_PREFETCH_CFG, val, !(val & SEC_PREFETCH_DISABLE), SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); if (ret) pci_err(qm->pdev, "failed to open sva prefetch\n"); } static void sec_close_sva_prefetch(struct hisi_qm *qm) { u32 val; int ret; if (!test_bit(QM_SUPPORT_SVA_PREFETCH, &qm->caps)) return; val = readl_relaxed(qm->io_base + SEC_PREFETCH_CFG); val |= SEC_PREFETCH_DISABLE; writel(val, qm->io_base + SEC_PREFETCH_CFG); ret = readl_relaxed_poll_timeout(qm->io_base + SEC_SVA_TRANS, val, !(val & SEC_SVA_DISABLE_READY), SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); if (ret) pci_err(qm->pdev, "failed to close sva prefetch\n"); } static void sec_enable_clock_gate(struct hisi_qm *qm) { u32 val; if (qm->ver < QM_HW_V3) return; val = readl_relaxed(qm->io_base + SEC_CONTROL_REG); val |= SEC_CLK_GATE_ENABLE; writel_relaxed(val, qm->io_base + SEC_CONTROL_REG); val = readl(qm->io_base + SEC_DYNAMIC_GATE_REG); 
val |= SEC_DYNAMIC_GATE_EN; writel(val, qm->io_base + SEC_DYNAMIC_GATE_REG); val = readl(qm->io_base + SEC_CORE_AUTO_GATE); val |= SEC_CORE_AUTO_GATE_EN; writel(val, qm->io_base + SEC_CORE_AUTO_GATE); } static void sec_disable_clock_gate(struct hisi_qm *qm) { u32 val; /* Kunpeng920 needs to close clock gating */ val = readl_relaxed(qm->io_base + SEC_CONTROL_REG); val &= SEC_CLK_GATE_DISABLE; writel_relaxed(val, qm->io_base + SEC_CONTROL_REG); } static int sec_engine_init(struct hisi_qm *qm) { int ret; u32 reg; /* disable clock gate control before mem init */ sec_disable_clock_gate(qm); writel_relaxed(0x1, qm->io_base + SEC_MEM_START_INIT_REG); ret = readl_relaxed_poll_timeout(qm->io_base + SEC_MEM_INIT_DONE_REG, reg, reg & 0x1, SEC_DELAY_10_US, SEC_POLL_TIMEOUT_US); if (ret) { pci_err(qm->pdev, "fail to init sec mem\n"); return ret; } reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG); reg |= (0x1 << SEC_TRNG_EN_SHIFT); writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG); sec_engine_sva_config(qm); writel(SEC_SINGLE_PORT_MAX_TRANS, qm->io_base + AM_CFG_SINGLE_PORT_MAX_TRANS); reg = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CORE_ENABLE_BITMAP, qm->cap_ver); writel(reg, qm->io_base + SEC_SAA_EN_REG); if (qm->ver < QM_HW_V3) { /* HW V2 enable sm4 extra mode, as ctr/ecb */ writel_relaxed(SEC_BD_ERR_CHK_EN0, qm->io_base + SEC_BD_ERR_CHK_EN_REG0); /* HW V2 enable sm4 xts mode multiple iv */ writel_relaxed(SEC_BD_ERR_CHK_EN1, qm->io_base + SEC_BD_ERR_CHK_EN_REG1); writel_relaxed(SEC_BD_ERR_CHK_EN3, qm->io_base + SEC_BD_ERR_CHK_EN_REG3); } /* config endian */ sec_set_endian(qm); sec_enable_clock_gate(qm); return 0; } static int sec_set_user_domain_and_cache(struct hisi_qm *qm) { /* qm user domain */ writel(AXUSER_BASE, qm->io_base + QM_ARUSER_M_CFG_1); writel(ARUSER_M_CFG_ENABLE, qm->io_base + QM_ARUSER_M_CFG_ENABLE); writel(AXUSER_BASE, qm->io_base + QM_AWUSER_M_CFG_1); writel(AWUSER_M_CFG_ENABLE, qm->io_base + QM_AWUSER_M_CFG_ENABLE); writel(WUSER_M_CFG_ENABLE, qm->io_base + QM_WUSER_M_CFG_ENABLE); /* qm cache */ writel(AXI_M_CFG, qm->io_base + QM_AXI_M_CFG); writel(AXI_M_CFG_ENABLE, qm->io_base + QM_AXI_M_CFG_ENABLE); /* disable FLR triggered by BME(bus master enable) */ writel(PEH_AXUSER_CFG, qm->io_base + QM_PEH_AXUSER_CFG); writel(PEH_AXUSER_CFG_ENABLE, qm->io_base + QM_PEH_AXUSER_CFG_ENABLE); /* enable sqc,cqc writeback */ writel(SQC_CACHE_ENABLE | CQC_CACHE_ENABLE | SQC_CACHE_WB_ENABLE | CQC_CACHE_WB_ENABLE | FIELD_PREP(SQC_CACHE_WB_THRD, 1) | FIELD_PREP(CQC_CACHE_WB_THRD, 1), qm->io_base + QM_CACHE_CTL); return sec_engine_init(qm); } /* sec_debug_regs_clear() - clear the sec debug regs */ static void sec_debug_regs_clear(struct hisi_qm *qm) { int i; /* clear sec dfx regs */ writel(0x1, qm->io_base + SEC_CTRL_CNT_CLR_CE); for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) readl(qm->io_base + sec_dfx_regs[i].offset); /* clear rdclr_en */ writel(0x0, qm->io_base + SEC_CTRL_CNT_CLR_CE); hisi_qm_debug_regs_clear(qm); } static void sec_master_ooo_ctrl(struct hisi_qm *qm, bool enable) { u32 val1, val2; val1 = readl(qm->io_base + SEC_CONTROL_REG); if (enable) { val1 |= SEC_AXI_SHUTDOWN_ENABLE; val2 = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); } else { val1 &= SEC_AXI_SHUTDOWN_DISABLE; val2 = 0x0; } if (qm->ver > QM_HW_V2) writel(val2, qm->io_base + SEC_OOO_SHUTDOWN_SEL); writel(val1, qm->io_base + SEC_CONTROL_REG); } static void sec_hw_error_enable(struct hisi_qm *qm) { u32 ce, nfe; if (qm->ver == QM_HW_V1) { writel(SEC_CORE_INT_DISABLE, qm->io_base + 
SEC_CORE_INT_MASK); pci_info(qm->pdev, "V1 not support hw error handle\n"); return; } ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_CE_MASK_CAP, qm->cap_ver); nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); /* clear SEC hw error source if having */ writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_SOURCE); /* enable RAS int */ writel(ce, qm->io_base + SEC_RAS_CE_REG); writel(SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_RAS_FE_REG); writel(nfe, qm->io_base + SEC_RAS_NFE_REG); /* enable SEC block master OOO when nfe occurs on Kunpeng930 */ sec_master_ooo_ctrl(qm, true); /* enable SEC hw error interrupts */ writel(ce | nfe | SEC_RAS_FE_ENB_MSK, qm->io_base + SEC_CORE_INT_MASK); } static void sec_hw_error_disable(struct hisi_qm *qm) { /* disable SEC hw error interrupts */ writel(SEC_CORE_INT_DISABLE, qm->io_base + SEC_CORE_INT_MASK); /* disable SEC block master OOO when nfe occurs on Kunpeng930 */ sec_master_ooo_ctrl(qm, false); /* disable RAS int */ writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_CE_REG); writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_FE_REG); writel(SEC_RAS_DISABLE, qm->io_base + SEC_RAS_NFE_REG); } static u32 sec_clear_enable_read(struct hisi_qm *qm) { return readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) & SEC_CTRL_CNT_CLR_CE_BIT; } static int sec_clear_enable_write(struct hisi_qm *qm, u32 val) { u32 tmp; if (val != 1 && val) return -EINVAL; tmp = (readl(qm->io_base + SEC_CTRL_CNT_CLR_CE) & ~SEC_CTRL_CNT_CLR_CE_BIT) | val; writel(tmp, qm->io_base + SEC_CTRL_CNT_CLR_CE); return 0; } static ssize_t sec_debug_read(struct file *filp, char __user *buf, size_t count, loff_t *pos) { struct sec_debug_file *file = filp->private_data; char tbuf[SEC_DBGFS_VAL_MAX_LEN]; struct hisi_qm *qm = file->qm; u32 val; int ret; ret = hisi_qm_get_dfx_access(qm); if (ret) return ret; spin_lock_irq(&file->lock); switch (file->index) { case SEC_CLEAR_ENABLE: val = sec_clear_enable_read(qm); break; default: goto err_input; } spin_unlock_irq(&file->lock); hisi_qm_put_dfx_access(qm); ret = snprintf(tbuf, SEC_DBGFS_VAL_MAX_LEN, "%u\n", val); return simple_read_from_buffer(buf, count, pos, tbuf, ret); err_input: spin_unlock_irq(&file->lock); hisi_qm_put_dfx_access(qm); return -EINVAL; } static ssize_t sec_debug_write(struct file *filp, const char __user *buf, size_t count, loff_t *pos) { struct sec_debug_file *file = filp->private_data; char tbuf[SEC_DBGFS_VAL_MAX_LEN]; struct hisi_qm *qm = file->qm; unsigned long val; int len, ret; if (*pos != 0) return 0; if (count >= SEC_DBGFS_VAL_MAX_LEN) return -ENOSPC; len = simple_write_to_buffer(tbuf, SEC_DBGFS_VAL_MAX_LEN - 1, pos, buf, count); if (len < 0) return len; tbuf[len] = '\0'; if (kstrtoul(tbuf, 0, &val)) return -EFAULT; ret = hisi_qm_get_dfx_access(qm); if (ret) return ret; spin_lock_irq(&file->lock); switch (file->index) { case SEC_CLEAR_ENABLE: ret = sec_clear_enable_write(qm, val); if (ret) goto err_input; break; default: ret = -EINVAL; goto err_input; } ret = count; err_input: spin_unlock_irq(&file->lock); hisi_qm_put_dfx_access(qm); return ret; } static const struct file_operations sec_dbg_fops = { .owner = THIS_MODULE, .open = simple_open, .read = sec_debug_read, .write = sec_debug_write, }; static int sec_debugfs_atomic64_get(void *data, u64 *val) { *val = atomic64_read((atomic64_t *)data); return 0; } static int sec_debugfs_atomic64_set(void *data, u64 val) { if (val) return -EINVAL; atomic64_set((atomic64_t *)data, 0); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(sec_atomic64_ops, sec_debugfs_atomic64_get, 
sec_debugfs_atomic64_set, "%lld\n"); static int sec_regs_show(struct seq_file *s, void *unused) { hisi_qm_regs_dump(s, s->private); return 0; } DEFINE_SHOW_ATTRIBUTE(sec_regs); static int sec_core_debug_init(struct hisi_qm *qm) { struct dfx_diff_registers *sec_regs = qm->debug.acc_diff_regs; struct sec_dev *sec = container_of(qm, struct sec_dev, qm); struct device *dev = &qm->pdev->dev; struct sec_dfx *dfx = &sec->debug.dfx; struct debugfs_regset32 *regset; struct dentry *tmp_d; int i; tmp_d = debugfs_create_dir("sec_dfx", qm->debug.debug_root); regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) return -ENOMEM; regset->regs = sec_dfx_regs; regset->nregs = ARRAY_SIZE(sec_dfx_regs); regset->base = qm->io_base; regset->dev = dev; if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) debugfs_create_file("regs", 0444, tmp_d, regset, &sec_regs_fops); if (qm->fun_type == QM_HW_PF && sec_regs) debugfs_create_file("diff_regs", 0444, tmp_d, qm, &sec_diff_regs_fops); for (i = 0; i < ARRAY_SIZE(sec_dfx_labels); i++) { atomic64_t *data = (atomic64_t *)((uintptr_t)dfx + sec_dfx_labels[i].offset); debugfs_create_file(sec_dfx_labels[i].name, 0644, tmp_d, data, &sec_atomic64_ops); } return 0; } static int sec_debug_init(struct hisi_qm *qm) { struct sec_dev *sec = container_of(qm, struct sec_dev, qm); int i; if (qm->pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) { for (i = SEC_CLEAR_ENABLE; i < SEC_DEBUG_FILE_NUM; i++) { spin_lock_init(&sec->debug.files[i].lock); sec->debug.files[i].index = i; sec->debug.files[i].qm = qm; debugfs_create_file(sec_dbg_file_name[i], 0600, qm->debug.debug_root, sec->debug.files + i, &sec_dbg_fops); } } return sec_core_debug_init(qm); } static int sec_debugfs_init(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; int ret; qm->debug.debug_root = debugfs_create_dir(dev_name(dev), sec_debugfs_root); qm->debug.sqe_mask_offset = SEC_SQE_MASK_OFFSET; qm->debug.sqe_mask_len = SEC_SQE_MASK_LEN; ret = hisi_qm_regs_debugfs_init(qm, sec_diff_regs, ARRAY_SIZE(sec_diff_regs)); if (ret) { dev_warn(dev, "Failed to init SEC diff regs!\n"); goto debugfs_remove; } hisi_qm_debug_init(qm); ret = sec_debug_init(qm); if (ret) goto failed_to_create; return 0; failed_to_create: hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); debugfs_remove: debugfs_remove_recursive(sec_debugfs_root); return ret; } static void sec_debugfs_exit(struct hisi_qm *qm) { hisi_qm_regs_debugfs_uninit(qm, ARRAY_SIZE(sec_diff_regs)); debugfs_remove_recursive(qm->debug.debug_root); } static int sec_show_last_regs_init(struct hisi_qm *qm) { struct qm_debug *debug = &qm->debug; int i; debug->last_words = kcalloc(ARRAY_SIZE(sec_dfx_regs), sizeof(unsigned int), GFP_KERNEL); if (!debug->last_words) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) debug->last_words[i] = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset); return 0; } static void sec_show_last_regs_uninit(struct hisi_qm *qm) { struct qm_debug *debug = &qm->debug; if (qm->fun_type == QM_HW_VF || !debug->last_words) return; kfree(debug->last_words); debug->last_words = NULL; } static void sec_show_last_dfx_regs(struct hisi_qm *qm) { struct qm_debug *debug = &qm->debug; struct pci_dev *pdev = qm->pdev; u32 val; int i; if (qm->fun_type == QM_HW_VF || !debug->last_words) return; /* dumps last word of the debugging registers during controller reset */ for (i = 0; i < ARRAY_SIZE(sec_dfx_regs); i++) { val = readl_relaxed(qm->io_base + sec_dfx_regs[i].offset); if (val != debug->last_words[i]) pci_info(pdev, "%s \t= 0x%08x => 
0x%08x\n", sec_dfx_regs[i].name, debug->last_words[i], val); } } static void sec_log_hw_error(struct hisi_qm *qm, u32 err_sts) { const struct sec_hw_error *errs = sec_hw_errors; struct device *dev = &qm->pdev->dev; u32 err_val; while (errs->msg) { if (errs->int_msk & err_sts) { dev_err(dev, "%s [error status=0x%x] found\n", errs->msg, errs->int_msk); if (SEC_CORE_INT_STATUS_M_ECC & errs->int_msk) { err_val = readl(qm->io_base + SEC_CORE_SRAM_ECC_ERR_INFO); dev_err(dev, "multi ecc sram num=0x%x\n", ((err_val) >> SEC_ECC_NUM) & SEC_ECC_MASH); } } errs++; } } static u32 sec_get_hw_err_status(struct hisi_qm *qm) { return readl(qm->io_base + SEC_CORE_INT_STATUS); } static void sec_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts) { u32 nfe; writel(err_sts, qm->io_base + SEC_CORE_INT_SOURCE); nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_NFE_MASK_CAP, qm->cap_ver); writel(nfe, qm->io_base + SEC_RAS_NFE_REG); } static void sec_open_axi_master_ooo(struct hisi_qm *qm) { u32 val; val = readl(qm->io_base + SEC_CONTROL_REG); writel(val & SEC_AXI_SHUTDOWN_DISABLE, qm->io_base + SEC_CONTROL_REG); writel(val | SEC_AXI_SHUTDOWN_ENABLE, qm->io_base + SEC_CONTROL_REG); } static void sec_err_info_init(struct hisi_qm *qm) { struct hisi_qm_err_info *err_info = &qm->err_info; err_info->fe = SEC_RAS_FE_ENB_MSK; err_info->ce = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_CE_MASK_CAP, qm->cap_ver); err_info->nfe = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_NFE_MASK_CAP, qm->cap_ver); err_info->ecc_2bits_mask = SEC_CORE_INT_STATUS_M_ECC; err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver); err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_QM_RESET_MASK_CAP, qm->cap_ver); err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, sec_basic_info, SEC_RESET_MASK_CAP, qm->cap_ver); err_info->msi_wr_port = BIT(0); err_info->acpi_rst = "SRST"; } static const struct hisi_qm_err_ini sec_err_ini = { .hw_init = sec_set_user_domain_and_cache, .hw_err_enable = sec_hw_error_enable, .hw_err_disable = sec_hw_error_disable, .get_dev_hw_err_status = sec_get_hw_err_status, .clear_dev_hw_err_status = sec_clear_hw_err_status, .log_dev_hw_err = sec_log_hw_error, .open_axi_master_ooo = sec_open_axi_master_ooo, .open_sva_prefetch = sec_open_sva_prefetch, .close_sva_prefetch = sec_close_sva_prefetch, .show_last_dfx_regs = sec_show_last_dfx_regs, .err_info_init = sec_err_info_init, }; static int sec_pf_probe_init(struct sec_dev *sec) { struct hisi_qm *qm = &sec->qm; int ret; qm->err_ini = &sec_err_ini; qm->err_ini->err_info_init(qm); ret = sec_set_user_domain_and_cache(qm); if (ret) return ret; sec_open_sva_prefetch(qm); hisi_qm_dev_err_init(qm); sec_debug_regs_clear(qm); ret = sec_show_last_regs_init(qm); if (ret) pci_err(qm->pdev, "Failed to init last word regs!\n"); return ret; } static int sec_set_qm_algs(struct hisi_qm *qm) { struct device *dev = &qm->pdev->dev; char *algs, *ptr; u64 alg_mask; int i; if (!qm->use_sva) return 0; algs = devm_kzalloc(dev, SEC_DEV_ALG_MAX_LEN * sizeof(char), GFP_KERNEL); if (!algs) return -ENOMEM; alg_mask = sec_get_alg_bitmap(qm, SEC_DEV_ALG_BITMAP_HIGH, SEC_DEV_ALG_BITMAP_LOW); for (i = 0; i < ARRAY_SIZE(sec_dev_algs); i++) if (alg_mask & sec_dev_algs[i].alg_msk) strcat(algs, sec_dev_algs[i].algs); ptr = strrchr(algs, '\n'); if (ptr) *ptr = '\0'; qm->uacce->algs = algs; return 0; } static int sec_qm_init(struct 
hisi_qm *qm, struct pci_dev *pdev) { int ret; qm->pdev = pdev; qm->ver = pdev->revision; qm->mode = uacce_mode; qm->sqe_size = SEC_SQE_SIZE; qm->dev_name = sec_name; qm->fun_type = (pdev->device == PCI_DEVICE_ID_HUAWEI_SEC_PF) ? QM_HW_PF : QM_HW_VF; if (qm->fun_type == QM_HW_PF) { qm->qp_base = SEC_PF_DEF_Q_BASE; qm->qp_num = pf_q_num; qm->debug.curr_qm_qp_num = pf_q_num; qm->qm_list = &sec_devices; } else if (qm->fun_type == QM_HW_VF && qm->ver == QM_HW_V1) { /* * have no way to get qm configure in VM in v1 hardware, * so currently force PF to uses SEC_PF_DEF_Q_NUM, and force * to trigger only one VF in v1 hardware. * v2 hardware has no such problem. */ qm->qp_base = SEC_PF_DEF_Q_NUM; qm->qp_num = SEC_QUEUE_NUM_V1 - SEC_PF_DEF_Q_NUM; } ret = hisi_qm_init(qm); if (ret) { pci_err(qm->pdev, "Failed to init sec qm configures!\n"); return ret; } ret = sec_set_qm_algs(qm); if (ret) { pci_err(qm->pdev, "Failed to set sec algs!\n"); hisi_qm_uninit(qm); } return ret; } static void sec_qm_uninit(struct hisi_qm *qm) { hisi_qm_uninit(qm); } static int sec_probe_init(struct sec_dev *sec) { u32 type_rate = SEC_SHAPER_TYPE_RATE; struct hisi_qm *qm = &sec->qm; int ret; if (qm->fun_type == QM_HW_PF) { ret = sec_pf_probe_init(sec); if (ret) return ret; /* enable shaper type 0 */ if (qm->ver >= QM_HW_V3) { type_rate |= QM_SHAPER_ENABLE; qm->type_rate = type_rate; } } return 0; } static void sec_probe_uninit(struct hisi_qm *qm) { hisi_qm_dev_err_uninit(qm); } static void sec_iommu_used_check(struct sec_dev *sec) { struct iommu_domain *domain; struct device *dev = &sec->qm.pdev->dev; domain = iommu_get_domain_for_dev(dev); /* Check if iommu is used */ sec->iommu_used = false; if (domain) { if (domain->type & __IOMMU_DOMAIN_PAGING) sec->iommu_used = true; dev_info(dev, "SMMU Opened, the iommu type = %u\n", domain->type); } } static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct sec_dev *sec; struct hisi_qm *qm; int ret; sec = devm_kzalloc(&pdev->dev, sizeof(*sec), GFP_KERNEL); if (!sec) return -ENOMEM; qm = &sec->qm; ret = sec_qm_init(qm, pdev); if (ret) { pci_err(pdev, "Failed to init SEC QM (%d)!\n", ret); return ret; } sec->ctx_q_num = ctx_q_num; sec_iommu_used_check(sec); ret = sec_probe_init(sec); if (ret) { pci_err(pdev, "Failed to probe!\n"); goto err_qm_uninit; } ret = hisi_qm_start(qm); if (ret) { pci_err(pdev, "Failed to start sec qm!\n"); goto err_probe_uninit; } ret = sec_debugfs_init(qm); if (ret) pci_warn(pdev, "Failed to init debugfs!\n"); if (qm->qp_num >= ctx_q_num) { ret = hisi_qm_alg_register(qm, &sec_devices); if (ret < 0) { pr_err("Failed to register driver to crypto.\n"); goto err_qm_stop; } } else { pci_warn(qm->pdev, "Failed to use kernel mode, qp not enough!\n"); } if (qm->uacce) { ret = uacce_register(qm->uacce); if (ret) { pci_err(pdev, "failed to register uacce (%d)!\n", ret); goto err_alg_unregister; } } if (qm->fun_type == QM_HW_PF && vfs_num) { ret = hisi_qm_sriov_enable(pdev, vfs_num); if (ret < 0) goto err_alg_unregister; } hisi_qm_pm_init(qm); return 0; err_alg_unregister: if (qm->qp_num >= ctx_q_num) hisi_qm_alg_unregister(qm, &sec_devices); err_qm_stop: sec_debugfs_exit(qm); hisi_qm_stop(qm, QM_NORMAL); err_probe_uninit: sec_show_last_regs_uninit(qm); sec_probe_uninit(qm); err_qm_uninit: sec_qm_uninit(qm); return ret; } static void sec_remove(struct pci_dev *pdev) { struct hisi_qm *qm = pci_get_drvdata(pdev); hisi_qm_pm_uninit(qm); hisi_qm_wait_task_finish(qm, &sec_devices); if (qm->qp_num >= ctx_q_num) hisi_qm_alg_unregister(qm, &sec_devices); 
if (qm->fun_type == QM_HW_PF && qm->vfs_num) hisi_qm_sriov_disable(pdev, true); sec_debugfs_exit(qm); (void)hisi_qm_stop(qm, QM_NORMAL); if (qm->fun_type == QM_HW_PF) sec_debug_regs_clear(qm); sec_show_last_regs_uninit(qm); sec_probe_uninit(qm); sec_qm_uninit(qm); } static const struct dev_pm_ops sec_pm_ops = { SET_RUNTIME_PM_OPS(hisi_qm_suspend, hisi_qm_resume, NULL) }; static const struct pci_error_handlers sec_err_handler = { .error_detected = hisi_qm_dev_err_detected, .slot_reset = hisi_qm_dev_slot_reset, .reset_prepare = hisi_qm_reset_prepare, .reset_done = hisi_qm_reset_done, }; static struct pci_driver sec_pci_driver = { .name = "hisi_sec2", .id_table = sec_dev_ids, .probe = sec_probe, .remove = sec_remove, .err_handler = &sec_err_handler, .sriov_configure = hisi_qm_sriov_configure, .shutdown = hisi_qm_dev_shutdown, .driver.pm = &sec_pm_ops, }; struct pci_driver *hisi_sec_get_pf_driver(void) { return &sec_pci_driver; } EXPORT_SYMBOL_GPL(hisi_sec_get_pf_driver); static void sec_register_debugfs(void) { if (!debugfs_initialized()) return; sec_debugfs_root = debugfs_create_dir("hisi_sec2", NULL); } static void sec_unregister_debugfs(void) { debugfs_remove_recursive(sec_debugfs_root); } static int __init sec_init(void) { int ret; hisi_qm_init_list(&sec_devices); sec_register_debugfs(); ret = pci_register_driver(&sec_pci_driver); if (ret < 0) { sec_unregister_debugfs(); pr_err("Failed to register pci driver.\n"); return ret; } return 0; } static void __exit sec_exit(void) { pci_unregister_driver(&sec_pci_driver); sec_unregister_debugfs(); } module_init(sec_init); module_exit(sec_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Zaibo Xu <[email protected]>"); MODULE_AUTHOR("Longfang Liu <[email protected]>"); MODULE_AUTHOR("Kai Ye <[email protected]>"); MODULE_AUTHOR("Wei Zhang <[email protected]>"); MODULE_DESCRIPTION("Driver for HiSilicon SEC accelerator");
linux-master
drivers/crypto/hisilicon/sec2/sec_main.c
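A detail of sec_main.c above worth isolating is how sec_get_alg_bitmap() builds one 64-bit algorithm mask from two 32-bit capability words and how group masks such as SEC_AEAD_BITMAP (built from GENMASK_ULL ranges) are tested against it. The sketch below is a self-contained userspace illustration, not the driver's code; the capability values echo the SEC_DRV_ALG_BITMAP defaults listed in the table above, and genmask_ull() is a local helper modeled on GENMASK_ULL.

/*
 * Standalone illustration (not the driver's code) of assembling the
 * 64-bit algorithm bitmap from two 32-bit capability words and testing
 * it against a per-group mask in the style of SEC_AEAD_BITMAP.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ALG_BITMAP_SHIFT	32

/* local helper with the same shape as GENMASK_ULL(h, l): bits h..l set */
static uint64_t genmask_ull(unsigned int h, unsigned int l)
{
	return (~0ULL >> (63 - h)) & (~0ULL << l);
}

static uint64_t demo_get_alg_bitmap(uint32_t cap_high, uint32_t cap_low)
{
	/* mirrors sec_get_alg_bitmap(): high word shifted above the low word */
	return ((uint64_t)cap_high << DEMO_ALG_BITMAP_SHIFT) | (uint64_t)cap_low;
}

int main(void)
{
	/* example values taken from the SEC_DRV_ALG_BITMAP defaults above */
	uint64_t alg_mask = demo_get_alg_bitmap(0x395C, 0x18050CB);

	/* group mask in the style of SEC_AEAD_BITMAP */
	uint64_t aead_grp = genmask_ull(7, 6) | genmask_ull(18, 17) |
			    genmask_ull(45, 43);

	if (alg_mask & aead_grp)
		printf("at least one AEAD algorithm is advertised\n");

	printf("alg_mask = 0x%016llx\n", (unsigned long long)alg_mask);
	return 0;
}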
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2016 Broadcom */ #include <linux/kernel.h> #include <linux/string.h> #include "util.h" #include "spu.h" #include "spum.h" #include "cipher.h" char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes", "sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" }; char *aead_alg_name[] = { "ccm(aes)", "gcm(aes)", "authenc" }; /* Assumes SPU-M messages are in big endian */ void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len) { u8 *ptr = buf; struct SPUHEADER *spuh = (struct SPUHEADER *)buf; unsigned int hash_key_len = 0; unsigned int hash_state_len = 0; unsigned int cipher_key_len = 0; unsigned int iv_len; u32 pflags; u32 cflags; u32 ecf; u32 cipher_alg; u32 cipher_mode; u32 cipher_type; u32 hash_alg; u32 hash_mode; u32 hash_type; u32 sctx_size; /* SCTX length in words */ u32 sctx_pl_len; /* SCTX payload length in bytes */ packet_log("\n"); packet_log("SPU Message header %p len: %u\n", buf, buf_len); /* ========== Decode MH ========== */ packet_log(" MH 0x%08x\n", be32_to_cpup((__be32 *)ptr)); if (spuh->mh.flags & MH_SCTX_PRES) packet_log(" SCTX present\n"); if (spuh->mh.flags & MH_BDESC_PRES) packet_log(" BDESC present\n"); if (spuh->mh.flags & MH_MFM_PRES) packet_log(" MFM present\n"); if (spuh->mh.flags & MH_BD_PRES) packet_log(" BD present\n"); if (spuh->mh.flags & MH_HASH_PRES) packet_log(" HASH present\n"); if (spuh->mh.flags & MH_SUPDT_PRES) packet_log(" SUPDT present\n"); packet_log(" Opcode 0x%02x\n", spuh->mh.op_code); ptr += sizeof(spuh->mh) + sizeof(spuh->emh); /* skip emh. unused */ /* ========== Decode SCTX ========== */ if (spuh->mh.flags & MH_SCTX_PRES) { pflags = be32_to_cpu(spuh->sa.proto_flags); packet_log(" SCTX[0] 0x%08x\n", pflags); sctx_size = pflags & SCTX_SIZE; packet_log(" Size %u words\n", sctx_size); cflags = be32_to_cpu(spuh->sa.cipher_flags); packet_log(" SCTX[1] 0x%08x\n", cflags); packet_log(" Inbound:%lu (1:decrypt/vrfy 0:encrypt/auth)\n", (cflags & CIPHER_INBOUND) >> CIPHER_INBOUND_SHIFT); packet_log(" Order:%lu (1:AuthFirst 0:EncFirst)\n", (cflags & CIPHER_ORDER) >> CIPHER_ORDER_SHIFT); packet_log(" ICV_IS_512:%lx\n", (cflags & ICV_IS_512) >> ICV_IS_512_SHIFT); cipher_alg = (cflags & CIPHER_ALG) >> CIPHER_ALG_SHIFT; cipher_mode = (cflags & CIPHER_MODE) >> CIPHER_MODE_SHIFT; cipher_type = (cflags & CIPHER_TYPE) >> CIPHER_TYPE_SHIFT; packet_log(" Crypto Alg:%u Mode:%u Type:%u\n", cipher_alg, cipher_mode, cipher_type); hash_alg = (cflags & HASH_ALG) >> HASH_ALG_SHIFT; hash_mode = (cflags & HASH_MODE) >> HASH_MODE_SHIFT; hash_type = (cflags & HASH_TYPE) >> HASH_TYPE_SHIFT; packet_log(" Hash Alg:%x Mode:%x Type:%x\n", hash_alg, hash_mode, hash_type); packet_log(" UPDT_Offset:%u\n", cflags & UPDT_OFST); ecf = be32_to_cpu(spuh->sa.ecf); packet_log(" SCTX[2] 0x%08x\n", ecf); packet_log(" WriteICV:%lu CheckICV:%lu ICV_SIZE:%u ", (ecf & INSERT_ICV) >> INSERT_ICV_SHIFT, (ecf & CHECK_ICV) >> CHECK_ICV_SHIFT, (ecf & ICV_SIZE) >> ICV_SIZE_SHIFT); packet_log("BD_SUPPRESS:%lu\n", (ecf & BD_SUPPRESS) >> BD_SUPPRESS_SHIFT); packet_log(" SCTX_IV:%lu ExplicitIV:%lu GenIV:%lu ", (ecf & SCTX_IV) >> SCTX_IV_SHIFT, (ecf & EXPLICIT_IV) >> EXPLICIT_IV_SHIFT, (ecf & GEN_IV) >> GEN_IV_SHIFT); packet_log("IV_OV_OFST:%lu EXP_IV_SIZE:%u\n", (ecf & IV_OFFSET) >> IV_OFFSET_SHIFT, ecf & EXP_IV_SIZE); ptr += sizeof(struct SCTX); if (hash_alg && hash_mode) { char *name = "NONE"; switch (hash_alg) { case HASH_ALG_MD5: hash_key_len = 16; name = "MD5"; break; case HASH_ALG_SHA1: hash_key_len = 20; name = "SHA1"; 
break; case HASH_ALG_SHA224: hash_key_len = 28; name = "SHA224"; break; case HASH_ALG_SHA256: hash_key_len = 32; name = "SHA256"; break; case HASH_ALG_SHA384: hash_key_len = 48; name = "SHA384"; break; case HASH_ALG_SHA512: hash_key_len = 64; name = "SHA512"; break; case HASH_ALG_AES: hash_key_len = 0; name = "AES"; break; case HASH_ALG_NONE: break; } packet_log(" Auth Key Type:%s Length:%u Bytes\n", name, hash_key_len); packet_dump(" KEY: ", ptr, hash_key_len); ptr += hash_key_len; } else if ((hash_alg == HASH_ALG_AES) && (hash_mode == HASH_MODE_XCBC)) { char *name = "NONE"; switch (cipher_type) { case CIPHER_TYPE_AES128: hash_key_len = 16; name = "AES128-XCBC"; break; case CIPHER_TYPE_AES192: hash_key_len = 24; name = "AES192-XCBC"; break; case CIPHER_TYPE_AES256: hash_key_len = 32; name = "AES256-XCBC"; break; } packet_log(" Auth Key Type:%s Length:%u Bytes\n", name, hash_key_len); packet_dump(" KEY: ", ptr, hash_key_len); ptr += hash_key_len; } if (hash_alg && (hash_mode == HASH_MODE_NONE) && (hash_type == HASH_TYPE_UPDT)) { char *name = "NONE"; switch (hash_alg) { case HASH_ALG_MD5: hash_state_len = 16; name = "MD5"; break; case HASH_ALG_SHA1: hash_state_len = 20; name = "SHA1"; break; case HASH_ALG_SHA224: hash_state_len = 32; name = "SHA224"; break; case HASH_ALG_SHA256: hash_state_len = 32; name = "SHA256"; break; case HASH_ALG_SHA384: hash_state_len = 48; name = "SHA384"; break; case HASH_ALG_SHA512: hash_state_len = 64; name = "SHA512"; break; case HASH_ALG_AES: hash_state_len = 0; name = "AES"; break; case HASH_ALG_NONE: break; } packet_log(" Auth State Type:%s Length:%u Bytes\n", name, hash_state_len); packet_dump(" State: ", ptr, hash_state_len); ptr += hash_state_len; } if (cipher_alg) { char *name = "NONE"; switch (cipher_alg) { case CIPHER_ALG_DES: cipher_key_len = 8; name = "DES"; break; case CIPHER_ALG_3DES: cipher_key_len = 24; name = "3DES"; break; case CIPHER_ALG_AES: switch (cipher_type) { case CIPHER_TYPE_AES128: cipher_key_len = 16; name = "AES128"; break; case CIPHER_TYPE_AES192: cipher_key_len = 24; name = "AES192"; break; case CIPHER_TYPE_AES256: cipher_key_len = 32; name = "AES256"; break; } break; case CIPHER_ALG_NONE: break; } packet_log(" Cipher Key Type:%s Length:%u Bytes\n", name, cipher_key_len); /* XTS has two keys */ if (cipher_mode == CIPHER_MODE_XTS) { packet_dump(" KEY2: ", ptr, cipher_key_len); ptr += cipher_key_len; packet_dump(" KEY1: ", ptr, cipher_key_len); ptr += cipher_key_len; cipher_key_len *= 2; } else { packet_dump(" KEY: ", ptr, cipher_key_len); ptr += cipher_key_len; } if (ecf & SCTX_IV) { sctx_pl_len = sctx_size * sizeof(u32) - sizeof(struct SCTX); iv_len = sctx_pl_len - (hash_key_len + hash_state_len + cipher_key_len); packet_log(" IV Length:%u Bytes\n", iv_len); packet_dump(" IV: ", ptr, iv_len); ptr += iv_len; } } } /* ========== Decode BDESC ========== */ if (spuh->mh.flags & MH_BDESC_PRES) { struct BDESC_HEADER *bdesc = (struct BDESC_HEADER *)ptr; packet_log(" BDESC[0] 0x%08x\n", be32_to_cpup((__be32 *)ptr)); packet_log(" OffsetMAC:%u LengthMAC:%u\n", be16_to_cpu(bdesc->offset_mac), be16_to_cpu(bdesc->length_mac)); ptr += sizeof(u32); packet_log(" BDESC[1] 0x%08x\n", be32_to_cpup((__be32 *)ptr)); packet_log(" OffsetCrypto:%u LengthCrypto:%u\n", be16_to_cpu(bdesc->offset_crypto), be16_to_cpu(bdesc->length_crypto)); ptr += sizeof(u32); packet_log(" BDESC[2] 0x%08x\n", be32_to_cpup((__be32 *)ptr)); packet_log(" OffsetICV:%u OffsetIV:%u\n", be16_to_cpu(bdesc->offset_icv), be16_to_cpu(bdesc->offset_iv)); ptr += sizeof(u32); } /* 
========== Decode BD ========== */ if (spuh->mh.flags & MH_BD_PRES) { struct BD_HEADER *bd = (struct BD_HEADER *)ptr; packet_log(" BD[0] 0x%08x\n", be32_to_cpup((__be32 *)ptr)); packet_log(" Size:%ubytes PrevLength:%u\n", be16_to_cpu(bd->size), be16_to_cpu(bd->prev_length)); ptr += 4; } /* Double check sanity */ if (buf + buf_len != ptr) { packet_log(" Packet parsed incorrectly. "); packet_log("buf:%p buf_len:%u buf+buf_len:%p ptr:%p\n", buf, buf_len, buf + buf_len, ptr); } packet_log("\n"); } /** * spum_ns2_ctx_max_payload() - Determine the max length of the payload for a * SPU message for a given cipher and hash alg context. * @cipher_alg: The cipher algorithm * @cipher_mode: The cipher mode * @blocksize: The size of a block of data for this algo * * The max payload must be a multiple of the blocksize so that if a request is * too large to fit in a single SPU message, the request can be broken into * max_payload sized chunks. Each chunk must be a multiple of blocksize. * * Return: Max payload length in bytes */ u32 spum_ns2_ctx_max_payload(enum spu_cipher_alg cipher_alg, enum spu_cipher_mode cipher_mode, unsigned int blocksize) { u32 max_payload = SPUM_NS2_MAX_PAYLOAD; u32 excess; /* In XTS on SPU-M, we'll need to insert tweak before input data */ if (cipher_mode == CIPHER_MODE_XTS) max_payload -= SPU_XTS_TWEAK_SIZE; excess = max_payload % blocksize; return max_payload - excess; } /** * spum_nsp_ctx_max_payload() - Determine the max length of the payload for a * SPU message for a given cipher and hash alg context. * @cipher_alg: The cipher algorithm * @cipher_mode: The cipher mode * @blocksize: The size of a block of data for this algo * * The max payload must be a multiple of the blocksize so that if a request is * too large to fit in a single SPU message, the request can be broken into * max_payload sized chunks. Each chunk must be a multiple of blocksize. * * Return: Max payload length in bytes */ u32 spum_nsp_ctx_max_payload(enum spu_cipher_alg cipher_alg, enum spu_cipher_mode cipher_mode, unsigned int blocksize) { u32 max_payload = SPUM_NSP_MAX_PAYLOAD; u32 excess; /* In XTS on SPU-M, we'll need to insert tweak before input data */ if (cipher_mode == CIPHER_MODE_XTS) max_payload -= SPU_XTS_TWEAK_SIZE; excess = max_payload % blocksize; return max_payload - excess; } /** spum_payload_length() - Given a SPU-M message header, extract the payload * length. * @spu_hdr: Start of SPU header * * Assumes just MH, EMH, BD (no SCTX, BDESC. Works for response frames. * * Return: payload length in bytes */ u32 spum_payload_length(u8 *spu_hdr) { struct BD_HEADER *bd; u32 pl_len; /* Find BD header. skip MH, EMH */ bd = (struct BD_HEADER *)(spu_hdr + 8); pl_len = be16_to_cpu(bd->size); return pl_len; } /** * spum_response_hdr_len() - Given the length of the hash key and encryption * key, determine the expected length of a SPU response header. * @auth_key_len: authentication key length (bytes) * @enc_key_len: encryption key length (bytes) * @is_hash: true if response message is for a hash operation * * Return: length of SPU response header (bytes) */ u16 spum_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash) { if (is_hash) return SPU_HASH_RESP_HDR_LEN; else return SPU_RESP_HDR_LEN; } /** * spum_hash_pad_len() - Calculate the length of hash padding required to extend * data to a full block size. 
* @hash_alg: hash algorithm * @hash_mode: hash mode * @chunksize: length of data, in bytes * @hash_block_size: size of a block of data for hash algorithm * * Reserve space for 1 byte (0x80) start of pad and the total length as u64 * * Return: length of hash pad in bytes */ u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode, u32 chunksize, u16 hash_block_size) { unsigned int length_len; unsigned int used_space_last_block; int hash_pad_len; /* AES-XCBC hash requires just padding to next block boundary */ if ((hash_alg == HASH_ALG_AES) && (hash_mode == HASH_MODE_XCBC)) { used_space_last_block = chunksize % hash_block_size; hash_pad_len = hash_block_size - used_space_last_block; if (hash_pad_len >= hash_block_size) hash_pad_len -= hash_block_size; return hash_pad_len; } used_space_last_block = chunksize % hash_block_size + 1; if ((hash_alg == HASH_ALG_SHA384) || (hash_alg == HASH_ALG_SHA512)) length_len = 2 * sizeof(u64); else length_len = sizeof(u64); used_space_last_block += length_len; hash_pad_len = hash_block_size - used_space_last_block; if (hash_pad_len < 0) hash_pad_len += hash_block_size; hash_pad_len += 1 + length_len; return hash_pad_len; } /** * spum_gcm_ccm_pad_len() - Determine the required length of GCM or CCM padding. * @cipher_mode: Algo type * @data_size: Length of plaintext (bytes) * * Return: Length of padding, in bytes */ u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode, unsigned int data_size) { u32 pad_len = 0; u32 m1 = SPU_GCM_CCM_ALIGN - 1; if ((cipher_mode == CIPHER_MODE_GCM) || (cipher_mode == CIPHER_MODE_CCM)) pad_len = ((data_size + m1) & ~m1) - data_size; return pad_len; } /** * spum_assoc_resp_len() - Determine the size of the receive buffer required to * catch associated data. * @cipher_mode: cipher mode * @assoc_len: length of associated data (bytes) * @iv_len: length of IV (bytes) * @is_encrypt: true if encrypting. false if decrypting. * * Return: length of associated data in response message (bytes) */ u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode, unsigned int assoc_len, unsigned int iv_len, bool is_encrypt) { u32 buflen = 0; u32 pad; if (assoc_len) buflen = assoc_len; if (cipher_mode == CIPHER_MODE_GCM) { /* AAD needs to be padded in responses too */ pad = spum_gcm_ccm_pad_len(cipher_mode, buflen); buflen += pad; } if (cipher_mode == CIPHER_MODE_CCM) { /* * AAD needs to be padded in responses too * for CCM, len + 2 needs to be 128-bit aligned. */ pad = spum_gcm_ccm_pad_len(cipher_mode, buflen + 2); buflen += pad; } return buflen; } /** * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included * in a SPU request after the AAD and before the payload. * @cipher_mode: cipher mode * @iv_len: initialization vector length in bytes * * In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need * to include the IV as a separate field in the SPU request msg. * * Return: Length of AEAD IV in bytes */ u8 spum_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len) { return 0; } /** * spum_hash_type() - Determine the type of hash operation. * @src_sent: The number of bytes in the current request that have already * been sent to the SPU to be hashed. * * We do not use HASH_TYPE_FULL for requests that fit in a single SPU message. * Using FULL causes failures (such as when the string to be hashed is empty). * For similar reasons, we never use HASH_TYPE_FIN. Instead, submit messages * as INIT or UPDT and do the hash padding in sw. */ enum hash_type spum_hash_type(u32 src_sent) { return src_sent ? 
HASH_TYPE_UPDT : HASH_TYPE_INIT; } /** * spum_digest_size() - Determine the size of a hash digest to expect the SPU to * return. * @alg_digest_size: Number of bytes in the final digest for the given algo * @alg: The hash algorithm * @htype: Type of hash operation (init, update, full, etc) * * When doing incremental hashing for an algorithm with a truncated hash * (e.g., SHA224), the SPU returns the full digest so that it can be fed back as * a partial result for the next chunk. */ u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg, enum hash_type htype) { u32 digestsize = alg_digest_size; /* SPU returns complete digest when doing incremental hash and truncated * hash algo. */ if ((htype == HASH_TYPE_INIT) || (htype == HASH_TYPE_UPDT)) { if (alg == HASH_ALG_SHA224) digestsize = SHA256_DIGEST_SIZE; else if (alg == HASH_ALG_SHA384) digestsize = SHA512_DIGEST_SIZE; } return digestsize; } /** * spum_create_request() - Build a SPU request message header, up to and * including the BD header. Construct the message starting at spu_hdr. Caller * should allocate this buffer in DMA-able memory at least SPU_HEADER_ALLOC_LEN * bytes long. * @spu_hdr: Start of buffer where SPU request header is to be written * @req_opts: SPU request message options * @cipher_parms: Parameters related to cipher algorithm * @hash_parms: Parameters related to hash algorithm * @aead_parms: Parameters related to AEAD operation * @data_size: Length of data to be encrypted or authenticated. If AEAD, does * not include length of AAD. * * Return: the length of the SPU header in bytes. 0 if an error occurs. */ u32 spum_create_request(u8 *spu_hdr, struct spu_request_opts *req_opts, struct spu_cipher_parms *cipher_parms, struct spu_hash_parms *hash_parms, struct spu_aead_parms *aead_parms, unsigned int data_size) { struct SPUHEADER *spuh; struct BDESC_HEADER *bdesc; struct BD_HEADER *bd; u8 *ptr; u32 protocol_bits = 0; u32 cipher_bits = 0; u32 ecf_bits = 0; u8 sctx_words = 0; unsigned int buf_len = 0; /* size of the cipher payload */ unsigned int cipher_len = hash_parms->prebuf_len + data_size + hash_parms->pad_len; /* offset of prebuf or data from end of BD header */ unsigned int cipher_offset = aead_parms->assoc_size + aead_parms->iv_len + aead_parms->aad_pad_len; /* total size of the DB data (without STAT word padding) */ unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size, aead_parms->iv_len, hash_parms->prebuf_len, data_size, aead_parms->aad_pad_len, aead_parms->data_pad_len, hash_parms->pad_len); unsigned int auth_offset = 0; unsigned int offset_iv = 0; /* size/offset of the auth payload */ unsigned int auth_len; auth_len = real_db_size; if (req_opts->is_aead && req_opts->is_inbound) cipher_len -= hash_parms->digestsize; if (req_opts->is_aead && req_opts->is_inbound) auth_len -= hash_parms->digestsize; if ((hash_parms->alg == HASH_ALG_AES) && (hash_parms->mode == HASH_MODE_XCBC)) { auth_len -= hash_parms->pad_len; cipher_len -= hash_parms->pad_len; } flow_log("%s()\n", __func__); flow_log(" in:%u authFirst:%u\n", req_opts->is_inbound, req_opts->auth_first); flow_log(" %s. 
cipher alg:%u mode:%u type %u\n", spu_alg_name(cipher_parms->alg, cipher_parms->mode), cipher_parms->alg, cipher_parms->mode, cipher_parms->type); flow_log(" key: %d\n", cipher_parms->key_len); flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len); flow_log(" iv: %d\n", cipher_parms->iv_len); flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len); flow_log(" auth alg:%u mode:%u type %u\n", hash_parms->alg, hash_parms->mode, hash_parms->type); flow_log(" digestsize: %u\n", hash_parms->digestsize); flow_log(" authkey: %d\n", hash_parms->key_len); flow_dump(" authkey: ", hash_parms->key_buf, hash_parms->key_len); flow_log(" assoc_size:%u\n", aead_parms->assoc_size); flow_log(" prebuf_len:%u\n", hash_parms->prebuf_len); flow_log(" data_size:%u\n", data_size); flow_log(" hash_pad_len:%u\n", hash_parms->pad_len); flow_log(" real_db_size:%u\n", real_db_size); flow_log(" auth_offset:%u auth_len:%u cipher_offset:%u cipher_len:%u\n", auth_offset, auth_len, cipher_offset, cipher_len); flow_log(" aead_iv: %u\n", aead_parms->iv_len); /* starting out: zero the header (plus some) */ ptr = spu_hdr; memset(ptr, 0, sizeof(struct SPUHEADER)); /* format master header word */ /* Do not set the next bit even though the datasheet says to */ spuh = (struct SPUHEADER *)ptr; ptr += sizeof(struct SPUHEADER); buf_len += sizeof(struct SPUHEADER); spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC; spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES); /* Format sctx word 0 (protocol_bits) */ sctx_words = 3; /* size in words */ /* Format sctx word 1 (cipher_bits) */ if (req_opts->is_inbound) cipher_bits |= CIPHER_INBOUND; if (req_opts->auth_first) cipher_bits |= CIPHER_ORDER; /* Set the crypto parameters in the cipher.flags */ cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT; cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT; cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT; /* Set the auth parameters in the cipher.flags */ cipher_bits |= hash_parms->alg << HASH_ALG_SHIFT; cipher_bits |= hash_parms->mode << HASH_MODE_SHIFT; cipher_bits |= hash_parms->type << HASH_TYPE_SHIFT; /* * Format sctx extensions if required, and update main fields if * required) */ if (hash_parms->alg) { /* Write the authentication key material if present */ if (hash_parms->key_len) { memcpy(ptr, hash_parms->key_buf, hash_parms->key_len); ptr += hash_parms->key_len; buf_len += hash_parms->key_len; sctx_words += hash_parms->key_len / 4; } if ((cipher_parms->mode == CIPHER_MODE_GCM) || (cipher_parms->mode == CIPHER_MODE_CCM)) /* unpadded length */ offset_iv = aead_parms->assoc_size; /* if GCM/CCM we need to write ICV into the payload */ if (!req_opts->is_inbound) { if ((cipher_parms->mode == CIPHER_MODE_GCM) || (cipher_parms->mode == CIPHER_MODE_CCM)) ecf_bits |= 1 << INSERT_ICV_SHIFT; } else { ecf_bits |= CHECK_ICV; } /* Inform the SPU of the ICV size (in words) */ if (hash_parms->digestsize == 64) cipher_bits |= ICV_IS_512; else ecf_bits |= (hash_parms->digestsize / 4) << ICV_SIZE_SHIFT; } if (req_opts->bd_suppress) ecf_bits |= BD_SUPPRESS; /* copy the encryption keys in the SAD entry */ if (cipher_parms->alg) { if (cipher_parms->key_len) { memcpy(ptr, cipher_parms->key_buf, cipher_parms->key_len); ptr += cipher_parms->key_len; buf_len += cipher_parms->key_len; sctx_words += cipher_parms->key_len / 4; } /* * if encrypting then set IV size, use SCTX IV unless no IV * given here */ if (cipher_parms->iv_buf && cipher_parms->iv_len) { /* Use SCTX IV */ ecf_bits |= SCTX_IV; /* cipher iv provided so put it in 
here */ memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len); ptr += cipher_parms->iv_len; buf_len += cipher_parms->iv_len; sctx_words += cipher_parms->iv_len / 4; } } /* * RFC4543 (GMAC/ESP) requires data to be sent as part of AAD * so we need to override the BDESC parameters. */ if (req_opts->is_rfc4543) { if (req_opts->is_inbound) data_size -= hash_parms->digestsize; offset_iv = aead_parms->assoc_size + data_size; cipher_len = 0; cipher_offset = offset_iv; auth_len = cipher_offset + aead_parms->data_pad_len; } /* write in the total sctx length now that we know it */ protocol_bits |= sctx_words; /* Endian adjust the SCTX */ spuh->sa.proto_flags = cpu_to_be32(protocol_bits); spuh->sa.cipher_flags = cpu_to_be32(cipher_bits); spuh->sa.ecf = cpu_to_be32(ecf_bits); /* === create the BDESC section === */ bdesc = (struct BDESC_HEADER *)ptr; bdesc->offset_mac = cpu_to_be16(auth_offset); bdesc->length_mac = cpu_to_be16(auth_len); bdesc->offset_crypto = cpu_to_be16(cipher_offset); bdesc->length_crypto = cpu_to_be16(cipher_len); /* * CCM in SPU-M requires that ICV not be in same 32-bit word as data or * padding. So account for padding as necessary. */ if (cipher_parms->mode == CIPHER_MODE_CCM) auth_len += spum_wordalign_padlen(auth_len); bdesc->offset_icv = cpu_to_be16(auth_len); bdesc->offset_iv = cpu_to_be16(offset_iv); ptr += sizeof(struct BDESC_HEADER); buf_len += sizeof(struct BDESC_HEADER); /* === no MFM section === */ /* === create the BD section === */ /* add the BD header */ bd = (struct BD_HEADER *)ptr; bd->size = cpu_to_be16(real_db_size); bd->prev_length = 0; ptr += sizeof(struct BD_HEADER); buf_len += sizeof(struct BD_HEADER); packet_dump(" SPU request header: ", spu_hdr, buf_len); return buf_len; } /** * spum_cipher_req_init() - Build a SPU request message header, up to and * including the BD header. * @spu_hdr: Start of SPU request header (MH) * @cipher_parms: Parameters that describe the cipher request * * Construct the message starting at spu_hdr. Caller should allocate this buffer * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long. * * Return: the length of the SPU header in bytes. 0 if an error occurs. 
*/ u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) { struct SPUHEADER *spuh; u32 protocol_bits = 0; u32 cipher_bits = 0; u32 ecf_bits = 0; u8 sctx_words = 0; u8 *ptr = spu_hdr; flow_log("%s()\n", __func__); flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg, cipher_parms->mode, cipher_parms->type); flow_log(" cipher_iv_len: %u\n", cipher_parms->iv_len); flow_log(" key: %d\n", cipher_parms->key_len); flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len); /* starting out: zero the header (plus some) */ memset(spu_hdr, 0, sizeof(struct SPUHEADER)); ptr += sizeof(struct SPUHEADER); /* format master header word */ /* Do not set the next bit even though the datasheet says to */ spuh = (struct SPUHEADER *)spu_hdr; spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC; spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES); /* Format sctx word 0 (protocol_bits) */ sctx_words = 3; /* size in words */ /* copy the encryption keys in the SAD entry */ if (cipher_parms->alg) { if (cipher_parms->key_len) { ptr += cipher_parms->key_len; sctx_words += cipher_parms->key_len / 4; } /* * if encrypting then set IV size, use SCTX IV unless no IV * given here */ if (cipher_parms->iv_len) { /* Use SCTX IV */ ecf_bits |= SCTX_IV; ptr += cipher_parms->iv_len; sctx_words += cipher_parms->iv_len / 4; } } /* Set the crypto parameters in the cipher.flags */ cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT; cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT; cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT; /* copy the encryption keys in the SAD entry */ if (cipher_parms->alg && cipher_parms->key_len) memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len); /* write in the total sctx length now that we know it */ protocol_bits |= sctx_words; /* Endian adjust the SCTX */ spuh->sa.proto_flags = cpu_to_be32(protocol_bits); /* Endian adjust the SCTX */ spuh->sa.cipher_flags = cpu_to_be32(cipher_bits); spuh->sa.ecf = cpu_to_be32(ecf_bits); packet_dump(" SPU request header: ", spu_hdr, sizeof(struct SPUHEADER)); return sizeof(struct SPUHEADER) + cipher_parms->key_len + cipher_parms->iv_len + sizeof(struct BDESC_HEADER) + sizeof(struct BD_HEADER); } /** * spum_cipher_req_finish() - Finish building a SPU request message header for a * block cipher request. Assumes much of the header was already filled in at * setkey() time in spu_cipher_req_init(). * @spu_hdr: Start of the request message header (MH field) * @spu_req_hdr_len: Length in bytes of the SPU request header * @is_inbound: 0 encrypt, 1 decrypt * @cipher_parms: Parameters describing cipher operation to be performed * @data_size: Length of the data in the BD field * * Assumes much of the header was already filled in at setkey() time in * spum_cipher_req_init(). * spum_cipher_req_init() fills in the encryption key. */ void spum_cipher_req_finish(u8 *spu_hdr, u16 spu_req_hdr_len, unsigned int is_inbound, struct spu_cipher_parms *cipher_parms, unsigned int data_size) { struct SPUHEADER *spuh; struct BDESC_HEADER *bdesc; struct BD_HEADER *bd; u8 *bdesc_ptr = spu_hdr + spu_req_hdr_len - (sizeof(struct BD_HEADER) + sizeof(struct BDESC_HEADER)); u32 cipher_bits; flow_log("%s()\n", __func__); flow_log(" in: %u\n", is_inbound); flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg, cipher_parms->type); /* * In XTS mode, API puts "i" parameter (block tweak) in IV. For * SPU-M, should be in start of the BD; tx_sg_create() copies it there. 
* IV in SPU msg for SPU-M should be 0, since that's the "j" parameter * (block ctr within larger data unit) - given we can send entire disk * block (<= 4KB) in 1 SPU msg, don't need to use this parameter. */ if (cipher_parms->mode == CIPHER_MODE_XTS) memset(cipher_parms->iv_buf, 0, cipher_parms->iv_len); flow_log(" iv len: %d\n", cipher_parms->iv_len); flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len); flow_log(" data_size: %u\n", data_size); /* format master header word */ /* Do not set the next bit even though the datasheet says to */ spuh = (struct SPUHEADER *)spu_hdr; /* cipher_bits was initialized at setkey time */ cipher_bits = be32_to_cpu(spuh->sa.cipher_flags); /* Format sctx word 1 (cipher_bits) */ if (is_inbound) cipher_bits |= CIPHER_INBOUND; else cipher_bits &= ~CIPHER_INBOUND; if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len) /* cipher iv provided so put it in here */ memcpy(bdesc_ptr - cipher_parms->iv_len, cipher_parms->iv_buf, cipher_parms->iv_len); spuh->sa.cipher_flags = cpu_to_be32(cipher_bits); /* === create the BDESC section === */ bdesc = (struct BDESC_HEADER *)bdesc_ptr; bdesc->offset_mac = 0; bdesc->length_mac = 0; bdesc->offset_crypto = 0; /* XTS mode, data_size needs to include tweak parameter */ if (cipher_parms->mode == CIPHER_MODE_XTS) bdesc->length_crypto = cpu_to_be16(data_size + SPU_XTS_TWEAK_SIZE); else bdesc->length_crypto = cpu_to_be16(data_size); bdesc->offset_icv = 0; bdesc->offset_iv = 0; /* === no MFM section === */ /* === create the BD section === */ /* add the BD header */ bd = (struct BD_HEADER *)(bdesc_ptr + sizeof(struct BDESC_HEADER)); bd->size = cpu_to_be16(data_size); /* XTS mode, data_size needs to include tweak parameter */ if (cipher_parms->mode == CIPHER_MODE_XTS) bd->size = cpu_to_be16(data_size + SPU_XTS_TWEAK_SIZE); else bd->size = cpu_to_be16(data_size); bd->prev_length = 0; packet_dump(" SPU request header: ", spu_hdr, spu_req_hdr_len); } /** * spum_request_pad() - Create pad bytes at the end of the data. * @pad_start: Start of buffer where pad bytes are to be written * @gcm_ccm_padding: length of GCM/CCM padding, in bytes * @hash_pad_len: Number of bytes of padding extend data to full block * @auth_alg: authentication algorithm * @auth_mode: authentication mode * @total_sent: length inserted at end of hash pad * @status_padding: Number of bytes of padding to align STATUS word * * There may be three forms of pad: * 1. GCM/CCM pad - for GCM/CCM mode ciphers, pad to 16-byte alignment * 2. hash pad - pad to a block length, with 0x80 data terminator and * size at the end * 3. 
STAT pad - to ensure the STAT field is 4-byte aligned */ void spum_request_pad(u8 *pad_start, u32 gcm_ccm_padding, u32 hash_pad_len, enum hash_alg auth_alg, enum hash_mode auth_mode, unsigned int total_sent, u32 status_padding) { u8 *ptr = pad_start; /* fix data alignent for GCM/CCM */ if (gcm_ccm_padding > 0) { flow_log(" GCM: padding to 16 byte alignment: %u bytes\n", gcm_ccm_padding); memset(ptr, 0, gcm_ccm_padding); ptr += gcm_ccm_padding; } if (hash_pad_len > 0) { /* clear the padding section */ memset(ptr, 0, hash_pad_len); if ((auth_alg == HASH_ALG_AES) && (auth_mode == HASH_MODE_XCBC)) { /* AES/XCBC just requires padding to be 0s */ ptr += hash_pad_len; } else { /* terminate the data */ *ptr = 0x80; ptr += (hash_pad_len - sizeof(u64)); /* add the size at the end as required per alg */ if (auth_alg == HASH_ALG_MD5) *(__le64 *)ptr = cpu_to_le64(total_sent * 8ull); else /* SHA1, SHA2-224, SHA2-256 */ *(__be64 *)ptr = cpu_to_be64(total_sent * 8ull); ptr += sizeof(u64); } } /* pad to a 4byte alignment for STAT */ if (status_padding > 0) { flow_log(" STAT: padding to 4 byte alignment: %u bytes\n", status_padding); memset(ptr, 0, status_padding); ptr += status_padding; } } /** * spum_xts_tweak_in_payload() - Indicate that SPUM DOES place the XTS tweak * field in the packet payload (rather than using IV) * * Return: 1 */ u8 spum_xts_tweak_in_payload(void) { return 1; } /** * spum_tx_status_len() - Return the length of the STATUS field in a SPU * response message. * * Return: Length of STATUS field in bytes. */ u8 spum_tx_status_len(void) { return SPU_TX_STATUS_LEN; } /** * spum_rx_status_len() - Return the length of the STATUS field in a SPU * response message. * * Return: Length of STATUS field in bytes. */ u8 spum_rx_status_len(void) { return SPU_RX_STATUS_LEN; } /** * spum_status_process() - Process the status from a SPU response message. * @statp: start of STATUS word * Return: * 0 - if status is good and response should be processed * !0 - status indicates an error and response is invalid */ int spum_status_process(u8 *statp) { u32 status; status = __be32_to_cpu(*(__be32 *)statp); flow_log("SPU response STATUS %#08x\n", status); if (status & SPU_STATUS_ERROR_FLAG) { pr_err("%s() Warning: Error result from SPU: %#08x\n", __func__, status); if (status & SPU_STATUS_INVALID_ICV) return SPU_INVALID_ICV; return -EBADMSG; } return 0; } /** * spum_ccm_update_iv() - Update the IV as per the requirements for CCM mode. * * @digestsize: Digest size of this request * @cipher_parms: (pointer to) cipher parmaeters, includes IV buf & IV len * @assoclen: Length of AAD data * @chunksize: length of input data to be sent in this req * @is_encrypt: true if this is an output/encrypt operation * @is_esp: true if this is an ESP / RFC4309 operation * */ void spum_ccm_update_iv(unsigned int digestsize, struct spu_cipher_parms *cipher_parms, unsigned int assoclen, unsigned int chunksize, bool is_encrypt, bool is_esp) { u8 L; /* L from CCM algorithm, length of plaintext data */ u8 mprime; /* M' from CCM algo, (M - 2) / 2, where M=authsize */ u8 adata; if (cipher_parms->iv_len != CCM_AES_IV_SIZE) { pr_err("%s(): Invalid IV len %d for CCM mode, should be %d\n", __func__, cipher_parms->iv_len, CCM_AES_IV_SIZE); return; } /* * IV needs to be formatted as follows: * * | Byte 0 | Bytes 1 - N | Bytes (N+1) - 15 | * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | Bits 7 - 0 | Bits 7 - 0 | * | 0 |Ad?|(M - 2) / 2| L - 1 | Nonce | Plaintext Length | * * Ad? 
= 1 if AAD present, 0 if not present * M = size of auth field, 8, 12, or 16 bytes (SPU-M) -or- * 4, 6, 8, 10, 12, 14, 16 bytes (SPU2) * L = Size of Plaintext Length field; Nonce size = 15 - L * * It appears that the crypto API already expects the L-1 portion * to be set in the first byte of the IV, which implicitly determines * the nonce size, and also fills in the nonce. But the other bits * in byte 0 as well as the plaintext length need to be filled in. * * In rfc4309/esp mode, L is not already in the supplied IV and * we need to fill it in, as well as move the IV data to be after * the salt */ if (is_esp) { L = CCM_ESP_L_VALUE; /* RFC4309 has fixed L */ } else { /* L' = plaintext length - 1 so Plaintext length is L' + 1 */ L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >> CCM_B0_L_PRIME_SHIFT) + 1; } mprime = (digestsize - 2) >> 1; /* M' = (M - 2) / 2 */ adata = (assoclen > 0); /* adata = 1 if any associated data */ cipher_parms->iv_buf[0] = (adata << CCM_B0_ADATA_SHIFT) | (mprime << CCM_B0_M_PRIME_SHIFT) | ((L - 1) << CCM_B0_L_PRIME_SHIFT); /* Nonce is already filled in by crypto API, and is 15 - L bytes */ /* Don't include digest in plaintext size when decrypting */ if (!is_encrypt) chunksize -= digestsize; /* Fill in length of plaintext, formatted to be L bytes long */ format_value_ccm(chunksize, &cipher_parms->iv_buf[15 - L + 1], L); } /** * spum_wordalign_padlen() - Given the length of a data field, determine the * padding required to align the data following this field on a 4-byte boundary. * @data_size: length of data field in bytes * * Return: length of status field padding, in bytes */ u32 spum_wordalign_padlen(u32 data_size) { return ((data_size + 3) & ~3) - data_size; }
linux-master
drivers/crypto/bcm/spu.c
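The CCM handling in spu.c above builds the 16-byte B0/counter IV by hand: spum_ccm_update_iv() fills the flags byte (the Adata bit, M' = (digestsize - 2) / 2, and L - 1) and format_value_ccm() right-aligns the plaintext length in the last L bytes. The stand-alone C sketch below mirrors that layout for illustration only; ccm_format_iv() is a hypothetical helper, and the bit positions are written inline from the RFC 3610 flags layout described in the driver comment rather than taken from the driver's CCM_B0_* macros.

/*
 * Illustrative sketch only: rebuild the CCM B0/IV layout described in
 * spum_ccm_update_iv().  Shift values follow RFC 3610 (Adata in bit 6,
 * M' in bits 5..3, L - 1 in bits 2..0); ccm_format_iv() is not a driver
 * function.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CCM_IV_SIZE 16	/* one AES block, as in CCM_AES_IV_SIZE */

static void ccm_format_iv(uint8_t iv[CCM_IV_SIZE], const uint8_t *nonce,
			  unsigned int L, unsigned int digestsize,
			  int has_aad, uint32_t msglen)
{
	unsigned int i;

	memset(iv, 0, CCM_IV_SIZE);
	/* byte 0: flags = Adata | M' | (L - 1) */
	iv[0] = (uint8_t)(((has_aad ? 1 : 0) << 6) |
			  (((digestsize - 2) / 2) << 3) | (L - 1));
	/* bytes 1 .. 15 - L: nonce */
	memcpy(&iv[1], nonce, 15 - L);
	/*
	 * last L bytes: plaintext length, big-endian, right-aligned
	 * (the role format_value_ccm() plays for the driver)
	 */
	for (i = 0; i < L && i < 4; i++)
		iv[15 - i] = (msglen >> (8 * i)) & 0xff;
}

int main(void)
{
	uint8_t nonce[11] = { 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5,
			      0xa6, 0xa7, 0xa8, 0xa9, 0xaa };
	uint8_t iv[CCM_IV_SIZE];
	unsigned int i;

	ccm_format_iv(iv, nonce, 4, 16, 1, 0x1234);
	for (i = 0; i < CCM_IV_SIZE; i++)
		printf("%02x ", iv[i]);
	printf("\n");	/* 7b a0 a1 .. aa 00 00 12 34 */
	return 0;
}

With L = 4 and a 16-byte digest, the flags byte comes out to 0x7b and the message length 0x1234 lands, big-endian, in the final four bytes, matching the field layout documented in spum_ccm_update_iv().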
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2016 Broadcom */ #include <linux/debugfs.h> #include "cipher.h" #include "util.h" /* offset of SPU_OFIFO_CTRL register */ #define SPU_OFIFO_CTRL 0x40 #define SPU_FIFO_WATERMARK 0x1FF /** * spu_sg_at_offset() - Find the scatterlist entry at a given distance from the * start of a scatterlist. * @sg: [in] Start of a scatterlist * @skip: [in] Distance from the start of the scatterlist, in bytes * @sge: [out] Scatterlist entry at skip bytes from start * @sge_offset: [out] Number of bytes from start of sge buffer to get to * requested distance. * * Return: 0 if entry found at requested distance * < 0 otherwise */ int spu_sg_at_offset(struct scatterlist *sg, unsigned int skip, struct scatterlist **sge, unsigned int *sge_offset) { /* byte index from start of sg to the end of the previous entry */ unsigned int index = 0; /* byte index from start of sg to the end of the current entry */ unsigned int next_index; next_index = sg->length; while (next_index <= skip) { sg = sg_next(sg); index = next_index; if (!sg) return -EINVAL; next_index += sg->length; } *sge_offset = skip - index; *sge = sg; return 0; } /* Copy len bytes of sg data, starting at offset skip, to a dest buffer */ void sg_copy_part_to_buf(struct scatterlist *src, u8 *dest, unsigned int len, unsigned int skip) { size_t copied; unsigned int nents = sg_nents(src); copied = sg_pcopy_to_buffer(src, nents, dest, len, skip); if (copied != len) { flow_log("%s copied %u bytes of %u requested. ", __func__, (u32)copied, len); flow_log("sg with %u entries and skip %u\n", nents, skip); } } /* * Copy data into a scatterlist starting at a specified offset in the * scatterlist. Specifically, copy len bytes of data in the buffer src * into the scatterlist dest, starting skip bytes into the scatterlist. */ void sg_copy_part_from_buf(struct scatterlist *dest, u8 *src, unsigned int len, unsigned int skip) { size_t copied; unsigned int nents = sg_nents(dest); copied = sg_pcopy_from_buffer(dest, nents, src, len, skip); if (copied != len) { flow_log("%s copied %u bytes of %u requested. ", __func__, (u32)copied, len); flow_log("sg with %u entries and skip %u\n", nents, skip); } } /** * spu_sg_count() - Determine number of elements in scatterlist to provide a * specified number of bytes. * @sg_list: scatterlist to examine * @skip: index of starting point * @nbytes: consider elements of scatterlist until reaching this number of * bytes * * Return: the number of sg entries contributing to nbytes of data */ int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes) { struct scatterlist *sg; int sg_nents = 0; unsigned int offset; if (!sg_list) return 0; if (spu_sg_at_offset(sg_list, skip, &sg, &offset) < 0) return 0; while (sg && (nbytes > 0)) { sg_nents++; nbytes -= (sg->length - offset); offset = 0; sg = sg_next(sg); } return sg_nents; } /** * spu_msg_sg_add() - Copy scatterlist entries from one sg to another, up to a * given length. * @to_sg: scatterlist to copy to * @from_sg: scatterlist to copy from * @from_skip: number of bytes to skip in from_sg. Non-zero when previous * request included part of the buffer in entry in from_sg. * Assumes from_skip < from_sg->length. * @from_nents: number of entries in from_sg * @length: number of bytes to copy. may reach this limit before exhausting * from_sg. * * Copies the entries themselves, not the data in the entries. Assumes to_sg has * enough entries. Does not limit the size of an individual buffer in to_sg. 
* * to_sg, from_sg, skip are all updated to end of copy * * Return: Number of bytes copied */ u32 spu_msg_sg_add(struct scatterlist **to_sg, struct scatterlist **from_sg, u32 *from_skip, u8 from_nents, u32 length) { struct scatterlist *sg; /* an entry in from_sg */ struct scatterlist *to = *to_sg; struct scatterlist *from = *from_sg; u32 skip = *from_skip; u32 offset; int i; u32 entry_len = 0; u32 frag_len = 0; /* length of entry added to to_sg */ u32 copied = 0; /* number of bytes copied so far */ if (length == 0) return 0; for_each_sg(from, sg, from_nents, i) { /* number of bytes in this from entry not yet used */ entry_len = sg->length - skip; frag_len = min(entry_len, length - copied); offset = sg->offset + skip; if (frag_len) sg_set_page(to++, sg_page(sg), frag_len, offset); copied += frag_len; if (copied == entry_len) { /* used up all of from entry */ skip = 0; /* start at beginning of next entry */ } if (copied == length) break; } *to_sg = to; *from_sg = sg; if (frag_len < entry_len) *from_skip = skip + frag_len; else *from_skip = 0; return copied; } void add_to_ctr(u8 *ctr_pos, unsigned int increment) { __be64 *high_be = (__be64 *)ctr_pos; __be64 *low_be = high_be + 1; u64 orig_low = __be64_to_cpu(*low_be); u64 new_low = orig_low + (u64)increment; *low_be = __cpu_to_be64(new_low); if (new_low < orig_low) /* there was a carry from the low 8 bytes */ *high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1); } struct sdesc { struct shash_desc shash; char ctx[]; }; /** * do_shash() - Do a synchronous hash operation in software * @name: The name of the hash algorithm * @result: Buffer where digest is to be written * @data1: First part of data to hash. May be NULL. * @data1_len: Length of data1, in bytes * @data2: Second part of data to hash. May be NULL. * @data2_len: Length of data2, in bytes * @key: Key (if keyed hash) * @key_len: Length of key, in bytes (or 0 if non-keyed hash) * * Note that the crypto API will not select this driver's own transform because * this driver only registers asynchronous algos. 
* * Return: 0 if hash successfully stored in result * < 0 otherwise */ int do_shash(unsigned char *name, unsigned char *result, const u8 *data1, unsigned int data1_len, const u8 *data2, unsigned int data2_len, const u8 *key, unsigned int key_len) { int rc; unsigned int size; struct crypto_shash *hash; struct sdesc *sdesc; hash = crypto_alloc_shash(name, 0, 0); if (IS_ERR(hash)) { rc = PTR_ERR(hash); pr_err("%s: Crypto %s allocation error %d\n", __func__, name, rc); return rc; } size = sizeof(struct shash_desc) + crypto_shash_descsize(hash); sdesc = kmalloc(size, GFP_KERNEL); if (!sdesc) { rc = -ENOMEM; goto do_shash_err; } sdesc->shash.tfm = hash; if (key_len > 0) { rc = crypto_shash_setkey(hash, key, key_len); if (rc) { pr_err("%s: Could not setkey %s shash\n", __func__, name); goto do_shash_err; } } rc = crypto_shash_init(&sdesc->shash); if (rc) { pr_err("%s: Could not init %s shash\n", __func__, name); goto do_shash_err; } rc = crypto_shash_update(&sdesc->shash, data1, data1_len); if (rc) { pr_err("%s: Could not update1\n", __func__); goto do_shash_err; } if (data2 && data2_len) { rc = crypto_shash_update(&sdesc->shash, data2, data2_len); if (rc) { pr_err("%s: Could not update2\n", __func__); goto do_shash_err; } } rc = crypto_shash_final(&sdesc->shash, result); if (rc) pr_err("%s: Could not generate %s hash\n", __func__, name); do_shash_err: crypto_free_shash(hash); kfree(sdesc); return rc; } #ifdef DEBUG /* Dump len bytes of a scatterlist starting at skip bytes into the sg */ void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len) { u8 dbuf[16]; unsigned int idx = skip; unsigned int num_out = 0; /* number of bytes dumped so far */ unsigned int count; if (packet_debug_logging) { while (num_out < len) { count = (len - num_out > 16) ? 
16 : len - num_out; sg_copy_part_to_buf(sg, dbuf, count, idx); num_out += count; print_hex_dump(KERN_ALERT, " sg: ", DUMP_PREFIX_NONE, 4, 1, dbuf, count, false); idx += 16; } } if (debug_logging_sleep) msleep(debug_logging_sleep); } #endif /* Returns the name for a given cipher alg/mode */ char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode) { switch (alg) { case CIPHER_ALG_RC4: return "rc4"; case CIPHER_ALG_AES: switch (mode) { case CIPHER_MODE_CBC: return "cbc(aes)"; case CIPHER_MODE_ECB: return "ecb(aes)"; case CIPHER_MODE_OFB: return "ofb(aes)"; case CIPHER_MODE_CFB: return "cfb(aes)"; case CIPHER_MODE_CTR: return "ctr(aes)"; case CIPHER_MODE_XTS: return "xts(aes)"; case CIPHER_MODE_GCM: return "gcm(aes)"; default: return "aes"; } break; case CIPHER_ALG_DES: switch (mode) { case CIPHER_MODE_CBC: return "cbc(des)"; case CIPHER_MODE_ECB: return "ecb(des)"; case CIPHER_MODE_CTR: return "ctr(des)"; default: return "des"; } break; case CIPHER_ALG_3DES: switch (mode) { case CIPHER_MODE_CBC: return "cbc(des3_ede)"; case CIPHER_MODE_ECB: return "ecb(des3_ede)"; case CIPHER_MODE_CTR: return "ctr(des3_ede)"; default: return "3des"; } break; default: return "other"; } } static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp) { struct bcm_device_private *ipriv; char *buf; ssize_t ret, out_offset, out_count; int i; u32 fifo_len; u32 spu_ofifo_ctrl; u32 alg; u32 mode; u32 op_cnt; out_count = 2048; buf = kmalloc(out_count, GFP_KERNEL); if (!buf) return -ENOMEM; ipriv = filp->private_data; out_offset = 0; out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Number of SPUs.........%u\n", ipriv->spu.num_spu); out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Current sessions.......%u\n", atomic_read(&ipriv->session_count)); out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Session count..........%u\n", atomic_read(&ipriv->stream_count)); out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Cipher setkey..........%u\n", atomic_read(&ipriv->setkey_cnt[SPU_OP_CIPHER])); out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Cipher Ops.............%u\n", atomic_read(&ipriv->op_counts[SPU_OP_CIPHER])); for (alg = 0; alg < CIPHER_ALG_LAST; alg++) { for (mode = 0; mode < CIPHER_MODE_LAST; mode++) { op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]); if (op_cnt) { out_offset += scnprintf(buf + out_offset, out_count - out_offset, " %-13s%11u\n", spu_alg_name(alg, mode), op_cnt); } } } out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Hash Ops...............%u\n", atomic_read(&ipriv->op_counts[SPU_OP_HASH])); for (alg = 0; alg < HASH_ALG_LAST; alg++) { op_cnt = atomic_read(&ipriv->hash_cnt[alg]); if (op_cnt) { out_offset += scnprintf(buf + out_offset, out_count - out_offset, " %-13s%11u\n", hash_alg_name[alg], op_cnt); } } out_offset += scnprintf(buf + out_offset, out_count - out_offset, "HMAC setkey............%u\n", atomic_read(&ipriv->setkey_cnt[SPU_OP_HMAC])); out_offset += scnprintf(buf + out_offset, out_count - out_offset, "HMAC Ops...............%u\n", atomic_read(&ipriv->op_counts[SPU_OP_HMAC])); for (alg = 0; alg < HASH_ALG_LAST; alg++) { op_cnt = atomic_read(&ipriv->hmac_cnt[alg]); if (op_cnt) { out_offset += scnprintf(buf + out_offset, out_count - out_offset, " %-13s%11u\n", hash_alg_name[alg], op_cnt); } } out_offset += scnprintf(buf + out_offset, out_count - out_offset, "AEAD setkey............%u\n", atomic_read(&ipriv->setkey_cnt[SPU_OP_AEAD])); 
out_offset += scnprintf(buf + out_offset, out_count - out_offset, "AEAD Ops...............%u\n", atomic_read(&ipriv->op_counts[SPU_OP_AEAD])); for (alg = 0; alg < AEAD_TYPE_LAST; alg++) { op_cnt = atomic_read(&ipriv->aead_cnt[alg]); if (op_cnt) { out_offset += scnprintf(buf + out_offset, out_count - out_offset, " %-13s%11u\n", aead_alg_name[alg], op_cnt); } } out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Bytes of req data......%llu\n", (u64)atomic64_read(&ipriv->bytes_out)); out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Bytes of resp data.....%llu\n", (u64)atomic64_read(&ipriv->bytes_in)); out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Mailbox full...........%u\n", atomic_read(&ipriv->mb_no_spc)); out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Mailbox send failures..%u\n", atomic_read(&ipriv->mb_send_fail)); out_offset += scnprintf(buf + out_offset, out_count - out_offset, "Check ICV errors.......%u\n", atomic_read(&ipriv->bad_icv)); if (ipriv->spu.spu_type == SPU_TYPE_SPUM) for (i = 0; i < ipriv->spu.num_spu; i++) { spu_ofifo_ctrl = ioread32(ipriv->spu.reg_vbase[i] + SPU_OFIFO_CTRL); fifo_len = spu_ofifo_ctrl & SPU_FIFO_WATERMARK; out_offset += scnprintf(buf + out_offset, out_count - out_offset, "SPU %d output FIFO high water.....%u\n", i, fifo_len); } if (out_offset > out_count) out_offset = out_count; ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); kfree(buf); return ret; } static const struct file_operations spu_debugfs_stats = { .owner = THIS_MODULE, .open = simple_open, .read = spu_debugfs_read, }; /* * Create the debug FS directories. If the top-level directory has not yet * been created, create it now. Create a stats file in this directory for * a SPU. */ void spu_setup_debugfs(void) { if (!debugfs_initialized()) return; if (!iproc_priv.debugfs_dir) iproc_priv.debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); if (!iproc_priv.debugfs_stats) /* Create file with permissions S_IRUSR */ debugfs_create_file("stats", 0400, iproc_priv.debugfs_dir, &iproc_priv, &spu_debugfs_stats); } void spu_free_debugfs(void) { debugfs_remove_recursive(iproc_priv.debugfs_dir); iproc_priv.debugfs_dir = NULL; } /** * format_value_ccm() - Format a value into a buffer, using a specified number * of bytes (i.e. maybe writing value X into a 4 byte * buffer, or maybe into a 12 byte buffer), as per the * SPU CCM spec. * * @val: value to write (up to max of unsigned int) * @buf: (pointer to) buffer to write the value * @len: number of bytes to use (0 to 255) * */ void format_value_ccm(unsigned int val, u8 *buf, u8 len) { int i; /* First clear full output buffer */ memset(buf, 0, len); /* Then, starting from right side, fill in with data */ for (i = 0; i < len; i++) { buf[len - i - 1] = (val >> (8 * i)) & 0xff; if (i >= 3) break; /* Only handle up to 32 bits of 'val' */ } }
linux-master
drivers/crypto/bcm/util.c
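util.c's add_to_ctr() treats the 16-byte CTR counter as two big-endian 64-bit halves and propagates a carry from the low half into the high half when the addition wraps. The sketch below is a user-space re-implementation for illustration only; it walks the counter byte by byte instead of using the kernel's __be64 helpers, so it is endian-independent, and ctr_add() is a hypothetical name, not a driver symbol.

/*
 * Illustrative sketch only: the carry-propagating increment that
 * add_to_ctr() performs on a 16-byte big-endian CTR counter block,
 * done byte-wise instead of with __be64 loads/stores.
 */
#include <stdint.h>
#include <stdio.h>

static void ctr_add(uint8_t ctr[16], uint32_t increment)
{
	uint32_t carry = increment;
	int i;

	for (i = 15; i >= 0 && carry; i--) {
		uint32_t sum = ctr[i] + (carry & 0xff);

		ctr[i] = sum & 0xff;			/* keep the low byte */
		carry = (carry >> 8) + (sum >> 8);	/* push the rest up */
	}
}

int main(void)
{
	/* low 64 bits all ones: adding 1 must carry into the high half */
	uint8_t ctr[16] = { 0, 0, 0, 0, 0, 0, 0, 0,
			    0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	int i;

	ctr_add(ctr, 1);
	for (i = 0; i < 16; i++)
		printf("%02x ", ctr[i]);
	printf("\n");	/* 00 00 00 00 00 00 00 01 00 00 00 00 00 00 00 00 */
	return 0;
}

This is the same wrap-detection behaviour add_to_ctr() gets by comparing the incremented low half against its original value before bumping the high half.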
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2016 Broadcom */ /* * This file works with the SPU2 version of the SPU. SPU2 has different message * formats than the previous version of the SPU. All SPU message format * differences should be hidden in the spux.c,h files. */ #include <linux/kernel.h> #include <linux/string.h> #include "util.h" #include "spu.h" #include "spu2.h" #define SPU2_TX_STATUS_LEN 0 /* SPU2 has no STATUS in input packet */ /* * Controlled by pkt_stat_cnt field in CRYPTO_SS_SPU0_CORE_SPU2_CONTROL0 * register. Defaults to 2. */ #define SPU2_RX_STATUS_LEN 2 enum spu2_proto_sel { SPU2_PROTO_RESV = 0, SPU2_MACSEC_SECTAG8_ECB = 1, SPU2_MACSEC_SECTAG8_SCB = 2, SPU2_MACSEC_SECTAG16 = 3, SPU2_MACSEC_SECTAG16_8_XPN = 4, SPU2_IPSEC = 5, SPU2_IPSEC_ESN = 6, SPU2_TLS_CIPHER = 7, SPU2_TLS_AEAD = 8, SPU2_DTLS_CIPHER = 9, SPU2_DTLS_AEAD = 10 }; static char *spu2_cipher_type_names[] = { "None", "AES128", "AES192", "AES256", "DES", "3DES" }; static char *spu2_cipher_mode_names[] = { "ECB", "CBC", "CTR", "CFB", "OFB", "XTS", "CCM", "GCM" }; static char *spu2_hash_type_names[] = { "None", "AES128", "AES192", "AES256", "Reserved", "Reserved", "MD5", "SHA1", "SHA224", "SHA256", "SHA384", "SHA512", "SHA512/224", "SHA512/256", "SHA3-224", "SHA3-256", "SHA3-384", "SHA3-512" }; static char *spu2_hash_mode_names[] = { "CMAC", "CBC-MAC", "XCBC-MAC", "HMAC", "Rabin", "CCM", "GCM", "Reserved" }; static char *spu2_ciph_type_name(enum spu2_cipher_type cipher_type) { if (cipher_type >= SPU2_CIPHER_TYPE_LAST) return "Reserved"; return spu2_cipher_type_names[cipher_type]; } static char *spu2_ciph_mode_name(enum spu2_cipher_mode cipher_mode) { if (cipher_mode >= SPU2_CIPHER_MODE_LAST) return "Reserved"; return spu2_cipher_mode_names[cipher_mode]; } static char *spu2_hash_type_name(enum spu2_hash_type hash_type) { if (hash_type >= SPU2_HASH_TYPE_LAST) return "Reserved"; return spu2_hash_type_names[hash_type]; } static char *spu2_hash_mode_name(enum spu2_hash_mode hash_mode) { if (hash_mode >= SPU2_HASH_MODE_LAST) return "Reserved"; return spu2_hash_mode_names[hash_mode]; } /* * Convert from a software cipher mode value to the corresponding value * for SPU2. */ static int spu2_cipher_mode_xlate(enum spu_cipher_mode cipher_mode, enum spu2_cipher_mode *spu2_mode) { switch (cipher_mode) { case CIPHER_MODE_ECB: *spu2_mode = SPU2_CIPHER_MODE_ECB; break; case CIPHER_MODE_CBC: *spu2_mode = SPU2_CIPHER_MODE_CBC; break; case CIPHER_MODE_OFB: *spu2_mode = SPU2_CIPHER_MODE_OFB; break; case CIPHER_MODE_CFB: *spu2_mode = SPU2_CIPHER_MODE_CFB; break; case CIPHER_MODE_CTR: *spu2_mode = SPU2_CIPHER_MODE_CTR; break; case CIPHER_MODE_CCM: *spu2_mode = SPU2_CIPHER_MODE_CCM; break; case CIPHER_MODE_GCM: *spu2_mode = SPU2_CIPHER_MODE_GCM; break; case CIPHER_MODE_XTS: *spu2_mode = SPU2_CIPHER_MODE_XTS; break; default: return -EINVAL; } return 0; } /** * spu2_cipher_xlate() - Convert a cipher {alg/mode/type} triple to a SPU2 * cipher type and mode. 
* @cipher_alg: [in] cipher algorithm value from software enumeration * @cipher_mode: [in] cipher mode value from software enumeration * @cipher_type: [in] cipher type value from software enumeration * @spu2_type: [out] cipher type value used by spu2 hardware * @spu2_mode: [out] cipher mode value used by spu2 hardware * * Return: 0 if successful */ static int spu2_cipher_xlate(enum spu_cipher_alg cipher_alg, enum spu_cipher_mode cipher_mode, enum spu_cipher_type cipher_type, enum spu2_cipher_type *spu2_type, enum spu2_cipher_mode *spu2_mode) { int err; err = spu2_cipher_mode_xlate(cipher_mode, spu2_mode); if (err) { flow_log("Invalid cipher mode %d\n", cipher_mode); return err; } switch (cipher_alg) { case CIPHER_ALG_NONE: *spu2_type = SPU2_CIPHER_TYPE_NONE; break; case CIPHER_ALG_RC4: /* SPU2 does not support RC4 */ err = -EINVAL; *spu2_type = SPU2_CIPHER_TYPE_NONE; break; case CIPHER_ALG_DES: *spu2_type = SPU2_CIPHER_TYPE_DES; break; case CIPHER_ALG_3DES: *spu2_type = SPU2_CIPHER_TYPE_3DES; break; case CIPHER_ALG_AES: switch (cipher_type) { case CIPHER_TYPE_AES128: *spu2_type = SPU2_CIPHER_TYPE_AES128; break; case CIPHER_TYPE_AES192: *spu2_type = SPU2_CIPHER_TYPE_AES192; break; case CIPHER_TYPE_AES256: *spu2_type = SPU2_CIPHER_TYPE_AES256; break; default: err = -EINVAL; } break; case CIPHER_ALG_LAST: default: err = -EINVAL; break; } if (err) flow_log("Invalid cipher alg %d or type %d\n", cipher_alg, cipher_type); return err; } /* * Convert from a software hash mode value to the corresponding value * for SPU2. Note that HASH_MODE_NONE and HASH_MODE_XCBC have the same value. */ static int spu2_hash_mode_xlate(enum hash_mode hash_mode, enum spu2_hash_mode *spu2_mode) { switch (hash_mode) { case HASH_MODE_XCBC: *spu2_mode = SPU2_HASH_MODE_XCBC_MAC; break; case HASH_MODE_CMAC: *spu2_mode = SPU2_HASH_MODE_CMAC; break; case HASH_MODE_HMAC: *spu2_mode = SPU2_HASH_MODE_HMAC; break; case HASH_MODE_CCM: *spu2_mode = SPU2_HASH_MODE_CCM; break; case HASH_MODE_GCM: *spu2_mode = SPU2_HASH_MODE_GCM; break; default: return -EINVAL; } return 0; } /** * spu2_hash_xlate() - Convert a hash {alg/mode/type} triple to a SPU2 hash type * and mode. 
* @hash_alg: [in] hash algorithm value from software enumeration * @hash_mode: [in] hash mode value from software enumeration * @hash_type: [in] hash type value from software enumeration * @ciph_type: [in] cipher type value from software enumeration * @spu2_type: [out] hash type value used by SPU2 hardware * @spu2_mode: [out] hash mode value used by SPU2 hardware * * Return: 0 if successful */ static int spu2_hash_xlate(enum hash_alg hash_alg, enum hash_mode hash_mode, enum hash_type hash_type, enum spu_cipher_type ciph_type, enum spu2_hash_type *spu2_type, enum spu2_hash_mode *spu2_mode) { int err; err = spu2_hash_mode_xlate(hash_mode, spu2_mode); if (err) { flow_log("Invalid hash mode %d\n", hash_mode); return err; } switch (hash_alg) { case HASH_ALG_NONE: *spu2_type = SPU2_HASH_TYPE_NONE; break; case HASH_ALG_MD5: *spu2_type = SPU2_HASH_TYPE_MD5; break; case HASH_ALG_SHA1: *spu2_type = SPU2_HASH_TYPE_SHA1; break; case HASH_ALG_SHA224: *spu2_type = SPU2_HASH_TYPE_SHA224; break; case HASH_ALG_SHA256: *spu2_type = SPU2_HASH_TYPE_SHA256; break; case HASH_ALG_SHA384: *spu2_type = SPU2_HASH_TYPE_SHA384; break; case HASH_ALG_SHA512: *spu2_type = SPU2_HASH_TYPE_SHA512; break; case HASH_ALG_AES: switch (ciph_type) { case CIPHER_TYPE_AES128: *spu2_type = SPU2_HASH_TYPE_AES128; break; case CIPHER_TYPE_AES192: *spu2_type = SPU2_HASH_TYPE_AES192; break; case CIPHER_TYPE_AES256: *spu2_type = SPU2_HASH_TYPE_AES256; break; default: err = -EINVAL; } break; case HASH_ALG_SHA3_224: *spu2_type = SPU2_HASH_TYPE_SHA3_224; break; case HASH_ALG_SHA3_256: *spu2_type = SPU2_HASH_TYPE_SHA3_256; break; case HASH_ALG_SHA3_384: *spu2_type = SPU2_HASH_TYPE_SHA3_384; break; case HASH_ALG_SHA3_512: *spu2_type = SPU2_HASH_TYPE_SHA3_512; break; case HASH_ALG_LAST: default: err = -EINVAL; break; } if (err) flow_log("Invalid hash alg %d or type %d\n", hash_alg, hash_type); return err; } /* Dump FMD ctrl0. The ctrl0 input is in host byte order */ static void spu2_dump_fmd_ctrl0(u64 ctrl0) { enum spu2_cipher_type ciph_type; enum spu2_cipher_mode ciph_mode; enum spu2_hash_type hash_type; enum spu2_hash_mode hash_mode; char *ciph_name; char *ciph_mode_name; char *hash_name; char *hash_mode_name; u8 cfb; u8 proto; packet_log(" FMD CTRL0 %#16llx\n", ctrl0); if (ctrl0 & SPU2_CIPH_ENCRYPT_EN) packet_log(" encrypt\n"); else packet_log(" decrypt\n"); ciph_type = (ctrl0 & SPU2_CIPH_TYPE) >> SPU2_CIPH_TYPE_SHIFT; ciph_name = spu2_ciph_type_name(ciph_type); packet_log(" Cipher type: %s\n", ciph_name); if (ciph_type != SPU2_CIPHER_TYPE_NONE) { ciph_mode = (ctrl0 & SPU2_CIPH_MODE) >> SPU2_CIPH_MODE_SHIFT; ciph_mode_name = spu2_ciph_mode_name(ciph_mode); packet_log(" Cipher mode: %s\n", ciph_mode_name); } cfb = (ctrl0 & SPU2_CFB_MASK) >> SPU2_CFB_MASK_SHIFT; packet_log(" CFB %#x\n", cfb); proto = (ctrl0 & SPU2_PROTO_SEL) >> SPU2_PROTO_SEL_SHIFT; packet_log(" protocol %#x\n", proto); if (ctrl0 & SPU2_HASH_FIRST) packet_log(" hash first\n"); else packet_log(" cipher first\n"); if (ctrl0 & SPU2_CHK_TAG) packet_log(" check tag\n"); hash_type = (ctrl0 & SPU2_HASH_TYPE) >> SPU2_HASH_TYPE_SHIFT; hash_name = spu2_hash_type_name(hash_type); packet_log(" Hash type: %s\n", hash_name); if (hash_type != SPU2_HASH_TYPE_NONE) { hash_mode = (ctrl0 & SPU2_HASH_MODE) >> SPU2_HASH_MODE_SHIFT; hash_mode_name = spu2_hash_mode_name(hash_mode); packet_log(" Hash mode: %s\n", hash_mode_name); } if (ctrl0 & SPU2_CIPH_PAD_EN) { packet_log(" Cipher pad: %#2llx\n", (ctrl0 & SPU2_CIPH_PAD) >> SPU2_CIPH_PAD_SHIFT); } } /* Dump FMD ctrl1. 
The ctrl1 input is in host byte order */ static void spu2_dump_fmd_ctrl1(u64 ctrl1) { u8 hash_key_len; u8 ciph_key_len; u8 ret_iv_len; u8 iv_offset; u8 iv_len; u8 hash_tag_len; u8 ret_md; packet_log(" FMD CTRL1 %#16llx\n", ctrl1); if (ctrl1 & SPU2_TAG_LOC) packet_log(" Tag after payload\n"); packet_log(" Msg includes "); if (ctrl1 & SPU2_HAS_FR_DATA) packet_log("FD "); if (ctrl1 & SPU2_HAS_AAD1) packet_log("AAD1 "); if (ctrl1 & SPU2_HAS_NAAD) packet_log("NAAD "); if (ctrl1 & SPU2_HAS_AAD2) packet_log("AAD2 "); if (ctrl1 & SPU2_HAS_ESN) packet_log("ESN "); packet_log("\n"); hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT; packet_log(" Hash key len %u\n", hash_key_len); ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT; packet_log(" Cipher key len %u\n", ciph_key_len); if (ctrl1 & SPU2_GENIV) packet_log(" Generate IV\n"); if (ctrl1 & SPU2_HASH_IV) packet_log(" IV included in hash\n"); if (ctrl1 & SPU2_RET_IV) packet_log(" Return IV in output before payload\n"); ret_iv_len = (ctrl1 & SPU2_RET_IV_LEN) >> SPU2_RET_IV_LEN_SHIFT; packet_log(" Length of returned IV %u bytes\n", ret_iv_len ? ret_iv_len : 16); iv_offset = (ctrl1 & SPU2_IV_OFFSET) >> SPU2_IV_OFFSET_SHIFT; packet_log(" IV offset %u\n", iv_offset); iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT; packet_log(" Input IV len %u bytes\n", iv_len); hash_tag_len = (ctrl1 & SPU2_HASH_TAG_LEN) >> SPU2_HASH_TAG_LEN_SHIFT; packet_log(" Hash tag length %u bytes\n", hash_tag_len); packet_log(" Return "); ret_md = (ctrl1 & SPU2_RETURN_MD) >> SPU2_RETURN_MD_SHIFT; if (ret_md) packet_log("FMD "); if (ret_md == SPU2_RET_FMD_OMD) packet_log("OMD "); else if (ret_md == SPU2_RET_FMD_OMD_IV) packet_log("OMD IV "); if (ctrl1 & SPU2_RETURN_FD) packet_log("FD "); if (ctrl1 & SPU2_RETURN_AAD1) packet_log("AAD1 "); if (ctrl1 & SPU2_RETURN_NAAD) packet_log("NAAD "); if (ctrl1 & SPU2_RETURN_AAD2) packet_log("AAD2 "); if (ctrl1 & SPU2_RETURN_PAY) packet_log("Payload"); packet_log("\n"); } /* Dump FMD ctrl2. The ctrl2 input is in host byte order */ static void spu2_dump_fmd_ctrl2(u64 ctrl2) { packet_log(" FMD CTRL2 %#16llx\n", ctrl2); packet_log(" AAD1 offset %llu length %llu bytes\n", ctrl2 & SPU2_AAD1_OFFSET, (ctrl2 & SPU2_AAD1_LEN) >> SPU2_AAD1_LEN_SHIFT); packet_log(" AAD2 offset %llu\n", (ctrl2 & SPU2_AAD2_OFFSET) >> SPU2_AAD2_OFFSET_SHIFT); packet_log(" Payload offset %llu\n", (ctrl2 & SPU2_PL_OFFSET) >> SPU2_PL_OFFSET_SHIFT); } /* Dump FMD ctrl3. 
The ctrl3 input is in host byte order */ static void spu2_dump_fmd_ctrl3(u64 ctrl3) { packet_log(" FMD CTRL3 %#16llx\n", ctrl3); packet_log(" Payload length %llu bytes\n", ctrl3 & SPU2_PL_LEN); packet_log(" TLS length %llu bytes\n", (ctrl3 & SPU2_TLS_LEN) >> SPU2_TLS_LEN_SHIFT); } static void spu2_dump_fmd(struct SPU2_FMD *fmd) { spu2_dump_fmd_ctrl0(le64_to_cpu(fmd->ctrl0)); spu2_dump_fmd_ctrl1(le64_to_cpu(fmd->ctrl1)); spu2_dump_fmd_ctrl2(le64_to_cpu(fmd->ctrl2)); spu2_dump_fmd_ctrl3(le64_to_cpu(fmd->ctrl3)); } static void spu2_dump_omd(u8 *omd, u16 hash_key_len, u16 ciph_key_len, u16 hash_iv_len, u16 ciph_iv_len) { u8 *ptr = omd; packet_log(" OMD:\n"); if (hash_key_len) { packet_log(" Hash Key Length %u bytes\n", hash_key_len); packet_dump(" KEY: ", ptr, hash_key_len); ptr += hash_key_len; } if (ciph_key_len) { packet_log(" Cipher Key Length %u bytes\n", ciph_key_len); packet_dump(" KEY: ", ptr, ciph_key_len); ptr += ciph_key_len; } if (hash_iv_len) { packet_log(" Hash IV Length %u bytes\n", hash_iv_len); packet_dump(" hash IV: ", ptr, hash_iv_len); ptr += ciph_key_len; } if (ciph_iv_len) { packet_log(" Cipher IV Length %u bytes\n", ciph_iv_len); packet_dump(" cipher IV: ", ptr, ciph_iv_len); } } /* Dump a SPU2 header for debug */ void spu2_dump_msg_hdr(u8 *buf, unsigned int buf_len) { struct SPU2_FMD *fmd = (struct SPU2_FMD *)buf; u8 *omd; u64 ctrl1; u16 hash_key_len; u16 ciph_key_len; u16 hash_iv_len; u16 ciph_iv_len; u16 omd_len; packet_log("\n"); packet_log("SPU2 message header %p len: %u\n", buf, buf_len); spu2_dump_fmd(fmd); omd = (u8 *)(fmd + 1); ctrl1 = le64_to_cpu(fmd->ctrl1); hash_key_len = (ctrl1 & SPU2_HASH_KEY_LEN) >> SPU2_HASH_KEY_LEN_SHIFT; ciph_key_len = (ctrl1 & SPU2_CIPH_KEY_LEN) >> SPU2_CIPH_KEY_LEN_SHIFT; hash_iv_len = 0; ciph_iv_len = (ctrl1 & SPU2_IV_LEN) >> SPU2_IV_LEN_SHIFT; spu2_dump_omd(omd, hash_key_len, ciph_key_len, hash_iv_len, ciph_iv_len); /* Double check sanity */ omd_len = hash_key_len + ciph_key_len + hash_iv_len + ciph_iv_len; if (FMD_SIZE + omd_len != buf_len) { packet_log (" Packet parsed incorrectly. buf_len %u, sum of MD %zu\n", buf_len, FMD_SIZE + omd_len); } packet_log("\n"); } /** * spu2_fmd_init() - At setkey time, initialize the fixed meta data for * subsequent skcipher requests for this context. * @fmd: Start of FMD field to be written * @spu2_type: Cipher algorithm * @spu2_mode: Cipher mode * @cipher_key_len: Length of cipher key, in bytes * @cipher_iv_len: Length of cipher initialization vector, in bytes * * Return: 0 (success) */ static int spu2_fmd_init(struct SPU2_FMD *fmd, enum spu2_cipher_type spu2_type, enum spu2_cipher_mode spu2_mode, u32 cipher_key_len, u32 cipher_iv_len) { u64 ctrl0; u64 ctrl1; u64 ctrl2; u64 ctrl3; u32 aad1_offset; u32 aad2_offset; u16 aad1_len = 0; u64 payload_offset; ctrl0 = (spu2_type << SPU2_CIPH_TYPE_SHIFT) | (spu2_mode << SPU2_CIPH_MODE_SHIFT); ctrl1 = (cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) | ((u64)cipher_iv_len << SPU2_IV_LEN_SHIFT) | ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT) | SPU2_RETURN_PAY; /* * AAD1 offset is from start of FD. FD length is always 0 for this * driver. So AAD1_offset is always 0. 
*/ aad1_offset = 0; aad2_offset = aad1_offset; payload_offset = 0; ctrl2 = aad1_offset | (aad1_len << SPU2_AAD1_LEN_SHIFT) | (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) | (payload_offset << SPU2_PL_OFFSET_SHIFT); ctrl3 = 0; fmd->ctrl0 = cpu_to_le64(ctrl0); fmd->ctrl1 = cpu_to_le64(ctrl1); fmd->ctrl2 = cpu_to_le64(ctrl2); fmd->ctrl3 = cpu_to_le64(ctrl3); return 0; } /** * spu2_fmd_ctrl0_write() - Write ctrl0 field in fixed metadata (FMD) field of * SPU request packet. * @fmd: Start of FMD field to be written * @is_inbound: true if decrypting. false if encrypting. * @auth_first: true if alg authenticates before encrypting * @protocol: protocol selector * @cipher_type: cipher algorithm * @cipher_mode: cipher mode * @auth_type: authentication type * @auth_mode: authentication mode */ static void spu2_fmd_ctrl0_write(struct SPU2_FMD *fmd, bool is_inbound, bool auth_first, enum spu2_proto_sel protocol, enum spu2_cipher_type cipher_type, enum spu2_cipher_mode cipher_mode, enum spu2_hash_type auth_type, enum spu2_hash_mode auth_mode) { u64 ctrl0 = 0; if ((cipher_type != SPU2_CIPHER_TYPE_NONE) && !is_inbound) ctrl0 |= SPU2_CIPH_ENCRYPT_EN; ctrl0 |= ((u64)cipher_type << SPU2_CIPH_TYPE_SHIFT) | ((u64)cipher_mode << SPU2_CIPH_MODE_SHIFT); if (protocol) ctrl0 |= (u64)protocol << SPU2_PROTO_SEL_SHIFT; if (auth_first) ctrl0 |= SPU2_HASH_FIRST; if (is_inbound && (auth_type != SPU2_HASH_TYPE_NONE)) ctrl0 |= SPU2_CHK_TAG; ctrl0 |= (((u64)auth_type << SPU2_HASH_TYPE_SHIFT) | ((u64)auth_mode << SPU2_HASH_MODE_SHIFT)); fmd->ctrl0 = cpu_to_le64(ctrl0); } /** * spu2_fmd_ctrl1_write() - Write ctrl1 field in fixed metadata (FMD) field of * SPU request packet. * @fmd: Start of FMD field to be written * @is_inbound: true if decrypting. false if encrypting. * @assoc_size: Length of additional associated data, in bytes * @auth_key_len: Length of authentication key, in bytes * @cipher_key_len: Length of cipher key, in bytes * @gen_iv: If true, hw generates IV and returns in response * @hash_iv: IV participates in hash. Used for IPSEC and TLS. * @return_iv: Return IV in output packet before payload * @ret_iv_len: Length of IV returned from SPU, in bytes * @ret_iv_offset: Offset into full IV of start of returned IV * @cipher_iv_len: Length of input cipher IV, in bytes * @digest_size: Length of digest (aka, hash tag or ICV), in bytes * @return_payload: Return payload in SPU response * @return_md : return metadata in SPU response * * Packet can have AAD2 w/o AAD1. For algorithms currently supported, * associated data goes in AAD2. 
*/ static void spu2_fmd_ctrl1_write(struct SPU2_FMD *fmd, bool is_inbound, u64 assoc_size, u64 auth_key_len, u64 cipher_key_len, bool gen_iv, bool hash_iv, bool return_iv, u64 ret_iv_len, u64 ret_iv_offset, u64 cipher_iv_len, u64 digest_size, bool return_payload, bool return_md) { u64 ctrl1 = 0; if (is_inbound && digest_size) ctrl1 |= SPU2_TAG_LOC; if (assoc_size) { ctrl1 |= SPU2_HAS_AAD2; ctrl1 |= SPU2_RETURN_AAD2; /* need aad2 for gcm aes esp */ } if (auth_key_len) ctrl1 |= ((auth_key_len << SPU2_HASH_KEY_LEN_SHIFT) & SPU2_HASH_KEY_LEN); if (cipher_key_len) ctrl1 |= ((cipher_key_len << SPU2_CIPH_KEY_LEN_SHIFT) & SPU2_CIPH_KEY_LEN); if (gen_iv) ctrl1 |= SPU2_GENIV; if (hash_iv) ctrl1 |= SPU2_HASH_IV; if (return_iv) { ctrl1 |= SPU2_RET_IV; ctrl1 |= ret_iv_len << SPU2_RET_IV_LEN_SHIFT; ctrl1 |= ret_iv_offset << SPU2_IV_OFFSET_SHIFT; } ctrl1 |= ((cipher_iv_len << SPU2_IV_LEN_SHIFT) & SPU2_IV_LEN); if (digest_size) ctrl1 |= ((digest_size << SPU2_HASH_TAG_LEN_SHIFT) & SPU2_HASH_TAG_LEN); /* Let's ask for the output pkt to include FMD, but don't need to * get keys and IVs back in OMD. */ if (return_md) ctrl1 |= ((u64)SPU2_RET_FMD_ONLY << SPU2_RETURN_MD_SHIFT); else ctrl1 |= ((u64)SPU2_RET_NO_MD << SPU2_RETURN_MD_SHIFT); /* Crypto API does not get assoc data back. So no need for AAD2. */ if (return_payload) ctrl1 |= SPU2_RETURN_PAY; fmd->ctrl1 = cpu_to_le64(ctrl1); } /** * spu2_fmd_ctrl2_write() - Set the ctrl2 field in the fixed metadata field of * SPU2 header. * @fmd: Start of FMD field to be written * @cipher_offset: Number of bytes from Start of Packet (end of FD field) where * data to be encrypted or decrypted begins * @auth_key_len: Length of authentication key, in bytes * @auth_iv_len: Length of authentication initialization vector, in bytes * @cipher_key_len: Length of cipher key, in bytes * @cipher_iv_len: Length of cipher IV, in bytes */ static void spu2_fmd_ctrl2_write(struct SPU2_FMD *fmd, u64 cipher_offset, u64 auth_key_len, u64 auth_iv_len, u64 cipher_key_len, u64 cipher_iv_len) { u64 ctrl2; u64 aad1_offset; u64 aad2_offset; u16 aad1_len = 0; u64 payload_offset; /* AAD1 offset is from start of FD. FD length always 0. */ aad1_offset = 0; aad2_offset = aad1_offset; payload_offset = cipher_offset; ctrl2 = aad1_offset | (aad1_len << SPU2_AAD1_LEN_SHIFT) | (aad2_offset << SPU2_AAD2_OFFSET_SHIFT) | (payload_offset << SPU2_PL_OFFSET_SHIFT); fmd->ctrl2 = cpu_to_le64(ctrl2); } /** * spu2_fmd_ctrl3_write() - Set the ctrl3 field in FMD * @fmd: Fixed meta data. First field in SPU2 msg header. * @payload_len: Length of payload, in bytes */ static void spu2_fmd_ctrl3_write(struct SPU2_FMD *fmd, u64 payload_len) { u64 ctrl3; ctrl3 = payload_len & SPU2_PL_LEN; fmd->ctrl3 = cpu_to_le64(ctrl3); } /** * spu2_ctx_max_payload() - Determine the maximum length of the payload for a * SPU message for a given cipher and hash alg context. * @cipher_alg: The cipher algorithm * @cipher_mode: The cipher mode * @blocksize: The size of a block of data for this algo * * For SPU2, the hardware generally ignores the PayloadLen field in ctrl3 of * FMD and just keeps computing until it receives a DMA descriptor with the EOF * flag set. So we consider the max payload to be infinite. AES CCM is an * exception. 
* * Return: Max payload length in bytes */ u32 spu2_ctx_max_payload(enum spu_cipher_alg cipher_alg, enum spu_cipher_mode cipher_mode, unsigned int blocksize) { if ((cipher_alg == CIPHER_ALG_AES) && (cipher_mode == CIPHER_MODE_CCM)) { u32 excess = SPU2_MAX_PAYLOAD % blocksize; return SPU2_MAX_PAYLOAD - excess; } else { return SPU_MAX_PAYLOAD_INF; } } /** * spu2_payload_length() - Given a SPU2 message header, extract the payload * length. * @spu_hdr: Start of SPU message header (FMD) * * Return: payload length, in bytes */ u32 spu2_payload_length(u8 *spu_hdr) { struct SPU2_FMD *fmd = (struct SPU2_FMD *)spu_hdr; u32 pl_len; u64 ctrl3; ctrl3 = le64_to_cpu(fmd->ctrl3); pl_len = ctrl3 & SPU2_PL_LEN; return pl_len; } /** * spu2_response_hdr_len() - Determine the expected length of a SPU response * header. * @auth_key_len: Length of authentication key, in bytes * @enc_key_len: Length of encryption key, in bytes * @is_hash: Unused * * For SPU2, includes just FMD. OMD is never requested. * * Return: Length of FMD, in bytes */ u16 spu2_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash) { return FMD_SIZE; } /** * spu2_hash_pad_len() - Calculate the length of hash padding required to extend * data to a full block size. * @hash_alg: hash algorithm * @hash_mode: hash mode * @chunksize: length of data, in bytes * @hash_block_size: size of a hash block, in bytes * * SPU2 hardware does all hash padding * * Return: length of hash pad in bytes */ u16 spu2_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode, u32 chunksize, u16 hash_block_size) { return 0; } /** * spu2_gcm_ccm_pad_len() - Determine the length of GCM/CCM padding for either * the AAD field or the data. * @cipher_mode: Unused * @data_size: Unused * * Return: 0. Unlike SPU-M, SPU2 hardware does any GCM/CCM padding required. */ u32 spu2_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode, unsigned int data_size) { return 0; } /** * spu2_assoc_resp_len() - Determine the size of the AAD2 buffer needed to catch * associated data in a SPU2 output packet. * @cipher_mode: cipher mode * @assoc_len: length of additional associated data, in bytes * @iv_len: length of initialization vector, in bytes * @is_encrypt: true if encrypting. false if decrypt. * * Return: Length of buffer to catch associated data in response */ u32 spu2_assoc_resp_len(enum spu_cipher_mode cipher_mode, unsigned int assoc_len, unsigned int iv_len, bool is_encrypt) { u32 resp_len = assoc_len; if (is_encrypt) /* gcm aes esp has to write 8-byte IV in response */ resp_len += iv_len; return resp_len; } /** * spu2_aead_ivlen() - Calculate the length of the AEAD IV to be included * in a SPU request after the AAD and before the payload. * @cipher_mode: cipher mode * @iv_len: initialization vector length in bytes * * For SPU2, AEAD IV is included in OMD and does not need to be repeated * prior to the payload. * * Return: Length of AEAD IV in bytes */ u8 spu2_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len) { return 0; } /** * spu2_hash_type() - Determine the type of hash operation. * @src_sent: The number of bytes in the current request that have already * been sent to the SPU to be hashed. * * SPU2 always does a FULL hash operation */ enum hash_type spu2_hash_type(u32 src_sent) { return HASH_TYPE_FULL; } /** * spu2_digest_size() - Determine the size of a hash digest to expect the SPU to * return. 
* @alg_digest_size: Number of bytes in the final digest for the given algo * @alg: The hash algorithm * @htype: Type of hash operation (init, update, full, etc) * */ u32 spu2_digest_size(u32 alg_digest_size, enum hash_alg alg, enum hash_type htype) { return alg_digest_size; } /** * spu2_create_request() - Build a SPU2 request message header, includint FMD and * OMD. * @spu_hdr: Start of buffer where SPU request header is to be written * @req_opts: SPU request message options * @cipher_parms: Parameters related to cipher algorithm * @hash_parms: Parameters related to hash algorithm * @aead_parms: Parameters related to AEAD operation * @data_size: Length of data to be encrypted or authenticated. If AEAD, does * not include length of AAD. * * Construct the message starting at spu_hdr. Caller should allocate this buffer * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long. * * Return: the length of the SPU header in bytes. 0 if an error occurs. */ u32 spu2_create_request(u8 *spu_hdr, struct spu_request_opts *req_opts, struct spu_cipher_parms *cipher_parms, struct spu_hash_parms *hash_parms, struct spu_aead_parms *aead_parms, unsigned int data_size) { struct SPU2_FMD *fmd; u8 *ptr; unsigned int buf_len; int err; enum spu2_cipher_type spu2_ciph_type = SPU2_CIPHER_TYPE_NONE; enum spu2_cipher_mode spu2_ciph_mode; enum spu2_hash_type spu2_auth_type = SPU2_HASH_TYPE_NONE; enum spu2_hash_mode spu2_auth_mode; bool return_md = true; enum spu2_proto_sel proto = SPU2_PROTO_RESV; /* size of the payload */ unsigned int payload_len = hash_parms->prebuf_len + data_size + hash_parms->pad_len - ((req_opts->is_aead && req_opts->is_inbound) ? hash_parms->digestsize : 0); /* offset of prebuf or data from start of AAD2 */ unsigned int cipher_offset = aead_parms->assoc_size + aead_parms->aad_pad_len + aead_parms->iv_len; /* total size of the data following OMD (without STAT word padding) */ unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size, aead_parms->iv_len, hash_parms->prebuf_len, data_size, aead_parms->aad_pad_len, aead_parms->data_pad_len, hash_parms->pad_len); unsigned int assoc_size = aead_parms->assoc_size; if (req_opts->is_aead && (cipher_parms->alg == CIPHER_ALG_AES) && (cipher_parms->mode == CIPHER_MODE_GCM)) /* * On SPU 2, aes gcm cipher first on encrypt, auth first on * decrypt */ req_opts->auth_first = req_opts->is_inbound; /* and do opposite for ccm (auth 1st on encrypt) */ if (req_opts->is_aead && (cipher_parms->alg == CIPHER_ALG_AES) && (cipher_parms->mode == CIPHER_MODE_CCM)) req_opts->auth_first = !req_opts->is_inbound; flow_log("%s()\n", __func__); flow_log(" in:%u authFirst:%u\n", req_opts->is_inbound, req_opts->auth_first); flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg, cipher_parms->mode, cipher_parms->type); flow_log(" is_esp: %s\n", req_opts->is_esp ? 
"yes" : "no"); flow_log(" key: %d\n", cipher_parms->key_len); flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len); flow_log(" iv: %d\n", cipher_parms->iv_len); flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len); flow_log(" auth alg:%u mode:%u type %u\n", hash_parms->alg, hash_parms->mode, hash_parms->type); flow_log(" digestsize: %u\n", hash_parms->digestsize); flow_log(" authkey: %d\n", hash_parms->key_len); flow_dump(" authkey: ", hash_parms->key_buf, hash_parms->key_len); flow_log(" assoc_size:%u\n", assoc_size); flow_log(" prebuf_len:%u\n", hash_parms->prebuf_len); flow_log(" data_size:%u\n", data_size); flow_log(" hash_pad_len:%u\n", hash_parms->pad_len); flow_log(" real_db_size:%u\n", real_db_size); flow_log(" cipher_offset:%u payload_len:%u\n", cipher_offset, payload_len); flow_log(" aead_iv: %u\n", aead_parms->iv_len); /* Convert to spu2 values for cipher alg, hash alg */ err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode, cipher_parms->type, &spu2_ciph_type, &spu2_ciph_mode); /* If we are doing GCM hashing only - either via rfc4543 transform * or because we happen to do GCM with AAD only and no payload - we * need to configure hardware to use hash key rather than cipher key * and put data into payload. This is because unlike SPU-M, running * GCM cipher with 0 size payload is not permitted. */ if ((req_opts->is_rfc4543) || ((spu2_ciph_mode == SPU2_CIPHER_MODE_GCM) && (payload_len == 0))) { /* Use hashing (only) and set up hash key */ spu2_ciph_type = SPU2_CIPHER_TYPE_NONE; hash_parms->key_len = cipher_parms->key_len; memcpy(hash_parms->key_buf, cipher_parms->key_buf, cipher_parms->key_len); cipher_parms->key_len = 0; if (req_opts->is_rfc4543) payload_len += assoc_size; else payload_len = assoc_size; cipher_offset = 0; assoc_size = 0; } if (err) return 0; flow_log("spu2 cipher type %s, cipher mode %s\n", spu2_ciph_type_name(spu2_ciph_type), spu2_ciph_mode_name(spu2_ciph_mode)); err = spu2_hash_xlate(hash_parms->alg, hash_parms->mode, hash_parms->type, cipher_parms->type, &spu2_auth_type, &spu2_auth_mode); if (err) return 0; flow_log("spu2 hash type %s, hash mode %s\n", spu2_hash_type_name(spu2_auth_type), spu2_hash_mode_name(spu2_auth_mode)); fmd = (struct SPU2_FMD *)spu_hdr; spu2_fmd_ctrl0_write(fmd, req_opts->is_inbound, req_opts->auth_first, proto, spu2_ciph_type, spu2_ciph_mode, spu2_auth_type, spu2_auth_mode); spu2_fmd_ctrl1_write(fmd, req_opts->is_inbound, assoc_size, hash_parms->key_len, cipher_parms->key_len, false, false, aead_parms->return_iv, aead_parms->ret_iv_len, aead_parms->ret_iv_off, cipher_parms->iv_len, hash_parms->digestsize, !req_opts->bd_suppress, return_md); spu2_fmd_ctrl2_write(fmd, cipher_offset, hash_parms->key_len, 0, cipher_parms->key_len, cipher_parms->iv_len); spu2_fmd_ctrl3_write(fmd, payload_len); ptr = (u8 *)(fmd + 1); buf_len = sizeof(struct SPU2_FMD); /* Write OMD */ if (hash_parms->key_len) { memcpy(ptr, hash_parms->key_buf, hash_parms->key_len); ptr += hash_parms->key_len; buf_len += hash_parms->key_len; } if (cipher_parms->key_len) { memcpy(ptr, cipher_parms->key_buf, cipher_parms->key_len); ptr += cipher_parms->key_len; buf_len += cipher_parms->key_len; } if (cipher_parms->iv_len) { memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len); ptr += cipher_parms->iv_len; buf_len += cipher_parms->iv_len; } packet_dump(" SPU request header: ", spu_hdr, buf_len); return buf_len; } /** * spu2_cipher_req_init() - Build an skcipher SPU2 request message header, * including FMD and OMD. 
* @spu_hdr: Location of start of SPU request (FMD field) * @cipher_parms: Parameters describing cipher request * * Called at setkey time to initialize a msg header that can be reused for all * subsequent skcipher requests. Construct the message starting at spu_hdr. * Caller should allocate this buffer in DMA-able memory at least * SPU_HEADER_ALLOC_LEN bytes long. * * Return: the total length of the SPU header (FMD and OMD) in bytes. 0 if an * error occurs. */ u16 spu2_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms) { struct SPU2_FMD *fmd; u8 *omd; enum spu2_cipher_type spu2_type = SPU2_CIPHER_TYPE_NONE; enum spu2_cipher_mode spu2_mode; int err; flow_log("%s()\n", __func__); flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg, cipher_parms->mode, cipher_parms->type); flow_log(" cipher_iv_len: %u\n", cipher_parms->iv_len); flow_log(" key: %d\n", cipher_parms->key_len); flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len); /* Convert to spu2 values */ err = spu2_cipher_xlate(cipher_parms->alg, cipher_parms->mode, cipher_parms->type, &spu2_type, &spu2_mode); if (err) return 0; flow_log("spu2 cipher type %s, cipher mode %s\n", spu2_ciph_type_name(spu2_type), spu2_ciph_mode_name(spu2_mode)); /* Construct the FMD header */ fmd = (struct SPU2_FMD *)spu_hdr; err = spu2_fmd_init(fmd, spu2_type, spu2_mode, cipher_parms->key_len, cipher_parms->iv_len); if (err) return 0; /* Write cipher key to OMD */ omd = (u8 *)(fmd + 1); if (cipher_parms->key_buf && cipher_parms->key_len) memcpy(omd, cipher_parms->key_buf, cipher_parms->key_len); packet_dump(" SPU request header: ", spu_hdr, FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len); return FMD_SIZE + cipher_parms->key_len + cipher_parms->iv_len; } /** * spu2_cipher_req_finish() - Finish building a SPU request message header for a * block cipher request. * @spu_hdr: Start of the request message header (MH field) * @spu_req_hdr_len: Length in bytes of the SPU request header * @is_inbound: 0 encrypt, 1 decrypt * @cipher_parms: Parameters describing cipher operation to be performed * @data_size: Length of the data in the BD field * * Assumes much of the header was already filled in at setkey() time in * spu_cipher_req_init(). * spu_cipher_req_init() fills in the encryption key. */ void spu2_cipher_req_finish(u8 *spu_hdr, u16 spu_req_hdr_len, unsigned int is_inbound, struct spu_cipher_parms *cipher_parms, unsigned int data_size) { struct SPU2_FMD *fmd; u8 *omd; /* start of optional metadata */ u64 ctrl0; u64 ctrl3; flow_log("%s()\n", __func__); flow_log(" in: %u\n", is_inbound); flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg, cipher_parms->type); flow_log(" iv len: %d\n", cipher_parms->iv_len); flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len); flow_log(" data_size: %u\n", data_size); fmd = (struct SPU2_FMD *)spu_hdr; omd = (u8 *)(fmd + 1); /* * FMD ctrl0 was initialized at setkey time. update it to indicate * whether we are encrypting or decrypting. 
*/ ctrl0 = le64_to_cpu(fmd->ctrl0); if (is_inbound) ctrl0 &= ~SPU2_CIPH_ENCRYPT_EN; /* decrypt */ else ctrl0 |= SPU2_CIPH_ENCRYPT_EN; /* encrypt */ fmd->ctrl0 = cpu_to_le64(ctrl0); if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len) { /* cipher iv provided so put it in here */ memcpy(omd + cipher_parms->key_len, cipher_parms->iv_buf, cipher_parms->iv_len); } ctrl3 = le64_to_cpu(fmd->ctrl3); data_size &= SPU2_PL_LEN; ctrl3 |= data_size; fmd->ctrl3 = cpu_to_le64(ctrl3); packet_dump(" SPU request header: ", spu_hdr, spu_req_hdr_len); } /** * spu2_request_pad() - Create pad bytes at the end of the data. * @pad_start: Start of buffer where pad bytes are to be written * @gcm_padding: Length of GCM padding, in bytes * @hash_pad_len: Number of bytes of padding to extend data to a full block * @auth_alg: Authentication algorithm * @auth_mode: Authentication mode * @total_sent: Length inserted at end of hash pad * @status_padding: Number of bytes of padding to align STATUS word * * There may be three forms of pad: * 1. GCM pad - for GCM mode ciphers, pad to 16-byte alignment * 2. hash pad - pad to a block length, with 0x80 data terminator and * size at the end * 3. STAT pad - to ensure the STAT field is 4-byte aligned */ void spu2_request_pad(u8 *pad_start, u32 gcm_padding, u32 hash_pad_len, enum hash_alg auth_alg, enum hash_mode auth_mode, unsigned int total_sent, u32 status_padding) { u8 *ptr = pad_start; /* fix data alignment for GCM */ if (gcm_padding > 0) { flow_log(" GCM: padding to 16 byte alignment: %u bytes\n", gcm_padding); memset(ptr, 0, gcm_padding); ptr += gcm_padding; } if (hash_pad_len > 0) { /* clear the padding section */ memset(ptr, 0, hash_pad_len); /* terminate the data */ *ptr = 0x80; ptr += (hash_pad_len - sizeof(u64)); /* add the size at the end as required per alg */ if (auth_alg == HASH_ALG_MD5) *(__le64 *)ptr = cpu_to_le64(total_sent * 8ull); else /* SHA1, SHA2-224, SHA2-256 */ *(__be64 *)ptr = cpu_to_be64(total_sent * 8ull); ptr += sizeof(u64); } /* pad to a 4-byte alignment for STAT */ if (status_padding > 0) { flow_log(" STAT: padding to 4 byte alignment: %u bytes\n", status_padding); memset(ptr, 0, status_padding); ptr += status_padding; } } /** * spu2_xts_tweak_in_payload() - Indicate that SPU2 does NOT place the XTS * tweak field in the packet payload (it uses IV instead) * * Return: 0 */ u8 spu2_xts_tweak_in_payload(void) { return 0; } /** * spu2_tx_status_len() - Return the length of the STATUS field in a SPU * request message. * * Return: Length of STATUS field in bytes. */ u8 spu2_tx_status_len(void) { return SPU2_TX_STATUS_LEN; } /** * spu2_rx_status_len() - Return the length of the STATUS field in a SPU * response message. * * Return: Length of STATUS field in bytes. */ u8 spu2_rx_status_len(void) { return SPU2_RX_STATUS_LEN; } /** * spu2_status_process() - Process the status from a SPU response message. * @statp: start of STATUS word * * Return: 0 - if status is good and response should be processed * !0 - status indicates an error and response is invalid */ int spu2_status_process(u8 *statp) { /* SPU2 status is 2 bytes by default - SPU_RX_STATUS_LEN */ u16 status = le16_to_cpu(*(__le16 *)statp); if (status == 0) return 0; flow_log("rx status is %#x\n", status); if (status == SPU2_INVALID_ICV) return SPU_INVALID_ICV; return -EBADMSG; } /** * spu2_ccm_update_iv() - Update the IV as per the requirements for CCM mode. 
* * @digestsize: Digest size of this request * @cipher_parms: (pointer to) cipher parameters, includes IV buf & IV len * @assoclen: Length of AAD data * @chunksize: length of input data to be sent in this req * @is_encrypt: true if this is an output/encrypt operation * @is_esp: true if this is an ESP / RFC4309 operation * */ void spu2_ccm_update_iv(unsigned int digestsize, struct spu_cipher_parms *cipher_parms, unsigned int assoclen, unsigned int chunksize, bool is_encrypt, bool is_esp) { int L; /* size of length field, in bytes */ /* * In RFC4309 mode, L is fixed at 4 bytes; otherwise, IV from * testmgr contains (L-1) in bottom 3 bits of first byte, * per RFC 3610. */ if (is_esp) L = CCM_ESP_L_VALUE; else L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >> CCM_B0_L_PRIME_SHIFT) + 1; /* SPU2 doesn't want these length bytes nor the first byte... */ cipher_parms->iv_len -= (1 + L); memmove(cipher_parms->iv_buf, &cipher_parms->iv_buf[1], cipher_parms->iv_len); } /** * spu2_wordalign_padlen() - SPU2 does not require padding. * @data_size: length of data field in bytes * * Return: length of status field padding, in bytes (always 0 on SPU2) */ u32 spu2_wordalign_padlen(u32 data_size) { return 0; }
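/*
 * Editor's note: the block below is a hypothetical, standalone userspace
 * sketch (not part of spu2.c or the driver) illustrating the hash-pad layout
 * that spu2_request_pad() builds for the SHA family: the pad area is zeroed,
 * terminated with a 0x80 byte, and the total message length in bits is
 * written big-endian into the final 8 bytes. All names here (hash_pad_demo,
 * etc.) are illustrative assumptions, not driver symbols.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() for the big-endian length words */

static void hash_pad_demo(uint8_t *pad, uint32_t hash_pad_len,
			  uint64_t total_sent_bytes)
{
	uint64_t bits = total_sent_bytes * 8ULL;
	uint32_t hi = htonl((uint32_t)(bits >> 32));
	uint32_t lo = htonl((uint32_t)bits);

	memset(pad, 0, hash_pad_len);	/* clear the padding section */
	pad[0] = 0x80;			/* data terminator */
	/* 64-bit big-endian bit count occupies the last 8 bytes of the pad */
	memcpy(pad + hash_pad_len - 8, &hi, 4);
	memcpy(pad + hash_pad_len - 4, &lo, 4);
}

int main(void)
{
	/* 20 bytes hashed so far, SHA-256 block size 64 -> 44 pad bytes */
	uint8_t pad[44];
	size_t i;

	hash_pad_demo(pad, sizeof(pad), 20);
	/* last 8 bytes print as 00 00 00 00 00 00 00 a0 (160 bits) */
	for (i = 0; i < sizeof(pad); i++)
		printf("%02x%c", pad[i], ((i + 1) % 16) ? ' ' : '\n');
	printf("\n");
	return 0;
}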
linux-master
drivers/crypto/bcm/spu2.c
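/*
 * Editor's note: a minimal userspace sketch (hypothetical names; the two
 * constants are assumed to mirror the RFC 3610 B0 layout rather than the
 * driver's spu.h definitions) of the IV trimming described by
 * spu2_ccm_update_iv() in spu2.c above: drop the flags byte and the trailing
 * L length bytes so only the CCM nonce is handed to the hardware.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_B0_L_PRIME		0x07	/* low 3 bits of B0 hold (L - 1) */
#define DEMO_B0_L_PRIME_SHIFT	0

static unsigned int demo_ccm_trim_iv(uint8_t *iv, unsigned int iv_len)
{
	unsigned int L = ((iv[0] & DEMO_B0_L_PRIME) >> DEMO_B0_L_PRIME_SHIFT) + 1;

	/* keep only the nonce: skip the flags byte, drop the L length bytes */
	iv_len -= (1 + L);
	memmove(iv, &iv[1], iv_len);
	return iv_len;
}

int main(void)
{
	/* 16-byte AES-CCM IV: flags byte encoding L = 4, an 11-byte nonce,
	 * then 4 counter/length bytes the hardware does not want. */
	uint8_t iv[16] = { 0x03,
			   0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5,
			   0xa6, 0xa7, 0xa8, 0xa9, 0xaa,
			   0x00, 0x00, 0x00, 0x00 };
	unsigned int i, len = demo_ccm_trim_iv(iv, sizeof(iv));

	printf("nonce length: %u\n", len);	/* expect 11 */
	for (i = 0; i < len; i++)
		printf("%02x ", iv[i]);
	printf("\n");
	return 0;
}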
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2016 Broadcom */ #include <linux/err.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> #include <linux/crypto.h> #include <linux/kthread.h> #include <linux/rtnetlink.h> #include <linux/sched.h> #include <linux/of.h> #include <linux/io.h> #include <linux/bitops.h> #include <crypto/algapi.h> #include <crypto/aead.h> #include <crypto/internal/aead.h> #include <crypto/aes.h> #include <crypto/internal/des.h> #include <crypto/hmac.h> #include <crypto/md5.h> #include <crypto/authenc.h> #include <crypto/skcipher.h> #include <crypto/hash.h> #include <crypto/sha1.h> #include <crypto/sha2.h> #include <crypto/sha3.h> #include "util.h" #include "cipher.h" #include "spu.h" #include "spum.h" #include "spu2.h" /* ================= Device Structure ================== */ struct bcm_device_private iproc_priv; /* ==================== Parameters ===================== */ int flow_debug_logging; module_param(flow_debug_logging, int, 0644); MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging"); int packet_debug_logging; module_param(packet_debug_logging, int, 0644); MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging"); int debug_logging_sleep; module_param(debug_logging_sleep, int, 0644); MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep"); /* * The value of these module parameters is used to set the priority for each * algo type when this driver registers algos with the kernel crypto API. * To use a priority other than the default, set the priority in the insmod or * modprobe. Changing the module priority after init time has no effect. * * The default priorities are chosen to be lower (less preferred) than ARMv8 CE * algos, but more preferred than generic software algos. */ static int cipher_pri = 150; module_param(cipher_pri, int, 0644); MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos"); static int hash_pri = 100; module_param(hash_pri, int, 0644); MODULE_PARM_DESC(hash_pri, "Priority for hash algos"); static int aead_pri = 150; module_param(aead_pri, int, 0644); MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos"); /* A type 3 BCM header, expected to precede the SPU header for SPU-M. * Bits 3 and 4 in the first byte encode the channel number (the dma ringset). * 0x60 - ring 0 * 0x68 - ring 1 * 0x70 - ring 2 * 0x78 - ring 3 */ static char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 }; /* * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN * is set dynamically after reading SPU type from device tree. */ #define BCM_HDR_LEN iproc_priv.bcm_hdr_len /* min and max time to sleep before retrying when mbox queue is full. usec */ #define MBOX_SLEEP_MIN 800 #define MBOX_SLEEP_MAX 1000 /** * select_channel() - Select a SPU channel to handle a crypto request. Selects * channel in round robin order. * * Return: channel index */ static u8 select_channel(void) { u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan); return chan_idx % iproc_priv.spu.num_chan; } /** * spu_skcipher_rx_sg_create() - Build up the scatterlist of buffers used to * receive a SPU response message for an skcipher request. Includes buffers to * catch SPU message headers and the response data. 
* @mssg: mailbox message containing the receive sg * @rctx: crypto request context * @rx_frag_num: number of scatterlist elements required to hold the * SPU response message * @chunksize: Number of bytes of response data expected * @stat_pad_len: Number of bytes required to pad the STAT field to * a 4-byte boundary * * The scatterlist that gets allocated here is freed in spu_chunk_cleanup() * when the request completes, whether the request is handled successfully or * there is an error. * * Returns: * 0 if successful * < 0 if an error */ static int spu_skcipher_rx_sg_create(struct brcm_message *mssg, struct iproc_reqctx_s *rctx, u8 rx_frag_num, unsigned int chunksize, u32 stat_pad_len) { struct spu_hw *spu = &iproc_priv.spu; struct scatterlist *sg; /* used to build sgs in mbox message */ struct iproc_ctx_s *ctx = rctx->ctx; u32 datalen; /* Number of bytes of response data expected */ mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist), rctx->gfp); if (!mssg->spu.dst) return -ENOMEM; sg = mssg->spu.dst; sg_init_table(sg, rx_frag_num); /* Space for SPU message header */ sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len); /* If XTS tweak in payload, add buffer to receive encrypted tweak */ if ((ctx->cipher.mode == CIPHER_MODE_XTS) && spu->spu_xts_tweak_in_payload()) sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_XTS_TWEAK_SIZE); /* Copy in each dst sg entry from request, up to chunksize */ datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip, rctx->dst_nents, chunksize); if (datalen < chunksize) { pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u", __func__, chunksize, datalen); return -EFAULT; } if (stat_pad_len) sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len); memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN); sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len()); return 0; } /** * spu_skcipher_tx_sg_create() - Build up the scatterlist of buffers used to * send a SPU request message for an skcipher request. Includes SPU message * headers and the request data. * @mssg: mailbox message containing the transmit sg * @rctx: crypto request context * @tx_frag_num: number of scatterlist elements required to construct the * SPU request message * @chunksize: Number of bytes of request data * @pad_len: Number of pad bytes * * The scatterlist that gets allocated here is freed in spu_chunk_cleanup() * when the request completes, whether the request is handled successfully or * there is an error. 
* * Returns: * 0 if successful * < 0 if an error */ static int spu_skcipher_tx_sg_create(struct brcm_message *mssg, struct iproc_reqctx_s *rctx, u8 tx_frag_num, unsigned int chunksize, u32 pad_len) { struct spu_hw *spu = &iproc_priv.spu; struct scatterlist *sg; /* used to build sgs in mbox message */ struct iproc_ctx_s *ctx = rctx->ctx; u32 datalen; /* Number of bytes of response data expected */ u32 stat_len; mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist), rctx->gfp); if (unlikely(!mssg->spu.src)) return -ENOMEM; sg = mssg->spu.src; sg_init_table(sg, tx_frag_num); sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr, BCM_HDR_LEN + ctx->spu_req_hdr_len); /* if XTS tweak in payload, copy from IV (where crypto API puts it) */ if ((ctx->cipher.mode == CIPHER_MODE_XTS) && spu->spu_xts_tweak_in_payload()) sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE); /* Copy in each src sg entry from request, up to chunksize */ datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip, rctx->src_nents, chunksize); if (unlikely(datalen < chunksize)) { pr_err("%s(): failed to copy src sg to mbox msg", __func__); return -EFAULT; } if (pad_len) sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len); stat_len = spu->spu_tx_status_len(); if (stat_len) { memset(rctx->msg_buf.tx_stat, 0, stat_len); sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len); } return 0; } static int mailbox_send_message(struct brcm_message *mssg, u32 flags, u8 chan_idx) { int err; int retry_cnt = 0; struct device *dev = &(iproc_priv.pdev->dev); err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg); if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) { while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) { /* * Mailbox queue is full. Since MAY_SLEEP is set, assume * not in atomic context and we can wait and try again. */ retry_cnt++; usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX); err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg); atomic_inc(&iproc_priv.mb_no_spc); } } if (err < 0) { atomic_inc(&iproc_priv.mb_send_fail); return err; } /* Check error returned by mailbox controller */ err = mssg->error; if (unlikely(err < 0)) { dev_err(dev, "message error %d", err); /* Signal txdone for mailbox channel */ } /* Signal txdone for mailbox channel */ mbox_client_txdone(iproc_priv.mbox[chan_idx], err); return err; } /** * handle_skcipher_req() - Submit as much of a block cipher request as fits in * a single SPU request message, starting at the current position in the request * data. * @rctx: Crypto request context * * This may be called on the crypto API thread, or, when a request is so large * it must be broken into multiple SPU messages, on the thread used to invoke * the response callback. When requests are broken into multiple SPU * messages, we assume subsequent messages depend on previous results, and * thus always wait for previous results before submitting the next message. * Because requests are submitted in lock step like this, there is no need * to synchronize access to request data structures. 
* * Return: -EINPROGRESS: request has been accepted and result will be returned * asynchronously * Any other value indicates an error */ static int handle_skcipher_req(struct iproc_reqctx_s *rctx) { struct spu_hw *spu = &iproc_priv.spu; struct crypto_async_request *areq = rctx->parent; struct skcipher_request *req = container_of(areq, struct skcipher_request, base); struct iproc_ctx_s *ctx = rctx->ctx; struct spu_cipher_parms cipher_parms; int err; unsigned int chunksize; /* Num bytes of request to submit */ int remaining; /* Bytes of request still to process */ int chunk_start; /* Beginning of data for current SPU msg */ /* IV or ctr value to use in this SPU msg */ u8 local_iv_ctr[MAX_IV_SIZE]; u32 stat_pad_len; /* num bytes to align status field */ u32 pad_len; /* total length of all padding */ struct brcm_message *mssg; /* mailbox message */ /* number of entries in src and dst sg in mailbox message. */ u8 rx_frag_num = 2; /* response header and STATUS */ u8 tx_frag_num = 1; /* request header */ flow_log("%s\n", __func__); cipher_parms.alg = ctx->cipher.alg; cipher_parms.mode = ctx->cipher.mode; cipher_parms.type = ctx->cipher_type; cipher_parms.key_len = ctx->enckeylen; cipher_parms.key_buf = ctx->enckey; cipher_parms.iv_buf = local_iv_ctr; cipher_parms.iv_len = rctx->iv_ctr_len; mssg = &rctx->mb_mssg; chunk_start = rctx->src_sent; remaining = rctx->total_todo - chunk_start; /* determine the chunk we are breaking off and update the indexes */ if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) && (remaining > ctx->max_payload)) chunksize = ctx->max_payload; else chunksize = remaining; rctx->src_sent += chunksize; rctx->total_sent = rctx->src_sent; /* Count number of sg entries to be included in this request */ rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize); rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize); if ((ctx->cipher.mode == CIPHER_MODE_CBC) && rctx->is_encrypt && chunk_start) /* * Encrypting non-first first chunk. Copy last block of * previous result to IV for this chunk. */ sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr, rctx->iv_ctr_len, chunk_start - rctx->iv_ctr_len); if (rctx->iv_ctr_len) { /* get our local copy of the iv */ __builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr, rctx->iv_ctr_len); /* generate the next IV if possible */ if ((ctx->cipher.mode == CIPHER_MODE_CBC) && !rctx->is_encrypt) { /* * CBC Decrypt: next IV is the last ciphertext block in * this chunk */ sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr, rctx->iv_ctr_len, rctx->src_sent - rctx->iv_ctr_len); } else if (ctx->cipher.mode == CIPHER_MODE_CTR) { /* * The SPU hardware increments the counter once for * each AES block of 16 bytes. So update the counter * for the next chunk, if there is one. Note that for * this chunk, the counter has already been copied to * local_iv_ctr. We can assume a block size of 16, * because we only support CTR mode for AES, not for * any other cipher alg. 
*/ add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4); } } if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) flow_log("max_payload infinite\n"); else flow_log("max_payload %u\n", ctx->max_payload); flow_log("sent:%u start:%u remains:%u size:%u\n", rctx->src_sent, chunk_start, remaining, chunksize); /* Copy SPU header template created at setkey time */ memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr, sizeof(rctx->msg_buf.bcm_spu_req_hdr)); spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN, ctx->spu_req_hdr_len, !(rctx->is_encrypt), &cipher_parms, chunksize); atomic64_add(chunksize, &iproc_priv.bytes_out); stat_pad_len = spu->spu_wordalign_padlen(chunksize); if (stat_pad_len) rx_frag_num++; pad_len = stat_pad_len; if (pad_len) { tx_frag_num++; spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0, 0, ctx->auth.alg, ctx->auth.mode, rctx->total_sent, stat_pad_len); } spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN, ctx->spu_req_hdr_len); packet_log("payload:\n"); dump_sg(rctx->src_sg, rctx->src_skip, chunksize); packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len); /* * Build mailbox message containing SPU request msg and rx buffers * to catch response message */ memset(mssg, 0, sizeof(*mssg)); mssg->type = BRCM_MESSAGE_SPU; mssg->ctx = rctx; /* Will be returned in response */ /* Create rx scatterlist to catch result */ rx_frag_num += rctx->dst_nents; if ((ctx->cipher.mode == CIPHER_MODE_XTS) && spu->spu_xts_tweak_in_payload()) rx_frag_num++; /* extra sg to insert tweak */ err = spu_skcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize, stat_pad_len); if (err) return err; /* Create tx scatterlist containing SPU request message */ tx_frag_num += rctx->src_nents; if (spu->spu_tx_status_len()) tx_frag_num++; if ((ctx->cipher.mode == CIPHER_MODE_XTS) && spu->spu_xts_tweak_in_payload()) tx_frag_num++; /* extra sg to insert tweak */ err = spu_skcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize, pad_len); if (err) return err; err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); if (unlikely(err < 0)) return err; return -EINPROGRESS; } /** * handle_skcipher_resp() - Process a block cipher SPU response. Updates the * total received count for the request and updates global stats. * @rctx: Crypto request context */ static void handle_skcipher_resp(struct iproc_reqctx_s *rctx) { struct spu_hw *spu = &iproc_priv.spu; struct crypto_async_request *areq = rctx->parent; struct skcipher_request *req = skcipher_request_cast(areq); struct iproc_ctx_s *ctx = rctx->ctx; u32 payload_len; /* See how much data was returned */ payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr); /* * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the * encrypted tweak ("i") value; we don't count those. */ if ((ctx->cipher.mode == CIPHER_MODE_XTS) && spu->spu_xts_tweak_in_payload() && (payload_len >= SPU_XTS_TWEAK_SIZE)) payload_len -= SPU_XTS_TWEAK_SIZE; atomic64_add(payload_len, &iproc_priv.bytes_in); flow_log("%s() offset: %u, bd_len: %u BD:\n", __func__, rctx->total_received, payload_len); dump_sg(req->dst, rctx->total_received, payload_len); rctx->total_received += payload_len; if (rctx->total_received == rctx->total_todo) { atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]); atomic_inc( &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]); } } /** * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to * receive a SPU response message for an ahash request. 
* @mssg: mailbox message containing the receive sg * @rctx: crypto request context * @rx_frag_num: number of scatterlist elements required to hold the * SPU response message * @digestsize: length of hash digest, in bytes * @stat_pad_len: Number of bytes required to pad the STAT field to * a 4-byte boundary * * The scatterlist that gets allocated here is freed in spu_chunk_cleanup() * when the request completes, whether the request is handled successfully or * there is an error. * * Return: * 0 if successful * < 0 if an error */ static int spu_ahash_rx_sg_create(struct brcm_message *mssg, struct iproc_reqctx_s *rctx, u8 rx_frag_num, unsigned int digestsize, u32 stat_pad_len) { struct spu_hw *spu = &iproc_priv.spu; struct scatterlist *sg; /* used to build sgs in mbox message */ struct iproc_ctx_s *ctx = rctx->ctx; mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist), rctx->gfp); if (!mssg->spu.dst) return -ENOMEM; sg = mssg->spu.dst; sg_init_table(sg, rx_frag_num); /* Space for SPU message header */ sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len); /* Space for digest */ sg_set_buf(sg++, rctx->msg_buf.digest, digestsize); if (stat_pad_len) sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len); memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN); sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len()); return 0; } /** * spu_ahash_tx_sg_create() - Build up the scatterlist of buffers used to send * a SPU request message for an ahash request. Includes SPU message headers and * the request data. * @mssg: mailbox message containing the transmit sg * @rctx: crypto request context * @tx_frag_num: number of scatterlist elements required to construct the * SPU request message * @spu_hdr_len: length in bytes of SPU message header * @hash_carry_len: Number of bytes of data carried over from previous req * @new_data_len: Number of bytes of new request data * @pad_len: Number of pad bytes * * The scatterlist that gets allocated here is freed in spu_chunk_cleanup() * when the request completes, whether the request is handled successfully or * there is an error. * * Return: * 0 if successful * < 0 if an error */ static int spu_ahash_tx_sg_create(struct brcm_message *mssg, struct iproc_reqctx_s *rctx, u8 tx_frag_num, u32 spu_hdr_len, unsigned int hash_carry_len, unsigned int new_data_len, u32 pad_len) { struct spu_hw *spu = &iproc_priv.spu; struct scatterlist *sg; /* used to build sgs in mbox message */ u32 datalen; /* Number of bytes of response data expected */ u32 stat_len; mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist), rctx->gfp); if (!mssg->spu.src) return -ENOMEM; sg = mssg->spu.src; sg_init_table(sg, tx_frag_num); sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr, BCM_HDR_LEN + spu_hdr_len); if (hash_carry_len) sg_set_buf(sg++, rctx->hash_carry, hash_carry_len); if (new_data_len) { /* Copy in each src sg entry from request, up to chunksize */ datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip, rctx->src_nents, new_data_len); if (datalen < new_data_len) { pr_err("%s(): failed to copy src sg to mbox msg", __func__); return -EFAULT; } } if (pad_len) sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len); stat_len = spu->spu_tx_status_len(); if (stat_len) { memset(rctx->msg_buf.tx_stat, 0, stat_len); sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len); } return 0; } /** * handle_ahash_req() - Process an asynchronous hash request from the crypto * API. 
* @rctx: Crypto request context * * Builds a SPU request message embedded in a mailbox message and submits the * mailbox message on a selected mailbox channel. The SPU request message is * constructed as a scatterlist, including entries from the crypto API's * src scatterlist to avoid copying the data to be hashed. This function is * called either on the thread from the crypto API, or, in the case that the * crypto API request is too large to fit in a single SPU request message, * on the thread that invokes the receive callback with a response message. * Because some operations require the response from one chunk before the next * chunk can be submitted, we always wait for the response for the previous * chunk before submitting the next chunk. Because requests are submitted in * lock step like this, there is no need to synchronize access to request data * structures. * * Return: * -EINPROGRESS: request has been submitted to SPU and response will be * returned asynchronously * -EAGAIN: non-final request included a small amount of data, which for * efficiency we did not submit to the SPU, but instead stored * to be submitted to the SPU with the next part of the request * other: an error code */ static int handle_ahash_req(struct iproc_reqctx_s *rctx) { struct spu_hw *spu = &iproc_priv.spu; struct crypto_async_request *areq = rctx->parent; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct crypto_tfm *tfm = crypto_ahash_tfm(ahash); unsigned int blocksize = crypto_tfm_alg_blocksize(tfm); struct iproc_ctx_s *ctx = rctx->ctx; /* number of bytes still to be hashed in this req */ unsigned int nbytes_to_hash = 0; int err; unsigned int chunksize = 0; /* length of hash carry + new data */ /* * length of new data, not from hash carry, to be submitted in * this hw request */ unsigned int new_data_len; unsigned int __maybe_unused chunk_start = 0; u32 db_size; /* Length of data field, incl gcm and hash padding */ int pad_len = 0; /* total pad len, including gcm, hash, stat padding */ u32 data_pad_len = 0; /* length of GCM/CCM padding */ u32 stat_pad_len = 0; /* length of padding to align STATUS word */ struct brcm_message *mssg; /* mailbox message */ struct spu_request_opts req_opts; struct spu_cipher_parms cipher_parms; struct spu_hash_parms hash_parms; struct spu_aead_parms aead_parms; unsigned int local_nbuf; u32 spu_hdr_len; unsigned int digestsize; u16 rem = 0; /* * number of entries in src and dst sg. Always includes SPU msg header. * rx always includes a buffer to catch digest and STATUS. */ u8 rx_frag_num = 3; u8 tx_frag_num = 1; flow_log("total_todo %u, total_sent %u\n", rctx->total_todo, rctx->total_sent); memset(&req_opts, 0, sizeof(req_opts)); memset(&cipher_parms, 0, sizeof(cipher_parms)); memset(&hash_parms, 0, sizeof(hash_parms)); memset(&aead_parms, 0, sizeof(aead_parms)); req_opts.bd_suppress = true; hash_parms.alg = ctx->auth.alg; hash_parms.mode = ctx->auth.mode; hash_parms.type = HASH_TYPE_NONE; hash_parms.key_buf = (u8 *)ctx->authkey; hash_parms.key_len = ctx->authkeylen; /* * For hash algorithms below assignment looks bit odd but * it's needed for AES-XCBC and AES-CMAC hash algorithms * to differentiate between 128, 192, 256 bit key values. * Based on the key values, hash algorithm is selected. * For example for 128 bit key, hash algorithm is AES-128. */ cipher_parms.type = ctx->cipher_type; mssg = &rctx->mb_mssg; chunk_start = rctx->src_sent; /* * Compute the amount remaining to hash. 
This may include data * carried over from previous requests. */ nbytes_to_hash = rctx->total_todo - rctx->total_sent; chunksize = nbytes_to_hash; if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) && (chunksize > ctx->max_payload)) chunksize = ctx->max_payload; /* * If this is not a final request and the request data is not a multiple * of a full block, then simply park the extra data and prefix it to the * data for the next request. */ if (!rctx->is_final) { u8 *dest = rctx->hash_carry + rctx->hash_carry_len; u16 new_len; /* len of data to add to hash carry */ rem = chunksize % blocksize; /* remainder */ if (rem) { /* chunksize not a multiple of blocksize */ chunksize -= rem; if (chunksize == 0) { /* Don't have a full block to submit to hw */ new_len = rem - rctx->hash_carry_len; sg_copy_part_to_buf(req->src, dest, new_len, rctx->src_sent); rctx->hash_carry_len = rem; flow_log("Exiting with hash carry len: %u\n", rctx->hash_carry_len); packet_dump(" buf: ", rctx->hash_carry, rctx->hash_carry_len); return -EAGAIN; } } } /* if we have hash carry, then prefix it to the data in this request */ local_nbuf = rctx->hash_carry_len; rctx->hash_carry_len = 0; if (local_nbuf) tx_frag_num++; new_data_len = chunksize - local_nbuf; /* Count number of sg entries to be used in this request */ rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, new_data_len); /* AES hashing keeps key size in type field, so need to copy it here */ if (hash_parms.alg == HASH_ALG_AES) hash_parms.type = (enum hash_type)cipher_parms.type; else hash_parms.type = spu->spu_hash_type(rctx->total_sent); digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg, hash_parms.type); hash_parms.digestsize = digestsize; /* update the indexes */ rctx->total_sent += chunksize; /* if you sent a prebuf then that wasn't from this req->src */ rctx->src_sent += new_data_len; if ((rctx->total_sent == rctx->total_todo) && rctx->is_final) hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg, hash_parms.mode, chunksize, blocksize); /* * If a non-first chunk, then include the digest returned from the * previous chunk so that hw can add to it (except for AES types). */ if ((hash_parms.type == HASH_TYPE_UPDT) && (hash_parms.alg != HASH_ALG_AES)) { hash_parms.key_buf = rctx->incr_hash; hash_parms.key_len = digestsize; } atomic64_add(chunksize, &iproc_priv.bytes_out); flow_log("%s() final: %u nbuf: %u ", __func__, rctx->is_final, local_nbuf); if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) flow_log("max_payload infinite\n"); else flow_log("max_payload %u\n", ctx->max_payload); flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize); /* Prepend SPU header with type 3 BCM header */ memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN); hash_parms.prebuf_len = local_nbuf; spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN, &req_opts, &cipher_parms, &hash_parms, &aead_parms, new_data_len); if (spu_hdr_len == 0) { pr_err("Failed to create SPU request header\n"); return -EFAULT; } /* * Determine total length of padding required. Put all padding in one * buffer. 
*/ data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize); db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len, 0, 0, hash_parms.pad_len); if (spu->spu_tx_status_len()) stat_pad_len = spu->spu_wordalign_padlen(db_size); if (stat_pad_len) rx_frag_num++; pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len; if (pad_len) { tx_frag_num++; spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len, hash_parms.pad_len, ctx->auth.alg, ctx->auth.mode, rctx->total_sent, stat_pad_len); } spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN, spu_hdr_len); packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf); flow_log("Data:\n"); dump_sg(rctx->src_sg, rctx->src_skip, new_data_len); packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len); /* * Build mailbox message containing SPU request msg and rx buffers * to catch response message */ memset(mssg, 0, sizeof(*mssg)); mssg->type = BRCM_MESSAGE_SPU; mssg->ctx = rctx; /* Will be returned in response */ /* Create rx scatterlist to catch result */ err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize, stat_pad_len); if (err) return err; /* Create tx scatterlist containing SPU request message */ tx_frag_num += rctx->src_nents; if (spu->spu_tx_status_len()) tx_frag_num++; err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len, local_nbuf, new_data_len, pad_len); if (err) return err; err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); if (unlikely(err < 0)) return err; return -EINPROGRESS; } /** * spu_hmac_outer_hash() - Request synchonous software compute of the outer hash * for an HMAC request. * @req: The HMAC request from the crypto API * @ctx: The session context * * Return: 0 if synchronous hash operation successful * -EINVAL if the hash algo is unrecognized * any other value indicates an error */ static int spu_hmac_outer_hash(struct ahash_request *req, struct iproc_ctx_s *ctx) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); unsigned int blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); int rc; switch (ctx->auth.alg) { case HASH_ALG_MD5: rc = do_shash("md5", req->result, ctx->opad, blocksize, req->result, ctx->digestsize, NULL, 0); break; case HASH_ALG_SHA1: rc = do_shash("sha1", req->result, ctx->opad, blocksize, req->result, ctx->digestsize, NULL, 0); break; case HASH_ALG_SHA224: rc = do_shash("sha224", req->result, ctx->opad, blocksize, req->result, ctx->digestsize, NULL, 0); break; case HASH_ALG_SHA256: rc = do_shash("sha256", req->result, ctx->opad, blocksize, req->result, ctx->digestsize, NULL, 0); break; case HASH_ALG_SHA384: rc = do_shash("sha384", req->result, ctx->opad, blocksize, req->result, ctx->digestsize, NULL, 0); break; case HASH_ALG_SHA512: rc = do_shash("sha512", req->result, ctx->opad, blocksize, req->result, ctx->digestsize, NULL, 0); break; default: pr_err("%s() Error : unknown hmac type\n", __func__); rc = -EINVAL; } return rc; } /** * ahash_req_done() - Process a hash result from the SPU hardware. 
* @rctx: Crypto request context * * Return: 0 if successful * < 0 if an error */ static int ahash_req_done(struct iproc_reqctx_s *rctx) { struct spu_hw *spu = &iproc_priv.spu; struct crypto_async_request *areq = rctx->parent; struct ahash_request *req = ahash_request_cast(areq); struct iproc_ctx_s *ctx = rctx->ctx; int err; memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize); if (spu->spu_type == SPU_TYPE_SPUM) { /* byte swap the output from the UPDT function to network byte * order */ if (ctx->auth.alg == HASH_ALG_MD5) { __swab32s((u32 *)req->result); __swab32s(((u32 *)req->result) + 1); __swab32s(((u32 *)req->result) + 2); __swab32s(((u32 *)req->result) + 3); __swab32s(((u32 *)req->result) + 4); } } flow_dump(" digest ", req->result, ctx->digestsize); /* if this an HMAC then do the outer hash */ if (rctx->is_sw_hmac) { err = spu_hmac_outer_hash(req, ctx); if (err < 0) return err; flow_dump(" hmac: ", req->result, ctx->digestsize); } if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) { atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]); atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]); } else { atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]); atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]); } return 0; } /** * handle_ahash_resp() - Process a SPU response message for a hash request. * Checks if the entire crypto API request has been processed, and if so, * invokes post processing on the result. * @rctx: Crypto request context */ static void handle_ahash_resp(struct iproc_reqctx_s *rctx) { struct iproc_ctx_s *ctx = rctx->ctx; struct crypto_async_request *areq = rctx->parent; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); unsigned int blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); /* * Save hash to use as input to next op if incremental. Might be copying * too much, but that's easier than figuring out actual digest size here */ memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE); flow_log("%s() blocksize:%u digestsize:%u\n", __func__, blocksize, ctx->digestsize); atomic64_add(ctx->digestsize, &iproc_priv.bytes_in); if (rctx->is_final && (rctx->total_sent == rctx->total_todo)) ahash_req_done(rctx); } /** * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive * a SPU response message for an AEAD request. Includes buffers to catch SPU * message headers and the response data. * @mssg: mailbox message containing the receive sg * @req: Crypto API request * @rctx: crypto request context * @rx_frag_num: number of scatterlist elements required to hold the * SPU response message * @assoc_len: Length of associated data included in the crypto request * @ret_iv_len: Length of IV returned in response * @resp_len: Number of bytes of response data expected to be written to * dst buffer from crypto API * @digestsize: Length of hash digest, in bytes * @stat_pad_len: Number of bytes required to pad the STAT field to * a 4-byte boundary * * The scatterlist that gets allocated here is freed in spu_chunk_cleanup() * when the request completes, whether the request is handled successfully or * there is an error. 
* * Returns: * 0 if successful * < 0 if an error */ static int spu_aead_rx_sg_create(struct brcm_message *mssg, struct aead_request *req, struct iproc_reqctx_s *rctx, u8 rx_frag_num, unsigned int assoc_len, u32 ret_iv_len, unsigned int resp_len, unsigned int digestsize, u32 stat_pad_len) { struct spu_hw *spu = &iproc_priv.spu; struct scatterlist *sg; /* used to build sgs in mbox message */ struct iproc_ctx_s *ctx = rctx->ctx; u32 datalen; /* Number of bytes of response data expected */ u32 assoc_buf_len; u8 data_padlen = 0; if (ctx->is_rfc4543) { /* RFC4543: only pad after data, not after AAD */ data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, assoc_len + resp_len); assoc_buf_len = assoc_len; } else { data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, resp_len); assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode, assoc_len, ret_iv_len, rctx->is_encrypt); } if (ctx->cipher.mode == CIPHER_MODE_CCM) /* ICV (after data) must be in the next 32-bit word for CCM */ data_padlen += spu->spu_wordalign_padlen(assoc_buf_len + resp_len + data_padlen); if (data_padlen) /* have to catch gcm pad in separate buffer */ rx_frag_num++; mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist), rctx->gfp); if (!mssg->spu.dst) return -ENOMEM; sg = mssg->spu.dst; sg_init_table(sg, rx_frag_num); /* Space for SPU message header */ sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len); if (assoc_buf_len) { /* * Don't write directly to req->dst, because SPU may pad the * assoc data in the response */ memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len); sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len); } if (resp_len) { /* * Copy in each dst sg entry from request, up to chunksize. * dst sg catches just the data. digest caught in separate buf. */ datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip, rctx->dst_nents, resp_len); if (datalen < (resp_len)) { pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u", __func__, resp_len, datalen); return -EFAULT; } } /* If GCM/CCM data is padded, catch padding in separate buffer */ if (data_padlen) { memset(rctx->msg_buf.a.gcmpad, 0, data_padlen); sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen); } /* Always catch ICV in separate buffer */ sg_set_buf(sg++, rctx->msg_buf.digest, digestsize); flow_log("stat_pad_len %u\n", stat_pad_len); if (stat_pad_len) { memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len); sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len); } memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN); sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len()); return 0; } /** * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a * SPU request message for an AEAD request. Includes SPU message headers and the * request data. * @mssg: mailbox message containing the transmit sg * @rctx: crypto request context * @tx_frag_num: number of scatterlist elements required to construct the * SPU request message * @spu_hdr_len: length of SPU message header in bytes * @assoc: crypto API associated data scatterlist * @assoc_len: length of associated data * @assoc_nents: number of scatterlist entries containing assoc data * @aead_iv_len: length of AEAD IV, if included * @chunksize: Number of bytes of request data * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM. 
* @pad_len: Number of pad bytes * @incl_icv: If true, write separate ICV buffer after data and * any padding * * The scatterlist that gets allocated here is freed in spu_chunk_cleanup() * when the request completes, whether the request is handled successfully or * there is an error. * * Return: * 0 if successful * < 0 if an error */ static int spu_aead_tx_sg_create(struct brcm_message *mssg, struct iproc_reqctx_s *rctx, u8 tx_frag_num, u32 spu_hdr_len, struct scatterlist *assoc, unsigned int assoc_len, int assoc_nents, unsigned int aead_iv_len, unsigned int chunksize, u32 aad_pad_len, u32 pad_len, bool incl_icv) { struct spu_hw *spu = &iproc_priv.spu; struct scatterlist *sg; /* used to build sgs in mbox message */ struct scatterlist *assoc_sg = assoc; struct iproc_ctx_s *ctx = rctx->ctx; u32 datalen; /* Number of bytes of data to write */ u32 written; /* Number of bytes of data written */ u32 assoc_offset = 0; u32 stat_len; mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist), rctx->gfp); if (!mssg->spu.src) return -ENOMEM; sg = mssg->spu.src; sg_init_table(sg, tx_frag_num); sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr, BCM_HDR_LEN + spu_hdr_len); if (assoc_len) { /* Copy in each associated data sg entry from request */ written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset, assoc_nents, assoc_len); if (written < assoc_len) { pr_err("%s(): failed to copy assoc sg to mbox msg", __func__); return -EFAULT; } } if (aead_iv_len) sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len); if (aad_pad_len) { memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len); sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len); } datalen = chunksize; if ((chunksize > ctx->digestsize) && incl_icv) datalen -= ctx->digestsize; if (datalen) { /* For aead, a single msg should consume the entire src sg */ written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip, rctx->src_nents, datalen); if (written < datalen) { pr_err("%s(): failed to copy src sg to mbox msg", __func__); return -EFAULT; } } if (pad_len) { memset(rctx->msg_buf.spu_req_pad, 0, pad_len); sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len); } if (incl_icv) sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize); stat_len = spu->spu_tx_status_len(); if (stat_len) { memset(rctx->msg_buf.tx_stat, 0, stat_len); sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len); } return 0; } /** * handle_aead_req() - Submit a SPU request message for the next chunk of the * current AEAD request. * @rctx: Crypto request context * * Unlike other operation types, we assume the length of the request fits in * a single SPU request message. aead_enqueue() makes sure this is true. * Comments for other op types regarding threads applies here as well. * * Unlike incremental hash ops, where the spu returns the entire hash for * truncated algs like sha-224, the SPU returns just the truncated hash in * response to aead requests. So digestsize is always ctx->digestsize here. 
* * Return: -EINPROGRESS: crypto request has been accepted and result will be * returned asynchronously * Any other value indicates an error */ static int handle_aead_req(struct iproc_reqctx_s *rctx) { struct spu_hw *spu = &iproc_priv.spu; struct crypto_async_request *areq = rctx->parent; struct aead_request *req = container_of(areq, struct aead_request, base); struct iproc_ctx_s *ctx = rctx->ctx; int err; unsigned int chunksize; unsigned int resp_len; u32 spu_hdr_len; u32 db_size; u32 stat_pad_len; u32 pad_len; struct brcm_message *mssg; /* mailbox message */ struct spu_request_opts req_opts; struct spu_cipher_parms cipher_parms; struct spu_hash_parms hash_parms; struct spu_aead_parms aead_parms; int assoc_nents = 0; bool incl_icv = false; unsigned int digestsize = ctx->digestsize; /* number of entries in src and dst sg. Always includes SPU msg header. */ u8 rx_frag_num = 2; /* and STATUS */ u8 tx_frag_num = 1; /* doing the whole thing at once */ chunksize = rctx->total_todo; flow_log("%s: chunksize %u\n", __func__, chunksize); memset(&req_opts, 0, sizeof(req_opts)); memset(&hash_parms, 0, sizeof(hash_parms)); memset(&aead_parms, 0, sizeof(aead_parms)); req_opts.is_inbound = !(rctx->is_encrypt); req_opts.auth_first = ctx->auth_first; req_opts.is_aead = true; req_opts.is_esp = ctx->is_esp; cipher_parms.alg = ctx->cipher.alg; cipher_parms.mode = ctx->cipher.mode; cipher_parms.type = ctx->cipher_type; cipher_parms.key_buf = ctx->enckey; cipher_parms.key_len = ctx->enckeylen; cipher_parms.iv_buf = rctx->msg_buf.iv_ctr; cipher_parms.iv_len = rctx->iv_ctr_len; hash_parms.alg = ctx->auth.alg; hash_parms.mode = ctx->auth.mode; hash_parms.type = HASH_TYPE_NONE; hash_parms.key_buf = (u8 *)ctx->authkey; hash_parms.key_len = ctx->authkeylen; hash_parms.digestsize = digestsize; if ((ctx->auth.alg == HASH_ALG_SHA224) && (ctx->authkeylen < SHA224_DIGEST_SIZE)) hash_parms.key_len = SHA224_DIGEST_SIZE; aead_parms.assoc_size = req->assoclen; if (ctx->is_esp && !ctx->is_rfc4543) { /* * 8-byte IV is included assoc data in request. SPU2 * expects AAD to include just SPI and seqno. So * subtract off the IV len. */ aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE; if (rctx->is_encrypt) { aead_parms.return_iv = true; aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE; aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE; } } else { aead_parms.ret_iv_len = 0; } /* * Count number of sg entries from the crypto API request that are to * be included in this mailbox message. For dst sg, don't count space * for digest. Digest gets caught in a separate buffer and copied back * to dst sg when processing response. 
*/ rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize); rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize); if (aead_parms.assoc_size) assoc_nents = spu_sg_count(rctx->assoc, 0, aead_parms.assoc_size); mssg = &rctx->mb_mssg; rctx->total_sent = chunksize; rctx->src_sent = chunksize; if (spu->spu_assoc_resp_len(ctx->cipher.mode, aead_parms.assoc_size, aead_parms.ret_iv_len, rctx->is_encrypt)) rx_frag_num++; aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode, rctx->iv_ctr_len); if (ctx->auth.alg == HASH_ALG_AES) hash_parms.type = (enum hash_type)ctx->cipher_type; /* General case AAD padding (CCM and RFC4543 special cases below) */ aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, aead_parms.assoc_size); /* General case data padding (CCM decrypt special case below) */ aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize); if (ctx->cipher.mode == CIPHER_MODE_CCM) { /* * for CCM, AAD len + 2 (rather than AAD len) needs to be * 128-bit aligned */ aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len( ctx->cipher.mode, aead_parms.assoc_size + 2); /* * And when decrypting CCM, need to pad without including * size of ICV which is tacked on to end of chunk */ if (!rctx->is_encrypt) aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize - digestsize); /* CCM also requires software to rewrite portions of IV: */ spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen, chunksize, rctx->is_encrypt, ctx->is_esp); } if (ctx->is_rfc4543) { /* * RFC4543: data is included in AAD, so don't pad after AAD * and pad data based on both AAD + data size */ aead_parms.aad_pad_len = 0; if (!rctx->is_encrypt) aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len( ctx->cipher.mode, aead_parms.assoc_size + chunksize - digestsize); else aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len( ctx->cipher.mode, aead_parms.assoc_size + chunksize); req_opts.is_rfc4543 = true; } if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) { incl_icv = true; tx_frag_num++; /* Copy ICV from end of src scatterlist to digest buf */ sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize, req->assoclen + rctx->total_sent - digestsize); } atomic64_add(chunksize, &iproc_priv.bytes_out); flow_log("%s()-sent chunksize:%u\n", __func__, chunksize); /* Prepend SPU header with type 3 BCM header */ memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN); spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN, &req_opts, &cipher_parms, &hash_parms, &aead_parms, chunksize); /* Determine total length of padding. Put all padding in one buffer. 
*/ db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0, chunksize, aead_parms.aad_pad_len, aead_parms.data_pad_len, 0); stat_pad_len = spu->spu_wordalign_padlen(db_size); if (stat_pad_len) rx_frag_num++; pad_len = aead_parms.data_pad_len + stat_pad_len; if (pad_len) { tx_frag_num++; spu->spu_request_pad(rctx->msg_buf.spu_req_pad, aead_parms.data_pad_len, 0, ctx->auth.alg, ctx->auth.mode, rctx->total_sent, stat_pad_len); } spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN, spu_hdr_len); dump_sg(rctx->assoc, 0, aead_parms.assoc_size); packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len); packet_log("BD:\n"); dump_sg(rctx->src_sg, rctx->src_skip, chunksize); packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len); /* * Build mailbox message containing SPU request msg and rx buffers * to catch response message */ memset(mssg, 0, sizeof(*mssg)); mssg->type = BRCM_MESSAGE_SPU; mssg->ctx = rctx; /* Will be returned in response */ /* Create rx scatterlist to catch result */ rx_frag_num += rctx->dst_nents; resp_len = chunksize; /* * Always catch ICV in separate buffer. Have to for GCM/CCM because of * padding. Have to for SHA-224 and other truncated SHAs because SPU * sends entire digest back. */ rx_frag_num++; if (((ctx->cipher.mode == CIPHER_MODE_GCM) || (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) { /* * Input is ciphertxt plus ICV, but ICV not incl * in output. */ resp_len -= ctx->digestsize; if (resp_len == 0) /* no rx frags to catch output data */ rx_frag_num -= rctx->dst_nents; } err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num, aead_parms.assoc_size, aead_parms.ret_iv_len, resp_len, digestsize, stat_pad_len); if (err) return err; /* Create tx scatterlist containing SPU request message */ tx_frag_num += rctx->src_nents; tx_frag_num += assoc_nents; if (aead_parms.aad_pad_len) tx_frag_num++; if (aead_parms.iv_len) tx_frag_num++; if (spu->spu_tx_status_len()) tx_frag_num++; err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len, rctx->assoc, aead_parms.assoc_size, assoc_nents, aead_parms.iv_len, chunksize, aead_parms.aad_pad_len, pad_len, incl_icv); if (err) return err; err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); if (unlikely(err < 0)) return err; return -EINPROGRESS; } /** * handle_aead_resp() - Process a SPU response message for an AEAD request. * @rctx: Crypto request context */ static void handle_aead_resp(struct iproc_reqctx_s *rctx) { struct spu_hw *spu = &iproc_priv.spu; struct crypto_async_request *areq = rctx->parent; struct aead_request *req = container_of(areq, struct aead_request, base); struct iproc_ctx_s *ctx = rctx->ctx; u32 payload_len; unsigned int icv_offset; u32 result_len; /* See how much data was returned */ payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr); flow_log("payload_len %u\n", payload_len); /* only count payload */ atomic64_add(payload_len, &iproc_priv.bytes_in); if (req->assoclen) packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad, req->assoclen); /* * Copy the ICV back to the destination * buffer. In decrypt case, SPU gives us back the digest, but crypto * API doesn't expect ICV in dst buffer. 
*/ result_len = req->cryptlen; if (rctx->is_encrypt) { icv_offset = req->assoclen + rctx->total_sent; packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize); flow_log("copying ICV to dst sg at offset %u\n", icv_offset); sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest, ctx->digestsize, icv_offset); result_len += ctx->digestsize; } packet_log("response data: "); dump_sg(req->dst, req->assoclen, result_len); atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]); if (ctx->cipher.alg == CIPHER_ALG_AES) { if (ctx->cipher.mode == CIPHER_MODE_CCM) atomic_inc(&iproc_priv.aead_cnt[AES_CCM]); else if (ctx->cipher.mode == CIPHER_MODE_GCM) atomic_inc(&iproc_priv.aead_cnt[AES_GCM]); else atomic_inc(&iproc_priv.aead_cnt[AUTHENC]); } else { atomic_inc(&iproc_priv.aead_cnt[AUTHENC]); } } /** * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request * @rctx: request context * * Mailbox scatterlists are allocated for each chunk. So free them after * processing each chunk. */ static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx) { /* mailbox message used to tx request */ struct brcm_message *mssg = &rctx->mb_mssg; kfree(mssg->spu.src); kfree(mssg->spu.dst); memset(mssg, 0, sizeof(struct brcm_message)); } /** * finish_req() - Used to invoke the complete callback from the requester when * a request has been handled asynchronously. * @rctx: Request context * @err: Indicates whether the request was successful or not * * Ensures that cleanup has been done for request */ static void finish_req(struct iproc_reqctx_s *rctx, int err) { struct crypto_async_request *areq = rctx->parent; flow_log("%s() err:%d\n\n", __func__, err); /* No harm done if already called */ spu_chunk_cleanup(rctx); if (areq) crypto_request_complete(areq, err); } /** * spu_rx_callback() - Callback from mailbox framework with a SPU response. * @cl: mailbox client structure for SPU driver * @msg: mailbox message containing SPU response */ static void spu_rx_callback(struct mbox_client *cl, void *msg) { struct spu_hw *spu = &iproc_priv.spu; struct brcm_message *mssg = msg; struct iproc_reqctx_s *rctx; int err; rctx = mssg->ctx; if (unlikely(!rctx)) { /* This is fatal */ pr_err("%s(): no request context", __func__); err = -EFAULT; goto cb_finish; } /* process the SPU status */ err = spu->spu_status_process(rctx->msg_buf.rx_stat); if (err != 0) { if (err == SPU_INVALID_ICV) atomic_inc(&iproc_priv.bad_icv); err = -EBADMSG; goto cb_finish; } /* Process the SPU response message */ switch (rctx->ctx->alg->type) { case CRYPTO_ALG_TYPE_SKCIPHER: handle_skcipher_resp(rctx); break; case CRYPTO_ALG_TYPE_AHASH: handle_ahash_resp(rctx); break; case CRYPTO_ALG_TYPE_AEAD: handle_aead_resp(rctx); break; default: err = -EINVAL; goto cb_finish; } /* * If this response does not complete the request, then send the next * request chunk. */ if (rctx->total_sent < rctx->total_todo) { /* Deallocate anything specific to previous chunk */ spu_chunk_cleanup(rctx); switch (rctx->ctx->alg->type) { case CRYPTO_ALG_TYPE_SKCIPHER: err = handle_skcipher_req(rctx); break; case CRYPTO_ALG_TYPE_AHASH: err = handle_ahash_req(rctx); if (err == -EAGAIN) /* * we saved data in hash carry, but tell crypto * API we successfully completed request. 
*/ err = 0; break; case CRYPTO_ALG_TYPE_AEAD: err = handle_aead_req(rctx); break; default: err = -EINVAL; } if (err == -EINPROGRESS) /* Successfully submitted request for next chunk */ return; } cb_finish: finish_req(rctx, err); } /* ==================== Kernel Cryptographic API ==================== */ /** * skcipher_enqueue() - Handle skcipher encrypt or decrypt request. * @req: Crypto API request * @encrypt: true if encrypting; false if decrypting * * Return: -EINPROGRESS if request accepted and result will be returned * asynchronously * < 0 if an error */ static int skcipher_enqueue(struct skcipher_request *req, bool encrypt) { struct iproc_reqctx_s *rctx = skcipher_request_ctx(req); struct iproc_ctx_s *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req)); int err; flow_log("%s() enc:%u\n", __func__, encrypt); rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; rctx->parent = &req->base; rctx->is_encrypt = encrypt; rctx->bd_suppress = false; rctx->total_todo = req->cryptlen; rctx->src_sent = 0; rctx->total_sent = 0; rctx->total_received = 0; rctx->ctx = ctx; /* Initialize current position in src and dst scatterlists */ rctx->src_sg = req->src; rctx->src_nents = 0; rctx->src_skip = 0; rctx->dst_sg = req->dst; rctx->dst_nents = 0; rctx->dst_skip = 0; if (ctx->cipher.mode == CIPHER_MODE_CBC || ctx->cipher.mode == CIPHER_MODE_CTR || ctx->cipher.mode == CIPHER_MODE_OFB || ctx->cipher.mode == CIPHER_MODE_XTS || ctx->cipher.mode == CIPHER_MODE_GCM || ctx->cipher.mode == CIPHER_MODE_CCM) { rctx->iv_ctr_len = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(req)); memcpy(rctx->msg_buf.iv_ctr, req->iv, rctx->iv_ctr_len); } else { rctx->iv_ctr_len = 0; } /* Choose a SPU to process this request */ rctx->chan_idx = select_channel(); err = handle_skcipher_req(rctx); if (err != -EINPROGRESS) /* synchronous result */ spu_chunk_cleanup(rctx); return err; } static int des_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher); int err; err = verify_skcipher_des_key(cipher, key); if (err) return err; ctx->cipher_type = CIPHER_TYPE_DES; return 0; } static int threedes_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher); int err; err = verify_skcipher_des3_key(cipher, key); if (err) return err; ctx->cipher_type = CIPHER_TYPE_3DES; return 0; } static int aes_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher); if (ctx->cipher.mode == CIPHER_MODE_XTS) /* XTS includes two keys of equal length */ keylen = keylen / 2; switch (keylen) { case AES_KEYSIZE_128: ctx->cipher_type = CIPHER_TYPE_AES128; break; case AES_KEYSIZE_192: ctx->cipher_type = CIPHER_TYPE_AES192; break; case AES_KEYSIZE_256: ctx->cipher_type = CIPHER_TYPE_AES256; break; default: return -EINVAL; } WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) && ((ctx->max_payload % AES_BLOCK_SIZE) != 0)); return 0; } static int skcipher_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct spu_hw *spu = &iproc_priv.spu; struct iproc_ctx_s *ctx = crypto_skcipher_ctx(cipher); struct spu_cipher_parms cipher_parms; u32 alloc_len = 0; int err; flow_log("skcipher_setkey() keylen: %d\n", keylen); flow_dump(" key: ", key, keylen); switch (ctx->cipher.alg) { case CIPHER_ALG_DES: err = des_setkey(cipher, key, keylen); break; case CIPHER_ALG_3DES: 
err = threedes_setkey(cipher, key, keylen); break; case CIPHER_ALG_AES: err = aes_setkey(cipher, key, keylen); break; default: pr_err("%s() Error: unknown cipher alg\n", __func__); err = -EINVAL; } if (err) return err; memcpy(ctx->enckey, key, keylen); ctx->enckeylen = keylen; /* SPU needs XTS keys in the reverse order the crypto API presents */ if ((ctx->cipher.alg == CIPHER_ALG_AES) && (ctx->cipher.mode == CIPHER_MODE_XTS)) { unsigned int xts_keylen = keylen / 2; memcpy(ctx->enckey, key + xts_keylen, xts_keylen); memcpy(ctx->enckey + xts_keylen, key, xts_keylen); } if (spu->spu_type == SPU_TYPE_SPUM) alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN; else if (spu->spu_type == SPU_TYPE_SPU2) alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN; memset(ctx->bcm_spu_req_hdr, 0, alloc_len); cipher_parms.iv_buf = NULL; cipher_parms.iv_len = crypto_skcipher_ivsize(cipher); flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len); cipher_parms.alg = ctx->cipher.alg; cipher_parms.mode = ctx->cipher.mode; cipher_parms.type = ctx->cipher_type; cipher_parms.key_buf = ctx->enckey; cipher_parms.key_len = ctx->enckeylen; /* Prepend SPU request message with BCM header */ memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN); ctx->spu_req_hdr_len = spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN, &cipher_parms); ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, ctx->enckeylen, false); atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]); return 0; } static int skcipher_encrypt(struct skcipher_request *req) { flow_log("skcipher_encrypt() nbytes:%u\n", req->cryptlen); return skcipher_enqueue(req, true); } static int skcipher_decrypt(struct skcipher_request *req) { flow_log("skcipher_decrypt() nbytes:%u\n", req->cryptlen); return skcipher_enqueue(req, false); } static int ahash_enqueue(struct ahash_request *req) { struct iproc_reqctx_s *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); int err; const char *alg_name; flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes); rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; rctx->parent = &req->base; rctx->ctx = ctx; rctx->bd_suppress = true; memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message)); /* Initialize position in src scatterlist */ rctx->src_sg = req->src; rctx->src_skip = 0; rctx->src_nents = 0; rctx->dst_sg = NULL; rctx->dst_skip = 0; rctx->dst_nents = 0; /* SPU2 hardware does not compute hash of zero length data */ if ((rctx->is_final == 1) && (rctx->total_todo == 0) && (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) { alg_name = crypto_ahash_alg_name(tfm); flow_log("Doing %sfinal %s zero-len hash request in software\n", rctx->is_final ? "" : "non-", alg_name); err = do_shash((unsigned char *)alg_name, req->result, NULL, 0, NULL, 0, ctx->authkey, ctx->authkeylen); if (err < 0) flow_log("Hash request failed with error %d\n", err); return err; } /* Choose a SPU to process this request */ rctx->chan_idx = select_channel(); err = handle_ahash_req(rctx); if (err != -EINPROGRESS) /* synchronous result */ spu_chunk_cleanup(rctx); if (err == -EAGAIN) /* * we saved data in hash carry, but tell crypto API * we successfully completed request. 
*/ err = 0; return err; } static int __ahash_init(struct ahash_request *req) { struct spu_hw *spu = &iproc_priv.spu; struct iproc_reqctx_s *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); flow_log("%s()\n", __func__); /* Initialize the context */ rctx->hash_carry_len = 0; rctx->is_final = 0; rctx->total_todo = 0; rctx->src_sent = 0; rctx->total_sent = 0; rctx->total_received = 0; ctx->digestsize = crypto_ahash_digestsize(tfm); /* If we add a hash whose digest is larger, catch it here. */ WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE); rctx->is_sw_hmac = false; ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0, true); return 0; } /** * spu_no_incr_hash() - Determine whether incremental hashing is supported. * @ctx: Crypto session context * * SPU-2 does not support incremental hashing (we'll have to revisit and * condition based on chip revision or device tree entry if future versions do * support incremental hash) * * SPU-M also doesn't support incremental hashing of AES-XCBC * * Return: true if incremental hashing is not supported * false otherwise */ static bool spu_no_incr_hash(struct iproc_ctx_s *ctx) { struct spu_hw *spu = &iproc_priv.spu; if (spu->spu_type == SPU_TYPE_SPU2) return true; if ((ctx->auth.alg == HASH_ALG_AES) && (ctx->auth.mode == HASH_MODE_XCBC)) return true; /* Otherwise, incremental hashing is supported */ return false; } static int ahash_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); const char *alg_name; struct crypto_shash *hash; int ret; gfp_t gfp; if (spu_no_incr_hash(ctx)) { /* * If we get an incremental hashing request and it's not * supported by the hardware, we need to handle it in software * by calling synchronous hash functions. */ alg_name = crypto_ahash_alg_name(tfm); hash = crypto_alloc_shash(alg_name, 0, 0); if (IS_ERR(hash)) { ret = PTR_ERR(hash); goto err; } gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; ctx->shash = kmalloc(sizeof(*ctx->shash) + crypto_shash_descsize(hash), gfp); if (!ctx->shash) { ret = -ENOMEM; goto err_hash; } ctx->shash->tfm = hash; /* Set the key using data we already have from setkey */ if (ctx->authkeylen > 0) { ret = crypto_shash_setkey(hash, ctx->authkey, ctx->authkeylen); if (ret) goto err_shash; } /* Initialize hash w/ this key and other params */ ret = crypto_shash_init(ctx->shash); if (ret) goto err_shash; } else { /* Otherwise call the internal function which uses SPU hw */ ret = __ahash_init(req); } return ret; err_shash: kfree(ctx->shash); err_hash: crypto_free_shash(hash); err: return ret; } static int __ahash_update(struct ahash_request *req) { struct iproc_reqctx_s *rctx = ahash_request_ctx(req); flow_log("ahash_update() nbytes:%u\n", req->nbytes); if (!req->nbytes) return 0; rctx->total_todo += req->nbytes; rctx->src_sent = 0; return ahash_enqueue(req); } static int ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); u8 *tmpbuf; int ret; int nents; gfp_t gfp; if (spu_no_incr_hash(ctx)) { /* * If we get an incremental hashing request and it's not * supported by the hardware, we need to handle it in software * by calling synchronous hash functions. 
*/ if (req->src) nents = sg_nents(req->src); else return -EINVAL; /* Copy data from req scatterlist to tmp buffer */ gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; tmpbuf = kmalloc(req->nbytes, gfp); if (!tmpbuf) return -ENOMEM; if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) != req->nbytes) { kfree(tmpbuf); return -EINVAL; } /* Call synchronous update */ ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes); kfree(tmpbuf); } else { /* Otherwise call the internal function which uses SPU hw */ ret = __ahash_update(req); } return ret; } static int __ahash_final(struct ahash_request *req) { struct iproc_reqctx_s *rctx = ahash_request_ctx(req); flow_log("ahash_final() nbytes:%u\n", req->nbytes); rctx->is_final = 1; return ahash_enqueue(req); } static int ahash_final(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); int ret; if (spu_no_incr_hash(ctx)) { /* * If we get an incremental hashing request and it's not * supported by the hardware, we need to handle it in software * by calling synchronous hash functions. */ ret = crypto_shash_final(ctx->shash, req->result); /* Done with hash, can deallocate it now */ crypto_free_shash(ctx->shash->tfm); kfree(ctx->shash); } else { /* Otherwise call the internal function which uses SPU hw */ ret = __ahash_final(req); } return ret; } static int __ahash_finup(struct ahash_request *req) { struct iproc_reqctx_s *rctx = ahash_request_ctx(req); flow_log("ahash_finup() nbytes:%u\n", req->nbytes); rctx->total_todo += req->nbytes; rctx->src_sent = 0; rctx->is_final = 1; return ahash_enqueue(req); } static int ahash_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); u8 *tmpbuf; int ret; int nents; gfp_t gfp; if (spu_no_incr_hash(ctx)) { /* * If we get an incremental hashing request and it's not * supported by the hardware, we need to handle it in software * by calling synchronous hash functions. */ if (req->src) { nents = sg_nents(req->src); } else { ret = -EINVAL; goto ahash_finup_exit; } /* Copy data from req scatterlist to tmp buffer */ gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
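/*
 * A minimal sketch of the synchronous-shash pattern used by this software
 * path (illustrative only; assumes <crypto/hash.h>, and "sha256" is an
 * arbitrary example of an algorithm name):
 *
 *	static int example_shash_digest(const void *data, unsigned int len,
 *					u8 *out)
 *	{
 *		struct crypto_shash *tfm;
 *		int err;
 *
 *		tfm = crypto_alloc_shash("sha256", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *		{
 *			SHASH_DESC_ON_STACK(desc, tfm);
 *
 *			desc->tfm = tfm;
 *			err = crypto_shash_digest(desc, data, len, out);
 *			shash_desc_zero(desc);
 *		}
 *		crypto_free_shash(tfm);
 *		return err;
 *	}
 *
 * The functions here do the same thing incrementally: linearize the request
 * scatterlist into a temporary buffer, then crypto_shash_update()/finup().
 */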
GFP_KERNEL : GFP_ATOMIC; tmpbuf = kmalloc(req->nbytes, gfp); if (!tmpbuf) { ret = -ENOMEM; goto ahash_finup_exit; } if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) != req->nbytes) { ret = -EINVAL; goto ahash_finup_free; } /* Call synchronous update */ ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes, req->result); } else { /* Otherwise call the internal function which uses SPU hw */ return __ahash_finup(req); } ahash_finup_free: kfree(tmpbuf); ahash_finup_exit: /* Done with hash, can deallocate it now */ crypto_free_shash(ctx->shash->tfm); kfree(ctx->shash); return ret; } static int ahash_digest(struct ahash_request *req) { int err; flow_log("ahash_digest() nbytes:%u\n", req->nbytes); /* whole thing at once */ err = __ahash_init(req); if (!err) err = __ahash_finup(req); return err; } static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash); flow_log("%s() ahash:%p key:%p keylen:%u\n", __func__, ahash, key, keylen); flow_dump(" key: ", key, keylen); if (ctx->auth.alg == HASH_ALG_AES) { switch (keylen) { case AES_KEYSIZE_128: ctx->cipher_type = CIPHER_TYPE_AES128; break; case AES_KEYSIZE_192: ctx->cipher_type = CIPHER_TYPE_AES192; break; case AES_KEYSIZE_256: ctx->cipher_type = CIPHER_TYPE_AES256; break; default: pr_err("%s() Error: Invalid key length\n", __func__); return -EINVAL; } } else { pr_err("%s() Error: unknown hash alg\n", __func__); return -EINVAL; } memcpy(ctx->authkey, key, keylen); ctx->authkeylen = keylen; return 0; } static int ahash_export(struct ahash_request *req, void *out) { const struct iproc_reqctx_s *rctx = ahash_request_ctx(req); struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out; spu_exp->total_todo = rctx->total_todo; spu_exp->total_sent = rctx->total_sent; spu_exp->is_sw_hmac = rctx->is_sw_hmac; memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry)); spu_exp->hash_carry_len = rctx->hash_carry_len; memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash)); return 0; } static int ahash_import(struct ahash_request *req, const void *in) { struct iproc_reqctx_s *rctx = ahash_request_ctx(req); struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in; rctx->total_todo = spu_exp->total_todo; rctx->total_sent = spu_exp->total_sent; rctx->is_sw_hmac = spu_exp->is_sw_hmac; memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry)); rctx->hash_carry_len = spu_exp->hash_carry_len; memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash)); return 0; } static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash); unsigned int blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); unsigned int digestsize = crypto_ahash_digestsize(ahash); unsigned int index; int rc; flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u digestsz:%u\n", __func__, ahash, key, keylen, blocksize, digestsize); flow_dump(" key: ", key, keylen); if (keylen > blocksize) { switch (ctx->auth.alg) { case HASH_ALG_MD5: rc = do_shash("md5", ctx->authkey, key, keylen, NULL, 0, NULL, 0); break; case HASH_ALG_SHA1: rc = do_shash("sha1", ctx->authkey, key, keylen, NULL, 0, NULL, 0); break; case HASH_ALG_SHA224: rc = do_shash("sha224", ctx->authkey, key, keylen, NULL, 0, NULL, 0); break; case HASH_ALG_SHA256: rc = do_shash("sha256", ctx->authkey, key, keylen, NULL, 0, NULL, 0); break; case HASH_ALG_SHA384: rc = do_shash("sha384", ctx->authkey, key, 
keylen, NULL, 0, NULL, 0); break; case HASH_ALG_SHA512: rc = do_shash("sha512", ctx->authkey, key, keylen, NULL, 0, NULL, 0); break; case HASH_ALG_SHA3_224: rc = do_shash("sha3-224", ctx->authkey, key, keylen, NULL, 0, NULL, 0); break; case HASH_ALG_SHA3_256: rc = do_shash("sha3-256", ctx->authkey, key, keylen, NULL, 0, NULL, 0); break; case HASH_ALG_SHA3_384: rc = do_shash("sha3-384", ctx->authkey, key, keylen, NULL, 0, NULL, 0); break; case HASH_ALG_SHA3_512: rc = do_shash("sha3-512", ctx->authkey, key, keylen, NULL, 0, NULL, 0); break; default: pr_err("%s() Error: unknown hash alg\n", __func__); return -EINVAL; } if (rc < 0) { pr_err("%s() Error %d computing shash for %s\n", __func__, rc, hash_alg_name[ctx->auth.alg]); return rc; } ctx->authkeylen = digestsize; flow_log(" keylen > digestsize... hashed\n"); flow_dump(" newkey: ", ctx->authkey, ctx->authkeylen); } else { memcpy(ctx->authkey, key, keylen); ctx->authkeylen = keylen; } /* * Full HMAC operation in SPUM is not verified, * So keeping the generation of IPAD, OPAD and * outer hashing in software. */ if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) { memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen); memset(ctx->ipad + ctx->authkeylen, 0, blocksize - ctx->authkeylen); ctx->authkeylen = 0; unsafe_memcpy(ctx->opad, ctx->ipad, blocksize, "fortified memcpy causes -Wrestrict warning"); for (index = 0; index < blocksize; index++) { ctx->ipad[index] ^= HMAC_IPAD_VALUE; ctx->opad[index] ^= HMAC_OPAD_VALUE; } flow_dump(" ipad: ", ctx->ipad, blocksize); flow_dump(" opad: ", ctx->opad, blocksize); } ctx->digestsize = digestsize; atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]); return 0; } static int ahash_hmac_init(struct ahash_request *req) { struct iproc_reqctx_s *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); unsigned int blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); flow_log("ahash_hmac_init()\n"); /* init the context as a hash */ ahash_init(req); if (!spu_no_incr_hash(ctx)) { /* SPU-M can do incr hashing but needs sw for outer HMAC */ rctx->is_sw_hmac = true; ctx->auth.mode = HASH_MODE_HASH; /* start with a prepended ipad */ memcpy(rctx->hash_carry, ctx->ipad, blocksize); rctx->hash_carry_len = blocksize; rctx->total_todo += blocksize; } return 0; } static int ahash_hmac_update(struct ahash_request *req) { flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes); if (!req->nbytes) return 0; return ahash_update(req); } static int ahash_hmac_final(struct ahash_request *req) { flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes); return ahash_final(req); } static int ahash_hmac_finup(struct ahash_request *req) { flow_log("ahash_hmac_finupl() nbytes:%u\n", req->nbytes); return ahash_finup(req); } static int ahash_hmac_digest(struct ahash_request *req) { struct iproc_reqctx_s *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); unsigned int blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes); /* Perform initialization and then call finup */ __ahash_init(req); if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) { /* * SPU2 supports full HMAC implementation in the * hardware, need not to generate IPAD, OPAD and * outer hash in software. * Only for hash key len > hash block size, SPU2 * expects to perform hashing on the key, shorten * it to digest size and feed it as hash key. 
	 */
		rctx->is_sw_hmac = false;
		ctx->auth.mode = HASH_MODE_HMAC;
	} else {
		rctx->is_sw_hmac = true;
		ctx->auth.mode = HASH_MODE_HASH;

		/* start with a prepended ipad */
		memcpy(rctx->hash_carry, ctx->ipad, blocksize);
		rctx->hash_carry_len = blocksize;
		rctx->total_todo += blocksize;
	}

	return __ahash_finup(req);
}

/* aead helpers */

static int aead_need_fallback(struct aead_request *req)
{
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
	u32 payload_len;

	/*
	 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext
	 * and AAD are both 0 bytes long. So use fallback in this case.
	 */
	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
	     (ctx->cipher.mode == CIPHER_MODE_CCM)) &&
	    (req->assoclen == 0)) {
		if ((rctx->is_encrypt && (req->cryptlen == 0)) ||
		    (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) {
			flow_log("AES GCM/CCM needs fallback for 0 len req\n");
			return 1;
		}
	}

	/* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */
	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
	    (spu->spu_type == SPU_TYPE_SPUM) &&
	    (ctx->digestsize != 8) && (ctx->digestsize != 12) &&
	    (ctx->digestsize != 16)) {
		flow_log("%s() AES CCM needs fallback for digest size %d\n",
			 __func__, ctx->digestsize);
		return 1;
	}

	/*
	 * SPU-M on NSP has an issue where AES-CCM hash is not correct
	 * when AAD size is 0
	 */
	if ((ctx->cipher.mode == CIPHER_MODE_CCM) &&
	    (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) &&
	    (req->assoclen == 0)) {
		flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n",
			 __func__);
		return 1;
	}

	/*
	 * The SPU cannot handle RFC4106/RFC4543 requests whose AAD is
	 * anything other than 16 or 20 bytes long, so use the fallback in
	 * that case.
	 */
	if (ctx->cipher.mode == CIPHER_MODE_GCM &&
	    ctx->cipher.alg == CIPHER_ALG_AES &&
	    rctx->iv_ctr_len == GCM_RFC4106_IV_SIZE &&
	    req->assoclen != 16 && req->assoclen != 20) {
		flow_log("RFC4106/RFC4543 needs fallback for assoclen"
			 " other than 16 or 20 bytes\n");
		return 1;
	}

	payload_len = req->cryptlen;
	if (spu->spu_type == SPU_TYPE_SPUM)
		payload_len += req->assoclen;

	flow_log("%s() payload len: %u\n", __func__, payload_len);

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		return 0;
	else
		return payload_len > ctx->max_payload;
}

static int aead_do_fallback(struct aead_request *req, bool is_encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
	struct aead_request *subreq;

	flow_log("%s() enc:%u\n", __func__, is_encrypt);

	if (!ctx->fallback_cipher)
		return -EINVAL;

	subreq = &rctx->req;
	aead_request_set_tfm(subreq, ctx->fallback_cipher);
	aead_request_set_callback(subreq, aead_request_flags(req),
				  req->base.complete, req->base.data);
	aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
			       req->iv);
	aead_request_set_ad(subreq, req->assoclen);

	/* Hand the request to the fallback tfm configured on the subrequest */
	return is_encrypt ? crypto_aead_encrypt(subreq) :
			    crypto_aead_decrypt(subreq);
}

static int aead_enqueue(struct aead_request *req, bool is_encrypt)
{
	struct iproc_reqctx_s *rctx = aead_request_ctx(req);
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct iproc_ctx_s *ctx = crypto_aead_ctx(aead);
	int err;

	flow_log("%s() enc:%u\n", __func__, is_encrypt);

	if (req->assoclen > MAX_ASSOC_SIZE) {
		pr_err("%s() Error: associated data too long. 
(%u > %u bytes)\n", __func__, req->assoclen, MAX_ASSOC_SIZE); return -EINVAL; } rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; rctx->parent = &req->base; rctx->is_encrypt = is_encrypt; rctx->bd_suppress = false; rctx->total_todo = req->cryptlen; rctx->src_sent = 0; rctx->total_sent = 0; rctx->total_received = 0; rctx->is_sw_hmac = false; rctx->ctx = ctx; memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message)); /* assoc data is at start of src sg */ rctx->assoc = req->src; /* * Init current position in src scatterlist to be after assoc data. * src_skip set to buffer offset where data begins. (Assoc data could * end in the middle of a buffer.) */ if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg, &rctx->src_skip) < 0) { pr_err("%s() Error: Unable to find start of src data\n", __func__); return -EINVAL; } rctx->src_nents = 0; rctx->dst_nents = 0; if (req->dst == req->src) { rctx->dst_sg = rctx->src_sg; rctx->dst_skip = rctx->src_skip; } else { /* * Expect req->dst to have room for assoc data followed by * output data and ICV, if encrypt. So initialize dst_sg * to point beyond assoc len offset. */ if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg, &rctx->dst_skip) < 0) { pr_err("%s() Error: Unable to find start of dst data\n", __func__); return -EINVAL; } } if (ctx->cipher.mode == CIPHER_MODE_CBC || ctx->cipher.mode == CIPHER_MODE_CTR || ctx->cipher.mode == CIPHER_MODE_OFB || ctx->cipher.mode == CIPHER_MODE_XTS || ctx->cipher.mode == CIPHER_MODE_GCM) { rctx->iv_ctr_len = ctx->salt_len + crypto_aead_ivsize(crypto_aead_reqtfm(req)); } else if (ctx->cipher.mode == CIPHER_MODE_CCM) { rctx->iv_ctr_len = CCM_AES_IV_SIZE; } else { rctx->iv_ctr_len = 0; } rctx->hash_carry_len = 0; flow_log(" src sg: %p\n", req->src); flow_log(" rctx->src_sg: %p, src_skip %u\n", rctx->src_sg, rctx->src_skip); flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen); flow_log(" dst sg: %p\n", req->dst); flow_log(" rctx->dst_sg: %p, dst_skip %u\n", rctx->dst_sg, rctx->dst_skip); flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len); flow_dump(" iv: ", req->iv, rctx->iv_ctr_len); flow_log(" authkeylen:%u\n", ctx->authkeylen); flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no"); if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) flow_log(" max_payload infinite"); else flow_log(" max_payload: %u\n", ctx->max_payload); if (unlikely(aead_need_fallback(req))) return aead_do_fallback(req, is_encrypt); /* * Do memory allocations for request after fallback check, because if we * do fallback, we won't call finish_req() to dealloc. 
*/ if (rctx->iv_ctr_len) { if (ctx->salt_len) memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset, ctx->salt, ctx->salt_len); memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len, req->iv, rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset); } rctx->chan_idx = select_channel(); err = handle_aead_req(rctx); if (err != -EINPROGRESS) /* synchronous result */ spu_chunk_cleanup(rctx); return err; } static int aead_authenc_setkey(struct crypto_aead *cipher, const u8 *key, unsigned int keylen) { struct spu_hw *spu = &iproc_priv.spu; struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); struct crypto_tfm *tfm = crypto_aead_tfm(cipher); struct crypto_authenc_keys keys; int ret; flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, keylen); flow_dump(" key: ", key, keylen); ret = crypto_authenc_extractkeys(&keys, key, keylen); if (ret) goto badkey; if (keys.enckeylen > MAX_KEY_SIZE || keys.authkeylen > MAX_KEY_SIZE) goto badkey; ctx->enckeylen = keys.enckeylen; ctx->authkeylen = keys.authkeylen; memcpy(ctx->enckey, keys.enckey, keys.enckeylen); /* May end up padding auth key. So make sure it's zeroed. */ memset(ctx->authkey, 0, sizeof(ctx->authkey)); memcpy(ctx->authkey, keys.authkey, keys.authkeylen); switch (ctx->alg->cipher_info.alg) { case CIPHER_ALG_DES: if (verify_aead_des_key(cipher, keys.enckey, keys.enckeylen)) return -EINVAL; ctx->cipher_type = CIPHER_TYPE_DES; break; case CIPHER_ALG_3DES: if (verify_aead_des3_key(cipher, keys.enckey, keys.enckeylen)) return -EINVAL; ctx->cipher_type = CIPHER_TYPE_3DES; break; case CIPHER_ALG_AES: switch (ctx->enckeylen) { case AES_KEYSIZE_128: ctx->cipher_type = CIPHER_TYPE_AES128; break; case AES_KEYSIZE_192: ctx->cipher_type = CIPHER_TYPE_AES192; break; case AES_KEYSIZE_256: ctx->cipher_type = CIPHER_TYPE_AES256; break; default: goto badkey; } break; default: pr_err("%s() Error: Unknown cipher alg\n", __func__); return -EINVAL; } flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, ctx->authkeylen); flow_dump(" enc: ", ctx->enckey, ctx->enckeylen); flow_dump(" auth: ", ctx->authkey, ctx->authkeylen); /* setkey the fallback just in case we needto use it */ if (ctx->fallback_cipher) { flow_log(" running fallback setkey()\n"); ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; ctx->fallback_cipher->base.crt_flags |= tfm->crt_flags & CRYPTO_TFM_REQ_MASK; ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen); if (ret) flow_log(" fallback setkey() returned:%d\n", ret); } ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, ctx->enckeylen, false); atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]); return ret; badkey: ctx->enckeylen = 0; ctx->authkeylen = 0; ctx->digestsize = 0; return -EINVAL; } static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, const u8 *key, unsigned int keylen) { struct spu_hw *spu = &iproc_priv.spu; struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); struct crypto_tfm *tfm = crypto_aead_tfm(cipher); int ret = 0; flow_log("%s() keylen:%u\n", __func__, keylen); flow_dump(" key: ", key, keylen); if (!ctx->is_esp) ctx->digestsize = keylen; ctx->enckeylen = keylen; ctx->authkeylen = 0; switch (ctx->enckeylen) { case AES_KEYSIZE_128: ctx->cipher_type = CIPHER_TYPE_AES128; break; case AES_KEYSIZE_192: ctx->cipher_type = CIPHER_TYPE_AES192; break; case AES_KEYSIZE_256: ctx->cipher_type = CIPHER_TYPE_AES256; break; default: goto badkey; } memcpy(ctx->enckey, key, ctx->enckeylen); flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, ctx->authkeylen); flow_dump(" enc: ", ctx->enckey, 
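/*
 * For reference, crypto_authenc_extractkeys() (used by aead_authenc_setkey()
 * above) expects the key blob in the standard authenc() layout: an rtattr of
 * type CRYPTO_AUTHENC_KEYA_PARAM carrying the big-endian enckeylen, followed
 * by the auth key and then the enc key. Illustrative builder (hypothetical
 * helper, needs <crypto/authenc.h> and <linux/rtnetlink.h>):
 *
 *	static int example_pack_authenc_key(u8 *buf, unsigned int buflen,
 *					    const u8 *authkey, unsigned int alen,
 *					    const u8 *enckey, unsigned int elen)
 *	{
 *		struct rtattr *rta = (struct rtattr *)buf;
 *		struct crypto_authenc_key_param *param;
 *		unsigned int need = RTA_SPACE(sizeof(*param)) + alen + elen;
 *
 *		if (buflen < need)
 *			return -EINVAL;
 *		rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
 *		rta->rta_len = RTA_LENGTH(sizeof(*param));
 *		param = RTA_DATA(rta);
 *		param->enckeylen = cpu_to_be32(elen);
 *		memcpy(buf + RTA_SPACE(sizeof(*param)), authkey, alen);
 *		memcpy(buf + RTA_SPACE(sizeof(*param)) + alen, enckey, elen);
 *		return need;
 *	}
 */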
ctx->enckeylen); flow_dump(" auth: ", ctx->authkey, ctx->authkeylen); /* setkey the fallback just in case we need to use it */ if (ctx->fallback_cipher) { flow_log(" running fallback setkey()\n"); ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; ctx->fallback_cipher->base.crt_flags |= tfm->crt_flags & CRYPTO_TFM_REQ_MASK; ret = crypto_aead_setkey(ctx->fallback_cipher, key, keylen + ctx->salt_len); if (ret) flow_log(" fallback setkey() returned:%d\n", ret); } ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, ctx->enckeylen, false); atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]); flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, ctx->authkeylen); return ret; badkey: ctx->enckeylen = 0; ctx->authkeylen = 0; ctx->digestsize = 0; return -EINVAL; } /** * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES. * @cipher: AEAD structure * @key: Key followed by 4 bytes of salt * @keylen: Length of key plus salt, in bytes * * Extracts salt from key and stores it to be prepended to IV on each request. * Digest is always 16 bytes * * Return: Value from generic gcm setkey. */ static int aead_gcm_esp_setkey(struct crypto_aead *cipher, const u8 *key, unsigned int keylen) { struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", __func__); if (keylen < GCM_ESP_SALT_SIZE) return -EINVAL; ctx->salt_len = GCM_ESP_SALT_SIZE; ctx->salt_offset = GCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); keylen -= GCM_ESP_SALT_SIZE; ctx->digestsize = GCM_ESP_DIGESTSIZE; ctx->is_esp = true; flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE); return aead_gcm_ccm_setkey(cipher, key, keylen); } /** * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC. * @cipher: AEAD structure * @key: Key followed by 4 bytes of salt * @keylen: Length of key plus salt, in bytes * * Extracts salt from key and stores it to be prepended to IV on each request. * Digest is always 16 bytes * * Return: Value from generic gcm setkey. */ static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher, const u8 *key, unsigned int keylen) { struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", __func__); if (keylen < GCM_ESP_SALT_SIZE) return -EINVAL; ctx->salt_len = GCM_ESP_SALT_SIZE; ctx->salt_offset = GCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); keylen -= GCM_ESP_SALT_SIZE; ctx->digestsize = GCM_ESP_DIGESTSIZE; ctx->is_esp = true; ctx->is_rfc4543 = true; flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE); return aead_gcm_ccm_setkey(cipher, key, keylen); } /** * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES. * @cipher: AEAD structure * @key: Key followed by 4 bytes of salt * @keylen: Length of key plus salt, in bytes * * Extracts salt from key and stores it to be prepended to IV on each request. * Digest is always 16 bytes * * Return: Value from generic ccm setkey. 
*/ static int aead_ccm_esp_setkey(struct crypto_aead *cipher, const u8 *key, unsigned int keylen) { struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); flow_log("%s\n", __func__); if (keylen < CCM_ESP_SALT_SIZE) return -EINVAL; ctx->salt_len = CCM_ESP_SALT_SIZE; ctx->salt_offset = CCM_ESP_SALT_OFFSET; memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE); keylen -= CCM_ESP_SALT_SIZE; ctx->is_esp = true; flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE); return aead_gcm_ccm_setkey(cipher, key, keylen); } static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize) { struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); int ret = 0; flow_log("%s() authkeylen:%u authsize:%u\n", __func__, ctx->authkeylen, authsize); ctx->digestsize = authsize; /* setkey the fallback just in case we needto use it */ if (ctx->fallback_cipher) { flow_log(" running fallback setauth()\n"); ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize); if (ret) flow_log(" fallback setauth() returned:%d\n", ret); } return ret; } static int aead_encrypt(struct aead_request *req) { flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen, req->cryptlen); dump_sg(req->src, 0, req->cryptlen + req->assoclen); flow_log(" assoc_len:%u\n", req->assoclen); return aead_enqueue(req, true); } static int aead_decrypt(struct aead_request *req) { flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen); dump_sg(req->src, 0, req->cryptlen + req->assoclen); flow_log(" assoc_len:%u\n", req->assoclen); return aead_enqueue(req, false); } /* ==================== Supported Cipher Algorithms ==================== */ static struct iproc_alg_s driver_algs[] = { { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "gcm(aes)", .cra_driver_name = "gcm-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK }, .setkey = aead_gcm_ccm_setkey, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_GCM, }, .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_GCM, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "ccm(aes)", .cra_driver_name = "ccm-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK }, .setkey = aead_gcm_ccm_setkey, .ivsize = CCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CCM, }, .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_CCM, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "gcm-aes-esp-iproc", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK }, .setkey = aead_gcm_esp_setkey, .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_GCM, }, .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_GCM, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "rfc4309(ccm(aes))", .cra_driver_name = "ccm-aes-esp-iproc", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK }, .setkey = aead_ccm_esp_setkey, .ivsize = CCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CCM, }, .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_CCM, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "rfc4543(gcm(aes))", .cra_driver_name = 
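/*
 * Illustrative note on the ESP setkey variants above: setkey() receives the
 * cipher key followed by a 4-byte salt, and for rfc4106 the per-packet GCM
 * nonce is then assembled as salt || 8-byte explicit IV (12 bytes total,
 * GCM_AES_IV_SIZE). Sketch of that layout (hypothetical helper):
 *
 *	static void example_rfc4106_nonce(u8 nonce[GCM_AES_IV_SIZE],
 *					  const u8 salt[4],
 *					  const u8 explicit_iv[8])
 *	{
 *		memcpy(nonce, salt, 4);
 *		memcpy(nonce + 4, explicit_iv, 8);
 *	}
 *
 * In this driver the salt is saved in ctx->salt at setkey time and the
 * explicit IV is copied in per request (see aead_enqueue()).
 */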
"gmac-aes-esp-iproc", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK }, .setkey = rfc4543_gcm_esp_setkey, .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_GCM, }, .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_GCM, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(md5),cbc(aes))", .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_MD5, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA1, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "authenc-hmac-sha256-cbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA256, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(md5),cbc(des))", .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_MD5, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha1),cbc(des))", .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA1, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha224),cbc(des))", .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA224, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = 
"authenc(hmac(sha256),cbc(des))", .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA256, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha384),cbc(des))", .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA384, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha512),cbc(des))", .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc", .cra_blocksize = DES_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA512, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(md5),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_MD5, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA1, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA224, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .cipher_info = { 
.alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA256, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA384, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, { .type = CRYPTO_ALG_TYPE_AEAD, .alg.aead = { .base = { .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc", .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY }, .setkey = aead_authenc_setkey, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_SHA512, .mode = HASH_MODE_HMAC, }, .auth_first = 0, }, /* SKCIPHER algorithms. */ { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ofb(des)", .base.cra_driver_name = "ofb-des-iproc", .base.cra_blocksize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_OFB, }, .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "cbc(des)", .base.cra_driver_name = "cbc-des-iproc", .base.cra_blocksize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ecb(des)", .base.cra_driver_name = "ecb-des-iproc", .base.cra_blocksize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = 0, }, .cipher_info = { .alg = CIPHER_ALG_DES, .mode = CIPHER_MODE_ECB, }, .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ofb(des3_ede)", .base.cra_driver_name = "ofb-des3-iproc", .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_OFB, }, .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cbc-des3-iproc", .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "ecb-des3-iproc", .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = 0, }, .cipher_info = { .alg = CIPHER_ALG_3DES, .mode = CIPHER_MODE_ECB, }, 
.auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ofb(aes)", .base.cra_driver_name = "ofb-aes-iproc", .base.cra_blocksize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_OFB, }, .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cbc-aes-iproc", .base.cra_blocksize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CBC, }, .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ecb(aes)", .base.cra_driver_name = "ecb-aes-iproc", .base.cra_blocksize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = 0, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_ECB, }, .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "ctr(aes)", .base.cra_driver_name = "ctr-aes-iproc", .base.cra_blocksize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_CTR, }, .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE, }, }, { .type = CRYPTO_ALG_TYPE_SKCIPHER, .alg.skcipher = { .base.cra_name = "xts(aes)", .base.cra_driver_name = "xts-aes-iproc", .base.cra_blocksize = AES_BLOCK_SIZE, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_info = { .alg = CIPHER_ALG_AES, .mode = CIPHER_MODE_XTS, }, .auth_info = { .alg = HASH_ALG_NONE, .mode = HASH_MODE_NONE, }, }, /* AHASH algorithms. 
*/ { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = MD5_DIGEST_SIZE, .halg.base = { .cra_name = "md5", .cra_driver_name = "md5-iproc", .cra_blocksize = MD5_BLOCK_WORDS * 4, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_MD5, .mode = HASH_MODE_HASH, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = MD5_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(md5)", .cra_driver_name = "hmac-md5-iproc", .cra_blocksize = MD5_BLOCK_WORDS * 4, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_MD5, .mode = HASH_MODE_HMAC, }, }, {.type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA1_DIGEST_SIZE, .halg.base = { .cra_name = "sha1", .cra_driver_name = "sha1-iproc", .cra_blocksize = SHA1_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA1, .mode = HASH_MODE_HASH, }, }, {.type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA1_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha1)", .cra_driver_name = "hmac-sha1-iproc", .cra_blocksize = SHA1_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA1, .mode = HASH_MODE_HMAC, }, }, {.type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA224_DIGEST_SIZE, .halg.base = { .cra_name = "sha224", .cra_driver_name = "sha224-iproc", .cra_blocksize = SHA224_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA224, .mode = HASH_MODE_HASH, }, }, {.type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA224_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha224)", .cra_driver_name = "hmac-sha224-iproc", .cra_blocksize = SHA224_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA224, .mode = HASH_MODE_HMAC, }, }, {.type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA256_DIGEST_SIZE, .halg.base = { .cra_name = "sha256", .cra_driver_name = "sha256-iproc", .cra_blocksize = SHA256_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA256, .mode = HASH_MODE_HASH, }, }, {.type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA256_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha256)", .cra_driver_name = "hmac-sha256-iproc", .cra_blocksize = SHA256_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA256, .mode = HASH_MODE_HMAC, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA384_DIGEST_SIZE, .halg.base = { .cra_name = "sha384", .cra_driver_name = "sha384-iproc", .cra_blocksize = SHA384_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA384, .mode = HASH_MODE_HASH, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA384_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha384)", .cra_driver_name = "hmac-sha384-iproc", .cra_blocksize = SHA384_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA384, .mode = HASH_MODE_HMAC, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA512_DIGEST_SIZE, .halg.base = { 
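/*
 * The ahash entries in this table are consumed through the asynchronous hash
 * API. Minimal caller sketch (illustrative only; "sha256" is an arbitrary
 * choice, buf must be linear kernel memory; needs <crypto/hash.h> and
 * <linux/scatterlist.h>):
 *
 *	static int example_ahash_digest(const void *buf, unsigned int len,
 *					u8 *out)
 *	{
 *		struct crypto_ahash *tfm;
 *		struct ahash_request *req;
 *		struct scatterlist sg;
 *		DECLARE_CRYPTO_WAIT(wait);
 *		int err;
 *
 *		tfm = crypto_alloc_ahash("sha256", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			crypto_free_ahash(tfm);
 *			return -ENOMEM;
 *		}
 *		sg_init_one(&sg, buf, len);
 *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					   crypto_req_done, &wait);
 *		ahash_request_set_crypt(req, &sg, out, len);
 *		err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 *		ahash_request_free(req);
 *		crypto_free_ahash(tfm);
 *		return err;
 *	}
 */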
.cra_name = "sha512", .cra_driver_name = "sha512-iproc", .cra_blocksize = SHA512_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA512, .mode = HASH_MODE_HASH, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA512_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha512)", .cra_driver_name = "hmac-sha512-iproc", .cra_blocksize = SHA512_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA512, .mode = HASH_MODE_HMAC, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA3_224_DIGEST_SIZE, .halg.base = { .cra_name = "sha3-224", .cra_driver_name = "sha3-224-iproc", .cra_blocksize = SHA3_224_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA3_224, .mode = HASH_MODE_HASH, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA3_224_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha3-224)", .cra_driver_name = "hmac-sha3-224-iproc", .cra_blocksize = SHA3_224_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA3_224, .mode = HASH_MODE_HMAC }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA3_256_DIGEST_SIZE, .halg.base = { .cra_name = "sha3-256", .cra_driver_name = "sha3-256-iproc", .cra_blocksize = SHA3_256_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA3_256, .mode = HASH_MODE_HASH, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA3_256_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha3-256)", .cra_driver_name = "hmac-sha3-256-iproc", .cra_blocksize = SHA3_256_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA3_256, .mode = HASH_MODE_HMAC, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA3_384_DIGEST_SIZE, .halg.base = { .cra_name = "sha3-384", .cra_driver_name = "sha3-384-iproc", .cra_blocksize = SHA3_224_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA3_384, .mode = HASH_MODE_HASH, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA3_384_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha3-384)", .cra_driver_name = "hmac-sha3-384-iproc", .cra_blocksize = SHA3_384_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA3_384, .mode = HASH_MODE_HMAC, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA3_512_DIGEST_SIZE, .halg.base = { .cra_name = "sha3-512", .cra_driver_name = "sha3-512-iproc", .cra_blocksize = SHA3_512_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA3_512, .mode = HASH_MODE_HASH, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = SHA3_512_DIGEST_SIZE, .halg.base = { .cra_name = "hmac(sha3-512)", .cra_driver_name = "hmac-sha3-512-iproc", .cra_blocksize = SHA3_512_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_SHA3_512, .mode = HASH_MODE_HMAC, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = AES_BLOCK_SIZE, .halg.base = { .cra_name = "xcbc(aes)", .cra_driver_name = 
"xcbc-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_XCBC, }, }, { .type = CRYPTO_ALG_TYPE_AHASH, .alg.hash = { .halg.digestsize = AES_BLOCK_SIZE, .halg.base = { .cra_name = "cmac(aes)", .cra_driver_name = "cmac-aes-iproc", .cra_blocksize = AES_BLOCK_SIZE, } }, .cipher_info = { .alg = CIPHER_ALG_NONE, .mode = CIPHER_MODE_NONE, }, .auth_info = { .alg = HASH_ALG_AES, .mode = HASH_MODE_CMAC, }, }, }; static int generic_cra_init(struct crypto_tfm *tfm, struct iproc_alg_s *cipher_alg) { struct spu_hw *spu = &iproc_priv.spu; struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); unsigned int blocksize = crypto_tfm_alg_blocksize(tfm); flow_log("%s()\n", __func__); ctx->alg = cipher_alg; ctx->cipher = cipher_alg->cipher_info; ctx->auth = cipher_alg->auth_info; ctx->auth_first = cipher_alg->auth_first; ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg, ctx->cipher.mode, blocksize); ctx->fallback_cipher = NULL; ctx->enckeylen = 0; ctx->authkeylen = 0; atomic_inc(&iproc_priv.stream_count); atomic_inc(&iproc_priv.session_count); return 0; } static int skcipher_init_tfm(struct crypto_skcipher *skcipher) { struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher); struct skcipher_alg *alg = crypto_skcipher_alg(skcipher); struct iproc_alg_s *cipher_alg; flow_log("%s()\n", __func__); crypto_skcipher_set_reqsize(skcipher, sizeof(struct iproc_reqctx_s)); cipher_alg = container_of(alg, struct iproc_alg_s, alg.skcipher); return generic_cra_init(tfm, cipher_alg); } static int ahash_cra_init(struct crypto_tfm *tfm) { int err; struct crypto_alg *alg = tfm->__crt_alg; struct iproc_alg_s *cipher_alg; cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s, alg.hash); err = generic_cra_init(tfm, cipher_alg); flow_log("%s()\n", __func__); /* * export state size has to be < 512 bytes. So don't include msg bufs * in state size. 
*/ crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct iproc_reqctx_s)); return err; } static int aead_cra_init(struct crypto_aead *aead) { unsigned int reqsize = sizeof(struct iproc_reqctx_s); struct crypto_tfm *tfm = crypto_aead_tfm(aead); struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); struct crypto_alg *alg = tfm->__crt_alg; struct aead_alg *aalg = container_of(alg, struct aead_alg, base); struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s, alg.aead); int err = generic_cra_init(tfm, cipher_alg); flow_log("%s()\n", __func__); ctx->is_esp = false; ctx->salt_len = 0; ctx->salt_offset = 0; /* random first IV */ get_random_bytes(ctx->iv, MAX_IV_SIZE); flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE); if (err) goto out; if (!(alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK)) goto reqsize; flow_log("%s() creating fallback cipher\n", __func__); ctx->fallback_cipher = crypto_alloc_aead(alg->cra_name, 0, CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback_cipher)) { pr_err("%s() Error: failed to allocate fallback for %s\n", __func__, alg->cra_name); return PTR_ERR(ctx->fallback_cipher); } reqsize += crypto_aead_reqsize(ctx->fallback_cipher); reqsize: crypto_aead_set_reqsize(aead, reqsize); out: return err; } static void generic_cra_exit(struct crypto_tfm *tfm) { atomic_dec(&iproc_priv.session_count); } static void skcipher_exit_tfm(struct crypto_skcipher *tfm) { generic_cra_exit(crypto_skcipher_tfm(tfm)); } static void aead_cra_exit(struct crypto_aead *aead) { struct crypto_tfm *tfm = crypto_aead_tfm(aead); struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); generic_cra_exit(tfm); if (ctx->fallback_cipher) { crypto_free_aead(ctx->fallback_cipher); ctx->fallback_cipher = NULL; } } /** * spu_functions_register() - Specify hardware-specific SPU functions based on * SPU type read from device tree. 
* @dev: device structure * @spu_type: SPU hardware generation * @spu_subtype: SPU hardware version */ static void spu_functions_register(struct device *dev, enum spu_spu_type spu_type, enum spu_spu_subtype spu_subtype) { struct spu_hw *spu = &iproc_priv.spu; if (spu_type == SPU_TYPE_SPUM) { dev_dbg(dev, "Registering SPUM functions"); spu->spu_dump_msg_hdr = spum_dump_msg_hdr; spu->spu_payload_length = spum_payload_length; spu->spu_response_hdr_len = spum_response_hdr_len; spu->spu_hash_pad_len = spum_hash_pad_len; spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len; spu->spu_assoc_resp_len = spum_assoc_resp_len; spu->spu_aead_ivlen = spum_aead_ivlen; spu->spu_hash_type = spum_hash_type; spu->spu_digest_size = spum_digest_size; spu->spu_create_request = spum_create_request; spu->spu_cipher_req_init = spum_cipher_req_init; spu->spu_cipher_req_finish = spum_cipher_req_finish; spu->spu_request_pad = spum_request_pad; spu->spu_tx_status_len = spum_tx_status_len; spu->spu_rx_status_len = spum_rx_status_len; spu->spu_status_process = spum_status_process; spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload; spu->spu_ccm_update_iv = spum_ccm_update_iv; spu->spu_wordalign_padlen = spum_wordalign_padlen; if (spu_subtype == SPU_SUBTYPE_SPUM_NS2) spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload; else spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload; } else { dev_dbg(dev, "Registering SPU2 functions"); spu->spu_dump_msg_hdr = spu2_dump_msg_hdr; spu->spu_ctx_max_payload = spu2_ctx_max_payload; spu->spu_payload_length = spu2_payload_length; spu->spu_response_hdr_len = spu2_response_hdr_len; spu->spu_hash_pad_len = spu2_hash_pad_len; spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len; spu->spu_assoc_resp_len = spu2_assoc_resp_len; spu->spu_aead_ivlen = spu2_aead_ivlen; spu->spu_hash_type = spu2_hash_type; spu->spu_digest_size = spu2_digest_size; spu->spu_create_request = spu2_create_request; spu->spu_cipher_req_init = spu2_cipher_req_init; spu->spu_cipher_req_finish = spu2_cipher_req_finish; spu->spu_request_pad = spu2_request_pad; spu->spu_tx_status_len = spu2_tx_status_len; spu->spu_rx_status_len = spu2_rx_status_len; spu->spu_status_process = spu2_status_process; spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload; spu->spu_ccm_update_iv = spu2_ccm_update_iv; spu->spu_wordalign_padlen = spu2_wordalign_padlen; } } /** * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox * channel for the SPU being probed. 
* @dev: SPU driver device structure * * Return: 0 if successful * < 0 otherwise */ static int spu_mb_init(struct device *dev) { struct mbox_client *mcl = &iproc_priv.mcl; int err, i; iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan, sizeof(struct mbox_chan *), GFP_KERNEL); if (!iproc_priv.mbox) return -ENOMEM; mcl->dev = dev; mcl->tx_block = false; mcl->tx_tout = 0; mcl->knows_txdone = true; mcl->rx_callback = spu_rx_callback; mcl->tx_done = NULL; for (i = 0; i < iproc_priv.spu.num_chan; i++) { iproc_priv.mbox[i] = mbox_request_channel(mcl, i); if (IS_ERR(iproc_priv.mbox[i])) { err = PTR_ERR(iproc_priv.mbox[i]); dev_err(dev, "Mbox channel %d request failed with err %d", i, err); iproc_priv.mbox[i] = NULL; goto free_channels; } } return 0; free_channels: for (i = 0; i < iproc_priv.spu.num_chan; i++) { if (iproc_priv.mbox[i]) mbox_free_channel(iproc_priv.mbox[i]); } return err; } static void spu_mb_release(struct platform_device *pdev) { int i; for (i = 0; i < iproc_priv.spu.num_chan; i++) mbox_free_channel(iproc_priv.mbox[i]); } static void spu_counters_init(void) { int i; int j; atomic_set(&iproc_priv.session_count, 0); atomic_set(&iproc_priv.stream_count, 0); atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan); atomic64_set(&iproc_priv.bytes_in, 0); atomic64_set(&iproc_priv.bytes_out, 0); for (i = 0; i < SPU_OP_NUM; i++) { atomic_set(&iproc_priv.op_counts[i], 0); atomic_set(&iproc_priv.setkey_cnt[i], 0); } for (i = 0; i < CIPHER_ALG_LAST; i++) for (j = 0; j < CIPHER_MODE_LAST; j++) atomic_set(&iproc_priv.cipher_cnt[i][j], 0); for (i = 0; i < HASH_ALG_LAST; i++) { atomic_set(&iproc_priv.hash_cnt[i], 0); atomic_set(&iproc_priv.hmac_cnt[i], 0); } for (i = 0; i < AEAD_TYPE_LAST; i++) atomic_set(&iproc_priv.aead_cnt[i], 0); atomic_set(&iproc_priv.mb_no_spc, 0); atomic_set(&iproc_priv.mb_send_fail, 0); atomic_set(&iproc_priv.bad_icv, 0); } static int spu_register_skcipher(struct iproc_alg_s *driver_alg) { struct skcipher_alg *crypto = &driver_alg->alg.skcipher; int err; crypto->base.cra_module = THIS_MODULE; crypto->base.cra_priority = cipher_pri; crypto->base.cra_alignmask = 0; crypto->base.cra_ctxsize = sizeof(struct iproc_ctx_s); crypto->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; crypto->init = skcipher_init_tfm; crypto->exit = skcipher_exit_tfm; crypto->setkey = skcipher_setkey; crypto->encrypt = skcipher_encrypt; crypto->decrypt = skcipher_decrypt; err = crypto_register_skcipher(crypto); /* Mark alg as having been registered, if successful */ if (err == 0) driver_alg->registered = true; pr_debug(" registered skcipher %s\n", crypto->base.cra_driver_name); return err; } static int spu_register_ahash(struct iproc_alg_s *driver_alg) { struct spu_hw *spu = &iproc_priv.spu; struct ahash_alg *hash = &driver_alg->alg.hash; int err; /* AES-XCBC is the only AES hash type currently supported on SPU-M */ if ((driver_alg->auth_info.alg == HASH_ALG_AES) && (driver_alg->auth_info.mode != HASH_MODE_XCBC) && (spu->spu_type == SPU_TYPE_SPUM)) return 0; /* SHA3 algorithm variants are not registered for SPU-M or SPU2. 
*/ if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) && (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2)) return 0; hash->halg.base.cra_module = THIS_MODULE; hash->halg.base.cra_priority = hash_pri; hash->halg.base.cra_alignmask = 0; hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s); hash->halg.base.cra_init = ahash_cra_init; hash->halg.base.cra_exit = generic_cra_exit; hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; hash->halg.statesize = sizeof(struct spu_hash_export_s); if (driver_alg->auth_info.mode != HASH_MODE_HMAC) { hash->init = ahash_init; hash->update = ahash_update; hash->final = ahash_final; hash->finup = ahash_finup; hash->digest = ahash_digest; if ((driver_alg->auth_info.alg == HASH_ALG_AES) && ((driver_alg->auth_info.mode == HASH_MODE_XCBC) || (driver_alg->auth_info.mode == HASH_MODE_CMAC))) { hash->setkey = ahash_setkey; } } else { hash->setkey = ahash_hmac_setkey; hash->init = ahash_hmac_init; hash->update = ahash_hmac_update; hash->final = ahash_hmac_final; hash->finup = ahash_hmac_finup; hash->digest = ahash_hmac_digest; } hash->export = ahash_export; hash->import = ahash_import; err = crypto_register_ahash(hash); /* Mark alg as having been registered, if successful */ if (err == 0) driver_alg->registered = true; pr_debug(" registered ahash %s\n", hash->halg.base.cra_driver_name); return err; } static int spu_register_aead(struct iproc_alg_s *driver_alg) { struct aead_alg *aead = &driver_alg->alg.aead; int err; aead->base.cra_module = THIS_MODULE; aead->base.cra_priority = aead_pri; aead->base.cra_alignmask = 0; aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s); aead->base.cra_flags |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; /* setkey set in alg initialization */ aead->setauthsize = aead_setauthsize; aead->encrypt = aead_encrypt; aead->decrypt = aead_decrypt; aead->init = aead_cra_init; aead->exit = aead_cra_exit; err = crypto_register_aead(aead); /* Mark alg as having been registered, if successful */ if (err == 0) driver_alg->registered = true; pr_debug(" registered aead %s\n", aead->base.cra_driver_name); return err; } /* register crypto algorithms the device supports */ static int spu_algs_register(struct device *dev) { int i, j; int err; for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { switch (driver_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: err = spu_register_skcipher(&driver_algs[i]); break; case CRYPTO_ALG_TYPE_AHASH: err = spu_register_ahash(&driver_algs[i]); break; case CRYPTO_ALG_TYPE_AEAD: err = spu_register_aead(&driver_algs[i]); break; default: dev_err(dev, "iproc-crypto: unknown alg type: %d", driver_algs[i].type); err = -EINVAL; } if (err) { dev_err(dev, "alg registration failed with error %d\n", err); goto err_algs; } } return 0; err_algs: for (j = 0; j < i; j++) { /* Skip any algorithm not registered */ if (!driver_algs[j].registered) continue; switch (driver_algs[j].type) { case CRYPTO_ALG_TYPE_SKCIPHER: crypto_unregister_skcipher(&driver_algs[j].alg.skcipher); driver_algs[j].registered = false; break; case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&driver_algs[j].alg.hash); driver_algs[j].registered = false; break; case CRYPTO_ALG_TYPE_AEAD: crypto_unregister_aead(&driver_algs[j].alg.aead); driver_algs[j].registered = false; break; } } return err; } /* ==================== Kernel Platform API ==================== */ static struct spu_type_subtype spum_ns2_types = { SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2 }; static struct spu_type_subtype spum_nsp_types = { SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP }; static 
struct spu_type_subtype spu2_types = { SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1 }; static struct spu_type_subtype spu2_v2_types = { SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2 }; static const struct of_device_id bcm_spu_dt_ids[] = { { .compatible = "brcm,spum-crypto", .data = &spum_ns2_types, }, { .compatible = "brcm,spum-nsp-crypto", .data = &spum_nsp_types, }, { .compatible = "brcm,spu2-crypto", .data = &spu2_types, }, { .compatible = "brcm,spu2-v2-crypto", .data = &spu2_v2_types, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids); static int spu_dt_read(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct spu_hw *spu = &iproc_priv.spu; struct resource *spu_ctrl_regs; const struct spu_type_subtype *matched_spu_type; struct device_node *dn = pdev->dev.of_node; int err, i; /* Count number of mailbox channels */ spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells"); matched_spu_type = of_device_get_match_data(dev); if (!matched_spu_type) { dev_err(dev, "Failed to match device\n"); return -ENODEV; } spu->spu_type = matched_spu_type->type; spu->spu_subtype = matched_spu_type->subtype; for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs = platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) { spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs); if (IS_ERR(spu->reg_vbase[i])) { err = PTR_ERR(spu->reg_vbase[i]); dev_err(dev, "Failed to map registers: %d\n", err); spu->reg_vbase[i] = NULL; return err; } } spu->num_spu = i; dev_dbg(dev, "Device has %d SPUs", spu->num_spu); return 0; } static int bcm_spu_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct spu_hw *spu = &iproc_priv.spu; int err; iproc_priv.pdev = pdev; platform_set_drvdata(iproc_priv.pdev, &iproc_priv); err = spu_dt_read(pdev); if (err < 0) goto failure; err = spu_mb_init(dev); if (err < 0) goto failure; if (spu->spu_type == SPU_TYPE_SPUM) iproc_priv.bcm_hdr_len = 8; else if (spu->spu_type == SPU_TYPE_SPU2) iproc_priv.bcm_hdr_len = 0; spu_functions_register(dev, spu->spu_type, spu->spu_subtype); spu_counters_init(); spu_setup_debugfs(); err = spu_algs_register(dev); if (err < 0) goto fail_reg; return 0; fail_reg: spu_free_debugfs(); failure: spu_mb_release(pdev); dev_err(dev, "%s failed with error %d.\n", __func__, err); return err; } static int bcm_spu_remove(struct platform_device *pdev) { int i; struct device *dev = &pdev->dev; char *cdn; for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { /* * Not all algorithms were registered, depending on whether * hardware is SPU or SPU2. So here we make sure to skip * those algorithms that were not previously registered. 
*/ if (!driver_algs[i].registered) continue; switch (driver_algs[i].type) { case CRYPTO_ALG_TYPE_SKCIPHER: crypto_unregister_skcipher(&driver_algs[i].alg.skcipher); dev_dbg(dev, " unregistered cipher %s\n", driver_algs[i].alg.skcipher.base.cra_driver_name); driver_algs[i].registered = false; break; case CRYPTO_ALG_TYPE_AHASH: crypto_unregister_ahash(&driver_algs[i].alg.hash); cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name; dev_dbg(dev, " unregistered hash %s\n", cdn); driver_algs[i].registered = false; break; case CRYPTO_ALG_TYPE_AEAD: crypto_unregister_aead(&driver_algs[i].alg.aead); dev_dbg(dev, " unregistered aead %s\n", driver_algs[i].alg.aead.base.cra_driver_name); driver_algs[i].registered = false; break; } } spu_free_debugfs(); spu_mb_release(pdev); return 0; } /* ===== Kernel Module API ===== */ static struct platform_driver bcm_spu_pdriver = { .driver = { .name = "brcm-spu-crypto", .of_match_table = of_match_ptr(bcm_spu_dt_ids), }, .probe = bcm_spu_probe, .remove = bcm_spu_remove, }; module_platform_driver(bcm_spu_pdriver); MODULE_AUTHOR("Rob Rice <[email protected]>"); MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver"); MODULE_LICENSE("GPL v2");
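/*
 * Illustrative sketch only -- not part of the bcm/cipher.c driver above.
 * It shows how a generic kernel caller could exercise an async skcipher
 * like the ones this file registers, using the standard crypto API with a
 * synchronous wait. The algorithm name "cbc(aes)", the helper name
 * example_cbc_aes_encrypt() and the buffer sizes are arbitrary example
 * choices; which driver actually services the request depends on priority
 * and availability at runtime.
 */
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req = NULL;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	u8 *buf = NULL, *iv = NULL;
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	buf = kzalloc(AES_BLOCK_SIZE, GFP_KERNEL);	/* one block, in place */
	iv = kzalloc(crypto_skcipher_ivsize(tfm), GFP_KERNEL);
	if (!req || !buf || !iv) {
		err = -ENOMEM;
		goto out;
	}

	sg_init_one(&sg, buf, AES_BLOCK_SIZE);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, AES_BLOCK_SIZE, iv);

	/* Submit and block until an async driver completes via crypto_req_done() */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
out:
	kfree(iv);
	kfree(buf);
	skcipher_request_free(req);
	crypto_free_skcipher(tfm);
	return err;
}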
linux-master
drivers/crypto/bcm/cipher.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2021 Aspeed Technology Inc. */ #include "aspeed-hace.h" #include <crypto/engine.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/of.h> #include <linux/platform_device.h> #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG #define HACE_DBG(d, fmt, ...) \ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) #else #define HACE_DBG(d, fmt, ...) \ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) #endif /* HACE interrupt service routine */ static irqreturn_t aspeed_hace_irq(int irq, void *dev) { struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)dev; struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; u32 sts; sts = ast_hace_read(hace_dev, ASPEED_HACE_STS); ast_hace_write(hace_dev, sts, ASPEED_HACE_STS); HACE_DBG(hace_dev, "irq status: 0x%x\n", sts); if (sts & HACE_HASH_ISR) { if (hash_engine->flags & CRYPTO_FLAGS_BUSY) tasklet_schedule(&hash_engine->done_task); else dev_warn(hace_dev->dev, "HASH no active requests.\n"); } if (sts & HACE_CRYPTO_ISR) { if (crypto_engine->flags & CRYPTO_FLAGS_BUSY) tasklet_schedule(&crypto_engine->done_task); else dev_warn(hace_dev->dev, "CRYPTO no active requests.\n"); } return IRQ_HANDLED; } static void aspeed_hace_crypto_done_task(unsigned long data) { struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data; struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; crypto_engine->resume(hace_dev); } static void aspeed_hace_hash_done_task(unsigned long data) { struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data; struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; hash_engine->resume(hace_dev); } static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev) { #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH aspeed_register_hace_hash_algs(hace_dev); #endif #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO aspeed_register_hace_crypto_algs(hace_dev); #endif } static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev) { #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH aspeed_unregister_hace_hash_algs(hace_dev); #endif #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO aspeed_unregister_hace_crypto_algs(hace_dev); #endif } static const struct of_device_id aspeed_hace_of_matches[] = { { .compatible = "aspeed,ast2500-hace", .data = (void *)5, }, { .compatible = "aspeed,ast2600-hace", .data = (void *)6, }, {}, }; static int aspeed_hace_probe(struct platform_device *pdev) { struct aspeed_engine_crypto *crypto_engine; const struct of_device_id *hace_dev_id; struct aspeed_engine_hash *hash_engine; struct aspeed_hace_dev *hace_dev; int rc; hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev), GFP_KERNEL); if (!hace_dev) return -ENOMEM; hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev); if (!hace_dev_id) { dev_err(&pdev->dev, "Failed to match hace dev id\n"); return -EINVAL; } hace_dev->dev = &pdev->dev; hace_dev->version = (unsigned long)hace_dev_id->data; hash_engine = &hace_dev->hash_engine; crypto_engine = &hace_dev->crypto_engine; platform_set_drvdata(pdev, hace_dev); hace_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); if (IS_ERR(hace_dev->regs)) return PTR_ERR(hace_dev->regs); /* Get irq number and register 
it */ hace_dev->irq = platform_get_irq(pdev, 0); if (hace_dev->irq < 0) return -ENXIO; rc = devm_request_irq(&pdev->dev, hace_dev->irq, aspeed_hace_irq, 0, dev_name(&pdev->dev), hace_dev); if (rc) { dev_err(&pdev->dev, "Failed to request interrupt\n"); return rc; } /* Get clk and enable it */ hace_dev->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(hace_dev->clk)) { dev_err(&pdev->dev, "Failed to get clk\n"); return -ENODEV; } rc = clk_prepare_enable(hace_dev->clk); if (rc) { dev_err(&pdev->dev, "Failed to enable clock 0x%x\n", rc); return rc; } /* Initialize crypto hardware engine structure for hash */ hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev, true); if (!hace_dev->crypt_engine_hash) { rc = -ENOMEM; goto clk_exit; } rc = crypto_engine_start(hace_dev->crypt_engine_hash); if (rc) goto err_engine_hash_start; tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task, (unsigned long)hace_dev); /* Initialize crypto hardware engine structure for crypto */ hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev, true); if (!hace_dev->crypt_engine_crypto) { rc = -ENOMEM; goto err_engine_hash_start; } rc = crypto_engine_start(hace_dev->crypt_engine_crypto); if (rc) goto err_engine_crypto_start; tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task, (unsigned long)hace_dev); /* Allocate DMA buffer for hash engine input used */ hash_engine->ahash_src_addr = dmam_alloc_coherent(&pdev->dev, ASPEED_HASH_SRC_DMA_BUF_LEN, &hash_engine->ahash_src_dma_addr, GFP_KERNEL); if (!hash_engine->ahash_src_addr) { dev_err(&pdev->dev, "Failed to allocate dma buffer\n"); rc = -ENOMEM; goto err_engine_crypto_start; } /* Allocate DMA buffer for crypto engine context used */ crypto_engine->cipher_ctx = dmam_alloc_coherent(&pdev->dev, PAGE_SIZE, &crypto_engine->cipher_ctx_dma, GFP_KERNEL); if (!crypto_engine->cipher_ctx) { dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n"); rc = -ENOMEM; goto err_engine_crypto_start; } /* Allocate DMA buffer for crypto engine input used */ crypto_engine->cipher_addr = dmam_alloc_coherent(&pdev->dev, ASPEED_CRYPTO_SRC_DMA_BUF_LEN, &crypto_engine->cipher_dma_addr, GFP_KERNEL); if (!crypto_engine->cipher_addr) { dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n"); rc = -ENOMEM; goto err_engine_crypto_start; } /* Allocate DMA buffer for crypto engine output used */ if (hace_dev->version == AST2600_VERSION) { crypto_engine->dst_sg_addr = dmam_alloc_coherent(&pdev->dev, ASPEED_CRYPTO_DST_DMA_BUF_LEN, &crypto_engine->dst_sg_dma_addr, GFP_KERNEL); if (!crypto_engine->dst_sg_addr) { dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n"); rc = -ENOMEM; goto err_engine_crypto_start; } } aspeed_hace_register(hace_dev); dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n"); return 0; err_engine_crypto_start: crypto_engine_exit(hace_dev->crypt_engine_crypto); err_engine_hash_start: crypto_engine_exit(hace_dev->crypt_engine_hash); clk_exit: clk_disable_unprepare(hace_dev->clk); return rc; } static int aspeed_hace_remove(struct platform_device *pdev) { struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev); struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; aspeed_hace_unregister(hace_dev); crypto_engine_exit(hace_dev->crypt_engine_hash); crypto_engine_exit(hace_dev->crypt_engine_crypto); tasklet_kill(&hash_engine->done_task); tasklet_kill(&crypto_engine->done_task); clk_disable_unprepare(hace_dev->clk); return 
0; } MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches); static struct platform_driver aspeed_hace_driver = { .probe = aspeed_hace_probe, .remove = aspeed_hace_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = aspeed_hace_of_matches, }, }; module_platform_driver(aspeed_hace_driver); MODULE_AUTHOR("Neal Liu <[email protected]>"); MODULE_DESCRIPTION("Aspeed HACE driver Crypto Accelerator"); MODULE_LICENSE("GPL");
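/*
 * Minimal sketch of the crypto_engine lifecycle that the probe/remove and
 * done-task code above relies on. Not part of the aspeed-hace driver; the
 * struct example_dev and the example_* helpers are hypothetical names used
 * only to show the alloc/start -> transfer -> finalize -> exit pattern.
 */
#include <crypto/engine.h>
#include <crypto/hash.h>

struct example_dev {
	struct device *dev;
	struct crypto_engine *engine;
};

static int example_engine_up(struct example_dev *edev)
{
	int rc;

	/* One queue kthread per engine; 'true' requests realtime scheduling,
	 * matching the choice made in aspeed_hace_probe() above. */
	edev->engine = crypto_engine_alloc_init(edev->dev, true);
	if (!edev->engine)
		return -ENOMEM;

	rc = crypto_engine_start(edev->engine);
	if (rc)
		crypto_engine_exit(edev->engine);
	return rc;
}

/* Queue an ahash request; the engine later invokes the algorithm's
 * do_one_request() callback from its kthread. */
static int example_hash_enqueue(struct example_dev *edev,
				struct ahash_request *req)
{
	return crypto_transfer_hash_request_to_engine(edev->engine, req);
}

/* Called from the driver's completion path (IRQ/tasklet) to return the
 * result and let the engine dequeue the next request. */
static void example_hash_done(struct example_dev *edev,
			      struct ahash_request *req, int err)
{
	crypto_finalize_hash_request(edev->engine, req, err);
}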
linux-master
drivers/crypto/aspeed/aspeed-hace.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2021 Aspeed Technology Inc. */ #include "aspeed-hace.h" #include <crypto/engine.h> #include <crypto/hmac.h> #include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> #include <crypto/sha1.h> #include <crypto/sha2.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/string.h> #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG #define AHASH_DBG(h, fmt, ...) \ dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) #else #define AHASH_DBG(h, fmt, ...) \ dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) #endif /* Initialization Vectors for SHA-family */ static const __be32 sha1_iv[8] = { cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1), cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3), cpu_to_be32(SHA1_H4), 0, 0, 0 }; static const __be32 sha224_iv[8] = { cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1), cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3), cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5), cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7), }; static const __be32 sha256_iv[8] = { cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1), cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3), cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5), cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7), }; static const __be64 sha384_iv[8] = { cpu_to_be64(SHA384_H0), cpu_to_be64(SHA384_H1), cpu_to_be64(SHA384_H2), cpu_to_be64(SHA384_H3), cpu_to_be64(SHA384_H4), cpu_to_be64(SHA384_H5), cpu_to_be64(SHA384_H6), cpu_to_be64(SHA384_H7) }; static const __be64 sha512_iv[8] = { cpu_to_be64(SHA512_H0), cpu_to_be64(SHA512_H1), cpu_to_be64(SHA512_H2), cpu_to_be64(SHA512_H3), cpu_to_be64(SHA512_H4), cpu_to_be64(SHA512_H5), cpu_to_be64(SHA512_H6), cpu_to_be64(SHA512_H7) }; /* The purpose of this padding is to ensure that the padded message is a * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512). * The bit "1" is appended at the end of the message followed by * "padlen-1" zero bits. Then a 64 bits block (SHA1/SHA224/SHA256) or * 128 bits block (SHA384/SHA512) equals to the message length in bits * is appended. * * For SHA1/SHA224/SHA256, padlen is calculated as followed: * - if message length < 56 bytes then padlen = 56 - message length * - else padlen = 64 + 56 - message length * * For SHA384/SHA512, padlen is calculated as followed: * - if message length < 112 bytes then padlen = 112 - message length * - else padlen = 128 + 112 - message length */ static void aspeed_ahash_fill_padding(struct aspeed_hace_dev *hace_dev, struct aspeed_sham_reqctx *rctx) { unsigned int index, padlen; __be64 bits[2]; AHASH_DBG(hace_dev, "rctx flags:0x%x\n", (u32)rctx->flags); switch (rctx->flags & SHA_FLAGS_MASK) { case SHA_FLAGS_SHA1: case SHA_FLAGS_SHA224: case SHA_FLAGS_SHA256: bits[0] = cpu_to_be64(rctx->digcnt[0] << 3); index = rctx->bufcnt & 0x3f; padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); *(rctx->buffer + rctx->bufcnt) = 0x80; memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1); memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 8); rctx->bufcnt += padlen + 8; break; default: bits[1] = cpu_to_be64(rctx->digcnt[0] << 3); bits[0] = cpu_to_be64(rctx->digcnt[1] << 3 | rctx->digcnt[0] >> 61); index = rctx->bufcnt & 0x7f; padlen = (index < 112) ? 
(112 - index) : ((128 + 112) - index); *(rctx->buffer + rctx->bufcnt) = 0x80; memset(rctx->buffer + rctx->bufcnt + 1, 0, padlen - 1); memcpy(rctx->buffer + rctx->bufcnt + padlen, bits, 16); rctx->bufcnt += padlen + 16; break; } } /* * Prepare DMA buffer before hardware engine * processing. */ static int aspeed_ahash_dma_prepare(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; struct ahash_request *req = hash_engine->req; struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); int length, remain; length = rctx->total + rctx->bufcnt; remain = length % rctx->block_size; AHASH_DBG(hace_dev, "length:0x%x, remain:0x%x\n", length, remain); if (rctx->bufcnt) memcpy(hash_engine->ahash_src_addr, rctx->buffer, rctx->bufcnt); if (rctx->total + rctx->bufcnt < ASPEED_CRYPTO_SRC_DMA_BUF_LEN) { scatterwalk_map_and_copy(hash_engine->ahash_src_addr + rctx->bufcnt, rctx->src_sg, rctx->offset, rctx->total - remain, 0); rctx->offset += rctx->total - remain; } else { dev_warn(hace_dev->dev, "Hash data length is too large\n"); return -EINVAL; } scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset, remain, 0); rctx->bufcnt = remain; rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); return -ENOMEM; } hash_engine->src_length = length - remain; hash_engine->src_dma = hash_engine->ahash_src_dma_addr; hash_engine->digest_dma = rctx->digest_dma_addr; return 0; } /* * Prepare DMA buffer as SG list buffer before * hardware engine processing. */ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; struct ahash_request *req = hash_engine->req; struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); struct aspeed_sg_list *src_list; struct scatterlist *s; int length, remain, sg_len, i; int rc = 0; remain = (rctx->total + rctx->bufcnt) % rctx->block_size; length = rctx->total + rctx->bufcnt - remain; AHASH_DBG(hace_dev, "%s:0x%x, %s:%zu, %s:0x%x, %s:0x%x\n", "rctx total", rctx->total, "bufcnt", rctx->bufcnt, "length", length, "remain", remain); sg_len = dma_map_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents, DMA_TO_DEVICE); if (!sg_len) { dev_warn(hace_dev->dev, "dma_map_sg() src error\n"); rc = -ENOMEM; goto end; } src_list = (struct aspeed_sg_list *)hash_engine->ahash_src_addr; rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); rc = -ENOMEM; goto free_src_sg; } if (rctx->bufcnt != 0) { u32 phy_addr; u32 len; rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer, rctx->block_size * 2, DMA_TO_DEVICE); if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); rc = -ENOMEM; goto free_rctx_digest; } phy_addr = rctx->buffer_dma_addr; len = rctx->bufcnt; length -= len; /* Last sg list */ if (length == 0) len |= HASH_SG_LAST_LIST; src_list[0].phy_addr = cpu_to_le32(phy_addr); src_list[0].len = cpu_to_le32(len); src_list++; } if (length != 0) { for_each_sg(rctx->src_sg, s, sg_len, i) { u32 phy_addr = sg_dma_address(s); u32 len = sg_dma_len(s); if (length > len) length -= len; else { /* Last sg list */ len = length; len |= HASH_SG_LAST_LIST; length = 0; } 
src_list[i].phy_addr = cpu_to_le32(phy_addr); src_list[i].len = cpu_to_le32(len); } } if (length != 0) { rc = -EINVAL; goto free_rctx_buffer; } rctx->offset = rctx->total - remain; hash_engine->src_length = rctx->total + rctx->bufcnt - remain; hash_engine->src_dma = hash_engine->ahash_src_dma_addr; hash_engine->digest_dma = rctx->digest_dma_addr; return 0; free_rctx_buffer: if (rctx->bufcnt != 0) dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, rctx->block_size * 2, DMA_TO_DEVICE); free_rctx_digest: dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); free_src_sg: dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents, DMA_TO_DEVICE); end: return rc; } static int aspeed_ahash_complete(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; struct ahash_request *req = hash_engine->req; AHASH_DBG(hace_dev, "\n"); hash_engine->flags &= ~CRYPTO_FLAGS_BUSY; crypto_finalize_hash_request(hace_dev->crypt_engine_hash, req, 0); return 0; } /* * Copy digest to the corresponding request result. * This function will be called at final() stage. */ static int aspeed_ahash_transfer(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; struct ahash_request *req = hash_engine->req; struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); AHASH_DBG(hace_dev, "\n"); dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, rctx->block_size * 2, DMA_TO_DEVICE); memcpy(req->result, rctx->digest, rctx->digsize); return aspeed_ahash_complete(hace_dev); } /* * Trigger hardware engines to do the math. */ static int aspeed_hace_ahash_trigger(struct aspeed_hace_dev *hace_dev, aspeed_hace_fn_t resume) { struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; struct ahash_request *req = hash_engine->req; struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); AHASH_DBG(hace_dev, "src_dma:%pad, digest_dma:%pad, length:%zu\n", &hash_engine->src_dma, &hash_engine->digest_dma, hash_engine->src_length); rctx->cmd |= HASH_CMD_INT_ENABLE; hash_engine->resume = resume; ast_hace_write(hace_dev, hash_engine->src_dma, ASPEED_HACE_HASH_SRC); ast_hace_write(hace_dev, hash_engine->digest_dma, ASPEED_HACE_HASH_DIGEST_BUFF); ast_hace_write(hace_dev, hash_engine->digest_dma, ASPEED_HACE_HASH_KEY_BUFF); ast_hace_write(hace_dev, hash_engine->src_length, ASPEED_HACE_HASH_DATA_LEN); /* Memory barrier to ensure all data setup before engine starts */ mb(); ast_hace_write(hace_dev, rctx->cmd, ASPEED_HACE_HASH_CMD); return -EINPROGRESS; } /* * HMAC resume aims to do the second pass produces * the final HMAC code derived from the inner hash * result and the outer key. 
*/ static int aspeed_ahash_hmac_resume(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; struct ahash_request *req = hash_engine->req; struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct aspeed_sha_hmac_ctx *bctx = tctx->base; int rc = 0; AHASH_DBG(hace_dev, "\n"); dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, rctx->block_size * 2, DMA_TO_DEVICE); /* o key pad + hash sum 1 */ memcpy(rctx->buffer, bctx->opad, rctx->block_size); memcpy(rctx->buffer + rctx->block_size, rctx->digest, rctx->digsize); rctx->bufcnt = rctx->block_size + rctx->digsize; rctx->digcnt[0] = rctx->block_size + rctx->digsize; aspeed_ahash_fill_padding(hace_dev, rctx); memcpy(rctx->digest, rctx->sha_iv, rctx->ivsize); rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); rc = -ENOMEM; goto end; } rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer, rctx->block_size * 2, DMA_TO_DEVICE); if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); rc = -ENOMEM; goto free_rctx_digest; } hash_engine->src_dma = rctx->buffer_dma_addr; hash_engine->src_length = rctx->bufcnt; hash_engine->digest_dma = rctx->digest_dma_addr; return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer); free_rctx_digest: dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); end: return rc; } static int aspeed_ahash_req_final(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; struct ahash_request *req = hash_engine->req; struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); int rc = 0; AHASH_DBG(hace_dev, "\n"); aspeed_ahash_fill_padding(hace_dev, rctx); rctx->digest_dma_addr = dma_map_single(hace_dev->dev, rctx->digest, SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(hace_dev->dev, rctx->digest_dma_addr)) { dev_warn(hace_dev->dev, "dma_map() rctx digest error\n"); rc = -ENOMEM; goto end; } rctx->buffer_dma_addr = dma_map_single(hace_dev->dev, rctx->buffer, rctx->block_size * 2, DMA_TO_DEVICE); if (dma_mapping_error(hace_dev->dev, rctx->buffer_dma_addr)) { dev_warn(hace_dev->dev, "dma_map() rctx buffer error\n"); rc = -ENOMEM; goto free_rctx_digest; } hash_engine->src_dma = rctx->buffer_dma_addr; hash_engine->src_length = rctx->bufcnt; hash_engine->digest_dma = rctx->digest_dma_addr; if (rctx->flags & SHA_FLAGS_HMAC) return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_hmac_resume); return aspeed_hace_ahash_trigger(hace_dev, aspeed_ahash_transfer); free_rctx_digest: dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); end: return rc; } static int aspeed_ahash_update_resume_sg(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; struct ahash_request *req = hash_engine->req; struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); AHASH_DBG(hace_dev, "\n"); dma_unmap_sg(hace_dev->dev, rctx->src_sg, rctx->src_nents, DMA_TO_DEVICE); if (rctx->bufcnt != 0) dma_unmap_single(hace_dev->dev, rctx->buffer_dma_addr, rctx->block_size * 2, 
DMA_TO_DEVICE); dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); scatterwalk_map_and_copy(rctx->buffer, rctx->src_sg, rctx->offset, rctx->total - rctx->offset, 0); rctx->bufcnt = rctx->total - rctx->offset; rctx->cmd &= ~HASH_CMD_HASH_SRC_SG_CTRL; if (rctx->flags & SHA_FLAGS_FINUP) return aspeed_ahash_req_final(hace_dev); return aspeed_ahash_complete(hace_dev); } static int aspeed_ahash_update_resume(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; struct ahash_request *req = hash_engine->req; struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); AHASH_DBG(hace_dev, "\n"); dma_unmap_single(hace_dev->dev, rctx->digest_dma_addr, SHA512_DIGEST_SIZE, DMA_BIDIRECTIONAL); if (rctx->flags & SHA_FLAGS_FINUP) return aspeed_ahash_req_final(hace_dev); return aspeed_ahash_complete(hace_dev); } static int aspeed_ahash_req_update(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine; struct ahash_request *req = hash_engine->req; struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); aspeed_hace_fn_t resume; int ret; AHASH_DBG(hace_dev, "\n"); if (hace_dev->version == AST2600_VERSION) { rctx->cmd |= HASH_CMD_HASH_SRC_SG_CTRL; resume = aspeed_ahash_update_resume_sg; } else { resume = aspeed_ahash_update_resume; } ret = hash_engine->dma_prepare(hace_dev); if (ret) return ret; return aspeed_hace_ahash_trigger(hace_dev, resume); } static int aspeed_hace_hash_handle_queue(struct aspeed_hace_dev *hace_dev, struct ahash_request *req) { return crypto_transfer_hash_request_to_engine( hace_dev->crypt_engine_hash, req); } static int aspeed_ahash_do_request(struct crypto_engine *engine, void *areq) { struct ahash_request *req = ahash_request_cast(areq); struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct aspeed_hace_dev *hace_dev = tctx->hace_dev; struct aspeed_engine_hash *hash_engine; int ret = 0; hash_engine = &hace_dev->hash_engine; hash_engine->flags |= CRYPTO_FLAGS_BUSY; if (rctx->op == SHA_OP_UPDATE) ret = aspeed_ahash_req_update(hace_dev); else if (rctx->op == SHA_OP_FINAL) ret = aspeed_ahash_req_final(hace_dev); if (ret != -EINPROGRESS) return ret; return 0; } static void aspeed_ahash_prepare_request(struct crypto_engine *engine, void *areq) { struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct aspeed_hace_dev *hace_dev = tctx->hace_dev; struct aspeed_engine_hash *hash_engine; hash_engine = &hace_dev->hash_engine; hash_engine->req = req; if (hace_dev->version == AST2600_VERSION) hash_engine->dma_prepare = aspeed_ahash_dma_prepare_sg; else hash_engine->dma_prepare = aspeed_ahash_dma_prepare; } static int aspeed_ahash_do_one(struct crypto_engine *engine, void *areq) { aspeed_ahash_prepare_request(engine, areq); return aspeed_ahash_do_request(engine, areq); } static int aspeed_sham_update(struct ahash_request *req) { struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct aspeed_hace_dev *hace_dev = tctx->hace_dev; AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes); rctx->total = req->nbytes; rctx->src_sg = req->src; rctx->offset = 0; rctx->src_nents = sg_nents(req->src); rctx->op = SHA_OP_UPDATE; rctx->digcnt[0] 
+= rctx->total; if (rctx->digcnt[0] < rctx->total) rctx->digcnt[1]++; if (rctx->bufcnt + rctx->total < rctx->block_size) { scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, rctx->src_sg, rctx->offset, rctx->total, 0); rctx->bufcnt += rctx->total; return 0; } return aspeed_hace_hash_handle_queue(hace_dev, req); } static int aspeed_sham_shash_digest(struct crypto_shash *tfm, u32 flags, const u8 *data, unsigned int len, u8 *out) { SHASH_DESC_ON_STACK(shash, tfm); shash->tfm = tfm; return crypto_shash_digest(shash, data, len, out); } static int aspeed_sham_final(struct ahash_request *req) { struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct aspeed_hace_dev *hace_dev = tctx->hace_dev; AHASH_DBG(hace_dev, "req->nbytes:%d, rctx->total:%d\n", req->nbytes, rctx->total); rctx->op = SHA_OP_FINAL; return aspeed_hace_hash_handle_queue(hace_dev, req); } static int aspeed_sham_finup(struct ahash_request *req) { struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct aspeed_hace_dev *hace_dev = tctx->hace_dev; int rc1, rc2; AHASH_DBG(hace_dev, "req->nbytes: %d\n", req->nbytes); rctx->flags |= SHA_FLAGS_FINUP; rc1 = aspeed_sham_update(req); if (rc1 == -EINPROGRESS || rc1 == -EBUSY) return rc1; /* * final() has to be always called to cleanup resources * even if update() failed, except EINPROGRESS */ rc2 = aspeed_sham_final(req); return rc1 ? : rc2; } static int aspeed_sham_init(struct ahash_request *req) { struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct aspeed_hace_dev *hace_dev = tctx->hace_dev; struct aspeed_sha_hmac_ctx *bctx = tctx->base; AHASH_DBG(hace_dev, "%s: digest size:%d\n", crypto_tfm_alg_name(&tfm->base), crypto_ahash_digestsize(tfm)); rctx->cmd = HASH_CMD_ACC_MODE; rctx->flags = 0; switch (crypto_ahash_digestsize(tfm)) { case SHA1_DIGEST_SIZE: rctx->cmd |= HASH_CMD_SHA1 | HASH_CMD_SHA_SWAP; rctx->flags |= SHA_FLAGS_SHA1; rctx->digsize = SHA1_DIGEST_SIZE; rctx->block_size = SHA1_BLOCK_SIZE; rctx->sha_iv = sha1_iv; rctx->ivsize = 32; memcpy(rctx->digest, sha1_iv, rctx->ivsize); break; case SHA224_DIGEST_SIZE: rctx->cmd |= HASH_CMD_SHA224 | HASH_CMD_SHA_SWAP; rctx->flags |= SHA_FLAGS_SHA224; rctx->digsize = SHA224_DIGEST_SIZE; rctx->block_size = SHA224_BLOCK_SIZE; rctx->sha_iv = sha224_iv; rctx->ivsize = 32; memcpy(rctx->digest, sha224_iv, rctx->ivsize); break; case SHA256_DIGEST_SIZE: rctx->cmd |= HASH_CMD_SHA256 | HASH_CMD_SHA_SWAP; rctx->flags |= SHA_FLAGS_SHA256; rctx->digsize = SHA256_DIGEST_SIZE; rctx->block_size = SHA256_BLOCK_SIZE; rctx->sha_iv = sha256_iv; rctx->ivsize = 32; memcpy(rctx->digest, sha256_iv, rctx->ivsize); break; case SHA384_DIGEST_SIZE: rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA384 | HASH_CMD_SHA_SWAP; rctx->flags |= SHA_FLAGS_SHA384; rctx->digsize = SHA384_DIGEST_SIZE; rctx->block_size = SHA384_BLOCK_SIZE; rctx->sha_iv = (const __be32 *)sha384_iv; rctx->ivsize = 64; memcpy(rctx->digest, sha384_iv, rctx->ivsize); break; case SHA512_DIGEST_SIZE: rctx->cmd |= HASH_CMD_SHA512_SER | HASH_CMD_SHA512 | HASH_CMD_SHA_SWAP; rctx->flags |= SHA_FLAGS_SHA512; rctx->digsize = SHA512_DIGEST_SIZE; rctx->block_size = SHA512_BLOCK_SIZE; rctx->sha_iv = (const __be32 *)sha512_iv; rctx->ivsize = 64; memcpy(rctx->digest, 
sha512_iv, rctx->ivsize); break; default: dev_warn(tctx->hace_dev->dev, "digest size %d not support\n", crypto_ahash_digestsize(tfm)); return -EINVAL; } rctx->bufcnt = 0; rctx->total = 0; rctx->digcnt[0] = 0; rctx->digcnt[1] = 0; /* HMAC init */ if (tctx->flags & SHA_FLAGS_HMAC) { rctx->digcnt[0] = rctx->block_size; rctx->bufcnt = rctx->block_size; memcpy(rctx->buffer, bctx->ipad, rctx->block_size); rctx->flags |= SHA_FLAGS_HMAC; } return 0; } static int aspeed_sham_digest(struct ahash_request *req) { return aspeed_sham_init(req) ? : aspeed_sham_finup(req); } static int aspeed_sham_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { struct aspeed_sham_ctx *tctx = crypto_ahash_ctx(tfm); struct aspeed_hace_dev *hace_dev = tctx->hace_dev; struct aspeed_sha_hmac_ctx *bctx = tctx->base; int ds = crypto_shash_digestsize(bctx->shash); int bs = crypto_shash_blocksize(bctx->shash); int err = 0; int i; AHASH_DBG(hace_dev, "%s: keylen:%d\n", crypto_tfm_alg_name(&tfm->base), keylen); if (keylen > bs) { err = aspeed_sham_shash_digest(bctx->shash, crypto_shash_get_flags(bctx->shash), key, keylen, bctx->ipad); if (err) return err; keylen = ds; } else { memcpy(bctx->ipad, key, keylen); } memset(bctx->ipad + keylen, 0, bs - keylen); memcpy(bctx->opad, bctx->ipad, bs); for (i = 0; i < bs; i++) { bctx->ipad[i] ^= HMAC_IPAD_VALUE; bctx->opad[i] ^= HMAC_OPAD_VALUE; } return err; } static int aspeed_sham_cra_init(struct crypto_tfm *tfm) { struct ahash_alg *alg = __crypto_ahash_alg(tfm->__crt_alg); struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm); struct aspeed_hace_alg *ast_alg; ast_alg = container_of(alg, struct aspeed_hace_alg, alg.ahash.base); tctx->hace_dev = ast_alg->hace_dev; tctx->flags = 0; crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct aspeed_sham_reqctx)); if (ast_alg->alg_base) { /* hmac related */ struct aspeed_sha_hmac_ctx *bctx = tctx->base; tctx->flags |= SHA_FLAGS_HMAC; bctx->shash = crypto_alloc_shash(ast_alg->alg_base, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(bctx->shash)) { dev_warn(ast_alg->hace_dev->dev, "base driver '%s' could not be loaded.\n", ast_alg->alg_base); return PTR_ERR(bctx->shash); } } return 0; } static void aspeed_sham_cra_exit(struct crypto_tfm *tfm) { struct aspeed_sham_ctx *tctx = crypto_tfm_ctx(tfm); struct aspeed_hace_dev *hace_dev = tctx->hace_dev; AHASH_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(tfm)); if (tctx->flags & SHA_FLAGS_HMAC) { struct aspeed_sha_hmac_ctx *bctx = tctx->base; crypto_free_shash(bctx->shash); } } static int aspeed_sham_export(struct ahash_request *req, void *out) { struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); memcpy(out, rctx, sizeof(*rctx)); return 0; } static int aspeed_sham_import(struct ahash_request *req, const void *in) { struct aspeed_sham_reqctx *rctx = ahash_request_ctx(req); memcpy(rctx, in, sizeof(*rctx)); return 0; } static struct aspeed_hace_alg aspeed_ahash_algs[] = { { .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, .finup = aspeed_sham_finup, .digest = aspeed_sham_digest, .export = aspeed_sham_export, .import = aspeed_sham_import, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct aspeed_sham_reqctx), .base = { .cra_name = "sha1", .cra_driver_name = "aspeed-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_sham_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = 
aspeed_sham_cra_init, .cra_exit = aspeed_sham_cra_exit, } } }, .alg.ahash.op = { .do_one_request = aspeed_ahash_do_one, }, }, { .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, .finup = aspeed_sham_finup, .digest = aspeed_sham_digest, .export = aspeed_sham_export, .import = aspeed_sham_import, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct aspeed_sham_reqctx), .base = { .cra_name = "sha256", .cra_driver_name = "aspeed-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_sham_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = aspeed_sham_cra_init, .cra_exit = aspeed_sham_cra_exit, } } }, .alg.ahash.op = { .do_one_request = aspeed_ahash_do_one, }, }, { .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, .finup = aspeed_sham_finup, .digest = aspeed_sham_digest, .export = aspeed_sham_export, .import = aspeed_sham_import, .halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct aspeed_sham_reqctx), .base = { .cra_name = "sha224", .cra_driver_name = "aspeed-sha224", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_sham_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = aspeed_sham_cra_init, .cra_exit = aspeed_sham_cra_exit, } } }, .alg.ahash.op = { .do_one_request = aspeed_ahash_do_one, }, }, { .alg_base = "sha1", .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, .finup = aspeed_sham_finup, .digest = aspeed_sham_digest, .setkey = aspeed_sham_setkey, .export = aspeed_sham_export, .import = aspeed_sham_import, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct aspeed_sham_reqctx), .base = { .cra_name = "hmac(sha1)", .cra_driver_name = "aspeed-hmac-sha1", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + sizeof(struct aspeed_sha_hmac_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = aspeed_sham_cra_init, .cra_exit = aspeed_sham_cra_exit, } } }, .alg.ahash.op = { .do_one_request = aspeed_ahash_do_one, }, }, { .alg_base = "sha224", .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, .finup = aspeed_sham_finup, .digest = aspeed_sham_digest, .setkey = aspeed_sham_setkey, .export = aspeed_sham_export, .import = aspeed_sham_import, .halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct aspeed_sham_reqctx), .base = { .cra_name = "hmac(sha224)", .cra_driver_name = "aspeed-hmac-sha224", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA224_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + sizeof(struct aspeed_sha_hmac_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = aspeed_sham_cra_init, .cra_exit = aspeed_sham_cra_exit, } } }, .alg.ahash.op = { .do_one_request = aspeed_ahash_do_one, }, }, { .alg_base = "sha256", .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, .finup = aspeed_sham_finup, .digest = aspeed_sham_digest, .setkey = 
aspeed_sham_setkey, .export = aspeed_sham_export, .import = aspeed_sham_import, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct aspeed_sham_reqctx), .base = { .cra_name = "hmac(sha256)", .cra_driver_name = "aspeed-hmac-sha256", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA256_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + sizeof(struct aspeed_sha_hmac_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = aspeed_sham_cra_init, .cra_exit = aspeed_sham_cra_exit, } } }, .alg.ahash.op = { .do_one_request = aspeed_ahash_do_one, }, }, }; static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = { { .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, .finup = aspeed_sham_finup, .digest = aspeed_sham_digest, .export = aspeed_sham_export, .import = aspeed_sham_import, .halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct aspeed_sham_reqctx), .base = { .cra_name = "sha384", .cra_driver_name = "aspeed-sha384", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_sham_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = aspeed_sham_cra_init, .cra_exit = aspeed_sham_cra_exit, } } }, .alg.ahash.op = { .do_one_request = aspeed_ahash_do_one, }, }, { .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, .finup = aspeed_sham_finup, .digest = aspeed_sham_digest, .export = aspeed_sham_export, .import = aspeed_sham_import, .halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct aspeed_sham_reqctx), .base = { .cra_name = "sha512", .cra_driver_name = "aspeed-sha512", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_sham_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = aspeed_sham_cra_init, .cra_exit = aspeed_sham_cra_exit, } } }, .alg.ahash.op = { .do_one_request = aspeed_ahash_do_one, }, }, { .alg_base = "sha384", .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, .finup = aspeed_sham_finup, .digest = aspeed_sham_digest, .setkey = aspeed_sham_setkey, .export = aspeed_sham_export, .import = aspeed_sham_import, .halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct aspeed_sham_reqctx), .base = { .cra_name = "hmac(sha384)", .cra_driver_name = "aspeed-hmac-sha384", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA384_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + sizeof(struct aspeed_sha_hmac_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = aspeed_sham_cra_init, .cra_exit = aspeed_sham_cra_exit, } } }, .alg.ahash.op = { .do_one_request = aspeed_ahash_do_one, }, }, { .alg_base = "sha512", .alg.ahash.base = { .init = aspeed_sham_init, .update = aspeed_sham_update, .final = aspeed_sham_final, .finup = aspeed_sham_finup, .digest = aspeed_sham_digest, .setkey = aspeed_sham_setkey, .export = aspeed_sham_export, .import = aspeed_sham_import, .halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct aspeed_sham_reqctx), .base = { .cra_name = "hmac(sha512)", .cra_driver_name = "aspeed-hmac-sha512", 
.cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY, .cra_blocksize = SHA512_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_sham_ctx) + sizeof(struct aspeed_sha_hmac_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, .cra_init = aspeed_sham_cra_init, .cra_exit = aspeed_sham_cra_exit, } } }, .alg.ahash.op = { .do_one_request = aspeed_ahash_do_one, }, }, }; void aspeed_unregister_hace_hash_algs(struct aspeed_hace_dev *hace_dev) { int i; for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) crypto_engine_unregister_ahash(&aspeed_ahash_algs[i].alg.ahash); if (hace_dev->version != AST2600_VERSION) return; for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) crypto_engine_unregister_ahash(&aspeed_ahash_algs_g6[i].alg.ahash); } void aspeed_register_hace_hash_algs(struct aspeed_hace_dev *hace_dev) { int rc, i; AHASH_DBG(hace_dev, "\n"); for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs); i++) { aspeed_ahash_algs[i].hace_dev = hace_dev; rc = crypto_engine_register_ahash(&aspeed_ahash_algs[i].alg.ahash); if (rc) { AHASH_DBG(hace_dev, "Failed to register %s\n", aspeed_ahash_algs[i].alg.ahash.base.halg.base.cra_name); } } if (hace_dev->version != AST2600_VERSION) return; for (i = 0; i < ARRAY_SIZE(aspeed_ahash_algs_g6); i++) { aspeed_ahash_algs_g6[i].hace_dev = hace_dev; rc = crypto_engine_register_ahash(&aspeed_ahash_algs_g6[i].alg.ahash); if (rc) { AHASH_DBG(hace_dev, "Failed to register %s\n", aspeed_ahash_algs_g6[i].alg.ahash.base.halg.base.cra_name); } } }
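/*
 * Worked example of the padding rule documented above
 * aspeed_ahash_fill_padding(): it computes how many bytes (the 0x80 marker,
 * the zero fill, and the big-endian bit-length field) are appended to a
 * partial block. Pure arithmetic, no hardware involved, and not part of the
 * driver; example_sha_pad_bytes() is a hypothetical helper. block_size is
 * 64 for SHA-1/224/256 and 128 for SHA-384/512.
 */
static unsigned int example_sha_pad_bytes(unsigned int buffered,
					  unsigned int block_size)
{
	unsigned int len_field = (block_size == 64) ? 8 : 16;
	unsigned int boundary = block_size - len_field;	/* 56 or 112 */
	unsigned int index = buffered & (block_size - 1);
	unsigned int padlen;

	padlen = (index < boundary) ? boundary - index
				    : block_size + boundary - index;

	/* e.g. SHA-256 with 55 buffered bytes: padlen 1 + 8 -> exactly one
	 * 64-byte block; with 56 buffered bytes: padlen 64 + 8 -> two blocks. */
	return padlen + len_field;
}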
linux-master
drivers/crypto/aspeed/aspeed-hace-hash.c
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright 2021 Aspeed Technology Inc. */ #include <crypto/engine.h> #include <crypto/internal/akcipher.h> #include <crypto/internal/rsa.h> #include <crypto/scatterwalk.h> #include <linux/clk.h> #include <linux/count_zeros.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/string.h> #ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG #define ACRY_DBG(d, fmt, ...) \ dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) #else #define ACRY_DBG(d, fmt, ...) \ dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) #endif /***************************** * * * ACRY register definitions * * * * ***************************/ #define ASPEED_ACRY_TRIGGER 0x000 /* ACRY Engine Control: trigger */ #define ASPEED_ACRY_DMA_CMD 0x048 /* ACRY Engine Control: Command */ #define ASPEED_ACRY_DMA_SRC_BASE 0x04C /* ACRY DRAM base address for DMA */ #define ASPEED_ACRY_DMA_LEN 0x050 /* ACRY Data Length of DMA */ #define ASPEED_ACRY_RSA_KEY_LEN 0x058 /* ACRY RSA Exp/Mod Key Length (Bits) */ #define ASPEED_ACRY_INT_MASK 0x3F8 /* ACRY Interrupt Mask */ #define ASPEED_ACRY_STATUS 0x3FC /* ACRY Interrupt Status */ /* rsa trigger */ #define ACRY_CMD_RSA_TRIGGER BIT(0) #define ACRY_CMD_DMA_RSA_TRIGGER BIT(1) /* rsa dma cmd */ #define ACRY_CMD_DMA_SRAM_MODE_RSA (0x3 << 4) #define ACRY_CMD_DMEM_AHB BIT(8) #define ACRY_CMD_DMA_SRAM_AHB_ENGINE 0 /* rsa key len */ #define RSA_E_BITS_LEN(x) ((x) << 16) #define RSA_M_BITS_LEN(x) (x) /* acry isr */ #define ACRY_RSA_ISR BIT(1) #define ASPEED_ACRY_BUFF_SIZE 0x1800 /* DMA buffer size */ #define ASPEED_ACRY_SRAM_MAX_LEN 2048 /* ACRY SRAM maximum length (Bytes) */ #define ASPEED_ACRY_RSA_MAX_KEY_LEN 512 /* ACRY RSA maximum key length (Bytes) */ #define CRYPTO_FLAGS_BUSY BIT(1) #define BYTES_PER_DWORD 4 /***************************** * * * AHBC register definitions * * * * ***************************/ #define AHBC_REGION_PROT 0x240 #define REGION_ACRYM BIT(23) #define ast_acry_write(acry, val, offset) \ writel((val), (acry)->regs + (offset)) #define ast_acry_read(acry, offset) \ readl((acry)->regs + (offset)) struct aspeed_acry_dev; typedef int (*aspeed_acry_fn_t)(struct aspeed_acry_dev *); struct aspeed_acry_dev { void __iomem *regs; struct device *dev; int irq; struct clk *clk; struct regmap *ahbc; struct akcipher_request *req; struct tasklet_struct done_task; aspeed_acry_fn_t resume; unsigned long flags; /* ACRY output SRAM buffer */ void __iomem *acry_sram; /* ACRY input DMA buffer */ void *buf_addr; dma_addr_t buf_dma_addr; struct crypto_engine *crypt_engine_rsa; /* ACRY SRAM memory mapped */ int exp_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN]; int mod_dw_mapping[ASPEED_ACRY_RSA_MAX_KEY_LEN]; int data_byte_mapping[ASPEED_ACRY_SRAM_MAX_LEN]; }; struct aspeed_acry_ctx { struct aspeed_acry_dev *acry_dev; struct rsa_key key; int enc; u8 *n; u8 *e; u8 *d; size_t n_sz; size_t e_sz; size_t d_sz; aspeed_acry_fn_t trigger; struct crypto_akcipher *fallback_tfm; }; struct aspeed_acry_alg { struct aspeed_acry_dev *acry_dev; struct akcipher_engine_alg akcipher; }; enum aspeed_rsa_key_mode { ASPEED_RSA_EXP_MODE = 0, ASPEED_RSA_MOD_MODE, ASPEED_RSA_DATA_MODE, }; static inline struct akcipher_request * akcipher_request_cast(struct crypto_async_request *req) { return container_of(req, struct akcipher_request, base); } static int 
aspeed_acry_do_fallback(struct akcipher_request *req) { struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); int err; akcipher_request_set_tfm(req, ctx->fallback_tfm); if (ctx->enc) err = crypto_akcipher_encrypt(req); else err = crypto_akcipher_decrypt(req); akcipher_request_set_tfm(req, cipher); return err; } static bool aspeed_acry_need_fallback(struct akcipher_request *req) { struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); return ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN; } static int aspeed_acry_handle_queue(struct aspeed_acry_dev *acry_dev, struct akcipher_request *req) { if (aspeed_acry_need_fallback(req)) { ACRY_DBG(acry_dev, "SW fallback\n"); return aspeed_acry_do_fallback(req); } return crypto_transfer_akcipher_request_to_engine(acry_dev->crypt_engine_rsa, req); } static int aspeed_acry_do_request(struct crypto_engine *engine, void *areq) { struct akcipher_request *req = akcipher_request_cast(areq); struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); struct aspeed_acry_dev *acry_dev = ctx->acry_dev; acry_dev->req = req; acry_dev->flags |= CRYPTO_FLAGS_BUSY; return ctx->trigger(acry_dev); } static int aspeed_acry_complete(struct aspeed_acry_dev *acry_dev, int err) { struct akcipher_request *req = acry_dev->req; acry_dev->flags &= ~CRYPTO_FLAGS_BUSY; crypto_finalize_akcipher_request(acry_dev->crypt_engine_rsa, req, err); return err; } /* * Copy Data to DMA buffer for engine used. */ static void aspeed_acry_rsa_sg_copy_to_buffer(struct aspeed_acry_dev *acry_dev, u8 *buf, struct scatterlist *src, size_t nbytes) { static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN]; int i = 0, j; int data_idx; ACRY_DBG(acry_dev, "\n"); scatterwalk_map_and_copy(dram_buffer, src, 0, nbytes, 0); for (j = nbytes - 1; j >= 0; j--) { data_idx = acry_dev->data_byte_mapping[i]; buf[data_idx] = dram_buffer[j]; i++; } for (; i < ASPEED_ACRY_SRAM_MAX_LEN; i++) { data_idx = acry_dev->data_byte_mapping[i]; buf[data_idx] = 0; } } /* * Copy Exp/Mod to DMA buffer for engine used. 
* * Params: * - mode 0 : Exponential * - mode 1 : Modulus * * Example: * - DRAM memory layout: * D[0], D[4], D[8], D[12] * - ACRY SRAM memory layout should reverse the order of source data: * D[12], D[8], D[4], D[0] */ static int aspeed_acry_rsa_ctx_copy(struct aspeed_acry_dev *acry_dev, void *buf, const void *xbuf, size_t nbytes, enum aspeed_rsa_key_mode mode) { const u8 *src = xbuf; __le32 *dw_buf = buf; int nbits, ndw; int i, j, idx; u32 data = 0; ACRY_DBG(acry_dev, "nbytes:%zu, mode:%d\n", nbytes, mode); if (nbytes > ASPEED_ACRY_RSA_MAX_KEY_LEN) return -ENOMEM; /* Remove the leading zeros */ while (nbytes > 0 && src[0] == 0) { src++; nbytes--; } nbits = nbytes * 8; if (nbytes > 0) nbits -= count_leading_zeros(src[0]) - (BITS_PER_LONG - 8); /* double-world alignment */ ndw = DIV_ROUND_UP(nbytes, BYTES_PER_DWORD); if (nbytes > 0) { i = BYTES_PER_DWORD - nbytes % BYTES_PER_DWORD; i %= BYTES_PER_DWORD; for (j = ndw; j > 0; j--) { for (; i < BYTES_PER_DWORD; i++) { data <<= 8; data |= *src++; } i = 0; if (mode == ASPEED_RSA_EXP_MODE) idx = acry_dev->exp_dw_mapping[j - 1]; else /* mode == ASPEED_RSA_MOD_MODE */ idx = acry_dev->mod_dw_mapping[j - 1]; dw_buf[idx] = cpu_to_le32(data); } } return nbits; } static int aspeed_acry_rsa_transfer(struct aspeed_acry_dev *acry_dev) { struct akcipher_request *req = acry_dev->req; u8 __iomem *sram_buffer = acry_dev->acry_sram; struct scatterlist *out_sg = req->dst; static u8 dram_buffer[ASPEED_ACRY_SRAM_MAX_LEN]; int leading_zero = 1; int result_nbytes; int i = 0, j; int data_idx; /* Set Data Memory to AHB(CPU) Access Mode */ ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD); /* Disable ACRY SRAM protection */ regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT, REGION_ACRYM, 0); result_nbytes = ASPEED_ACRY_SRAM_MAX_LEN; for (j = ASPEED_ACRY_SRAM_MAX_LEN - 1; j >= 0; j--) { data_idx = acry_dev->data_byte_mapping[j]; if (readb(sram_buffer + data_idx) == 0 && leading_zero) { result_nbytes--; } else { leading_zero = 0; dram_buffer[i] = readb(sram_buffer + data_idx); i++; } } ACRY_DBG(acry_dev, "result_nbytes:%d, req->dst_len:%d\n", result_nbytes, req->dst_len); if (result_nbytes <= req->dst_len) { scatterwalk_map_and_copy(dram_buffer, out_sg, 0, result_nbytes, 1); req->dst_len = result_nbytes; } else { dev_err(acry_dev->dev, "RSA engine error!\n"); } memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE); return aspeed_acry_complete(acry_dev, 0); } static int aspeed_acry_rsa_trigger(struct aspeed_acry_dev *acry_dev) { struct akcipher_request *req = acry_dev->req; struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); int ne, nm; if (!ctx->n || !ctx->n_sz) { dev_err(acry_dev->dev, "%s: key n is not set\n", __func__); return -EINVAL; } memzero_explicit(acry_dev->buf_addr, ASPEED_ACRY_BUFF_SIZE); /* Copy source data to DMA buffer */ aspeed_acry_rsa_sg_copy_to_buffer(acry_dev, acry_dev->buf_addr, req->src, req->src_len); nm = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, ctx->n, ctx->n_sz, ASPEED_RSA_MOD_MODE); if (ctx->enc) { if (!ctx->e || !ctx->e_sz) { dev_err(acry_dev->dev, "%s: key e is not set\n", __func__); return -EINVAL; } /* Copy key e to DMA buffer */ ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, ctx->e, ctx->e_sz, ASPEED_RSA_EXP_MODE); } else { if (!ctx->d || !ctx->d_sz) { dev_err(acry_dev->dev, "%s: key d is not set\n", __func__); return -EINVAL; } /* Copy key d to DMA buffer */ ne = aspeed_acry_rsa_ctx_copy(acry_dev, acry_dev->buf_addr, 
ctx->key.d, ctx->key.d_sz, ASPEED_RSA_EXP_MODE); } ast_acry_write(acry_dev, acry_dev->buf_dma_addr, ASPEED_ACRY_DMA_SRC_BASE); ast_acry_write(acry_dev, (ne << 16) + nm, ASPEED_ACRY_RSA_KEY_LEN); ast_acry_write(acry_dev, ASPEED_ACRY_BUFF_SIZE, ASPEED_ACRY_DMA_LEN); acry_dev->resume = aspeed_acry_rsa_transfer; /* Enable ACRY SRAM protection */ regmap_update_bits(acry_dev->ahbc, AHBC_REGION_PROT, REGION_ACRYM, REGION_ACRYM); ast_acry_write(acry_dev, ACRY_RSA_ISR, ASPEED_ACRY_INT_MASK); ast_acry_write(acry_dev, ACRY_CMD_DMA_SRAM_MODE_RSA | ACRY_CMD_DMA_SRAM_AHB_ENGINE, ASPEED_ACRY_DMA_CMD); /* Trigger RSA engines */ ast_acry_write(acry_dev, ACRY_CMD_RSA_TRIGGER | ACRY_CMD_DMA_RSA_TRIGGER, ASPEED_ACRY_TRIGGER); return 0; } static int aspeed_acry_rsa_enc(struct akcipher_request *req) { struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); struct aspeed_acry_dev *acry_dev = ctx->acry_dev; ctx->trigger = aspeed_acry_rsa_trigger; ctx->enc = 1; return aspeed_acry_handle_queue(acry_dev, req); } static int aspeed_acry_rsa_dec(struct akcipher_request *req) { struct crypto_akcipher *cipher = crypto_akcipher_reqtfm(req); struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(cipher); struct aspeed_acry_dev *acry_dev = ctx->acry_dev; ctx->trigger = aspeed_acry_rsa_trigger; ctx->enc = 0; return aspeed_acry_handle_queue(acry_dev, req); } static u8 *aspeed_rsa_key_copy(u8 *src, size_t len) { return kmemdup(src, len, GFP_KERNEL); } static int aspeed_rsa_set_n(struct aspeed_acry_ctx *ctx, u8 *value, size_t len) { ctx->n_sz = len; ctx->n = aspeed_rsa_key_copy(value, len); if (!ctx->n) return -ENOMEM; return 0; } static int aspeed_rsa_set_e(struct aspeed_acry_ctx *ctx, u8 *value, size_t len) { ctx->e_sz = len; ctx->e = aspeed_rsa_key_copy(value, len); if (!ctx->e) return -ENOMEM; return 0; } static int aspeed_rsa_set_d(struct aspeed_acry_ctx *ctx, u8 *value, size_t len) { ctx->d_sz = len; ctx->d = aspeed_rsa_key_copy(value, len); if (!ctx->d) return -ENOMEM; return 0; } static void aspeed_rsa_key_free(struct aspeed_acry_ctx *ctx) { kfree_sensitive(ctx->n); kfree_sensitive(ctx->e); kfree_sensitive(ctx->d); ctx->n_sz = 0; ctx->e_sz = 0; ctx->d_sz = 0; } static int aspeed_acry_rsa_setkey(struct crypto_akcipher *tfm, const void *key, unsigned int keylen, int priv) { struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); struct aspeed_acry_dev *acry_dev = ctx->acry_dev; int ret; if (priv) ret = rsa_parse_priv_key(&ctx->key, key, keylen); else ret = rsa_parse_pub_key(&ctx->key, key, keylen); if (ret) { dev_err(acry_dev->dev, "rsa parse key failed, ret:0x%x\n", ret); return ret; } /* Aspeed engine supports up to 4096 bits, * Use software fallback instead. 
*/ if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN) return 0; ret = aspeed_rsa_set_n(ctx, (u8 *)ctx->key.n, ctx->key.n_sz); if (ret) goto err; ret = aspeed_rsa_set_e(ctx, (u8 *)ctx->key.e, ctx->key.e_sz); if (ret) goto err; if (priv) { ret = aspeed_rsa_set_d(ctx, (u8 *)ctx->key.d, ctx->key.d_sz); if (ret) goto err; } return 0; err: dev_err(acry_dev->dev, "rsa set key failed\n"); aspeed_rsa_key_free(ctx); return ret; } static int aspeed_acry_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); int ret; ret = crypto_akcipher_set_pub_key(ctx->fallback_tfm, key, keylen); if (ret) return ret; return aspeed_acry_rsa_setkey(tfm, key, keylen, 0); } static int aspeed_acry_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); int ret; ret = crypto_akcipher_set_priv_key(ctx->fallback_tfm, key, keylen); if (ret) return ret; return aspeed_acry_rsa_setkey(tfm, key, keylen, 1); } static unsigned int aspeed_acry_rsa_max_size(struct crypto_akcipher *tfm) { struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); if (ctx->key.n_sz > ASPEED_ACRY_RSA_MAX_KEY_LEN) return crypto_akcipher_maxsize(ctx->fallback_tfm); return ctx->n_sz; } static int aspeed_acry_rsa_init_tfm(struct crypto_akcipher *tfm) { struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); struct akcipher_alg *alg = crypto_akcipher_alg(tfm); const char *name = crypto_tfm_alg_name(&tfm->base); struct aspeed_acry_alg *acry_alg; acry_alg = container_of(alg, struct aspeed_acry_alg, akcipher.base); ctx->acry_dev = acry_alg->acry_dev; ctx->fallback_tfm = crypto_alloc_akcipher(name, 0, CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback_tfm)) { dev_err(ctx->acry_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(ctx->fallback_tfm)); return PTR_ERR(ctx->fallback_tfm); } return 0; } static void aspeed_acry_rsa_exit_tfm(struct crypto_akcipher *tfm) { struct aspeed_acry_ctx *ctx = akcipher_tfm_ctx(tfm); crypto_free_akcipher(ctx->fallback_tfm); } static struct aspeed_acry_alg aspeed_acry_akcipher_algs[] = { { .akcipher.base = { .encrypt = aspeed_acry_rsa_enc, .decrypt = aspeed_acry_rsa_dec, .sign = aspeed_acry_rsa_dec, .verify = aspeed_acry_rsa_enc, .set_pub_key = aspeed_acry_rsa_set_pub_key, .set_priv_key = aspeed_acry_rsa_set_priv_key, .max_size = aspeed_acry_rsa_max_size, .init = aspeed_acry_rsa_init_tfm, .exit = aspeed_acry_rsa_exit_tfm, .base = { .cra_name = "rsa", .cra_driver_name = "aspeed-rsa", .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_AKCIPHER | CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_NEED_FALLBACK, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct aspeed_acry_ctx), }, }, .akcipher.op = { .do_one_request = aspeed_acry_do_request, }, }, }; static void aspeed_acry_register(struct aspeed_acry_dev *acry_dev) { int i, rc; for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) { aspeed_acry_akcipher_algs[i].acry_dev = acry_dev; rc = crypto_engine_register_akcipher(&aspeed_acry_akcipher_algs[i].akcipher); if (rc) { ACRY_DBG(acry_dev, "Failed to register %s\n", aspeed_acry_akcipher_algs[i].akcipher.base.base.cra_name); } } } static void aspeed_acry_unregister(struct aspeed_acry_dev *acry_dev) { int i; for (i = 0; i < ARRAY_SIZE(aspeed_acry_akcipher_algs); i++) crypto_engine_unregister_akcipher(&aspeed_acry_akcipher_algs[i].akcipher); } /* ACRY interrupt service routine. 
*/ static irqreturn_t aspeed_acry_irq(int irq, void *dev) { struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)dev; u32 sts; sts = ast_acry_read(acry_dev, ASPEED_ACRY_STATUS); ast_acry_write(acry_dev, sts, ASPEED_ACRY_STATUS); ACRY_DBG(acry_dev, "irq sts:0x%x\n", sts); if (sts & ACRY_RSA_ISR) { /* Stop RSA engine */ ast_acry_write(acry_dev, 0, ASPEED_ACRY_TRIGGER); if (acry_dev->flags & CRYPTO_FLAGS_BUSY) tasklet_schedule(&acry_dev->done_task); else dev_err(acry_dev->dev, "RSA no active requests.\n"); } return IRQ_HANDLED; } /* * ACRY SRAM has its own memory layout. * Set the DRAM to SRAM indexing for future used. */ static void aspeed_acry_sram_mapping(struct aspeed_acry_dev *acry_dev) { int i, j = 0; for (i = 0; i < (ASPEED_ACRY_SRAM_MAX_LEN / BYTES_PER_DWORD); i++) { acry_dev->exp_dw_mapping[i] = j; acry_dev->mod_dw_mapping[i] = j + 4; acry_dev->data_byte_mapping[(i * 4)] = (j + 8) * 4; acry_dev->data_byte_mapping[(i * 4) + 1] = (j + 8) * 4 + 1; acry_dev->data_byte_mapping[(i * 4) + 2] = (j + 8) * 4 + 2; acry_dev->data_byte_mapping[(i * 4) + 3] = (j + 8) * 4 + 3; j++; j = j % 4 ? j : j + 8; } } static void aspeed_acry_done_task(unsigned long data) { struct aspeed_acry_dev *acry_dev = (struct aspeed_acry_dev *)data; (void)acry_dev->resume(acry_dev); } static const struct of_device_id aspeed_acry_of_matches[] = { { .compatible = "aspeed,ast2600-acry", }, {}, }; static int aspeed_acry_probe(struct platform_device *pdev) { struct aspeed_acry_dev *acry_dev; struct device *dev = &pdev->dev; int rc; acry_dev = devm_kzalloc(dev, sizeof(struct aspeed_acry_dev), GFP_KERNEL); if (!acry_dev) return -ENOMEM; acry_dev->dev = dev; platform_set_drvdata(pdev, acry_dev); acry_dev->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(acry_dev->regs)) return PTR_ERR(acry_dev->regs); acry_dev->acry_sram = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(acry_dev->acry_sram)) return PTR_ERR(acry_dev->acry_sram); /* Get irq number and register it */ acry_dev->irq = platform_get_irq(pdev, 0); if (acry_dev->irq < 0) return -ENXIO; rc = devm_request_irq(dev, acry_dev->irq, aspeed_acry_irq, 0, dev_name(dev), acry_dev); if (rc) { dev_err(dev, "Failed to request irq.\n"); return rc; } acry_dev->clk = devm_clk_get_enabled(dev, NULL); if (IS_ERR(acry_dev->clk)) { dev_err(dev, "Failed to get acry clk\n"); return PTR_ERR(acry_dev->clk); } acry_dev->ahbc = syscon_regmap_lookup_by_phandle(dev->of_node, "aspeed,ahbc"); if (IS_ERR(acry_dev->ahbc)) { dev_err(dev, "Failed to get AHBC regmap\n"); return -ENODEV; } /* Initialize crypto hardware engine structure for RSA */ acry_dev->crypt_engine_rsa = crypto_engine_alloc_init(dev, true); if (!acry_dev->crypt_engine_rsa) { rc = -ENOMEM; goto clk_exit; } rc = crypto_engine_start(acry_dev->crypt_engine_rsa); if (rc) goto err_engine_rsa_start; tasklet_init(&acry_dev->done_task, aspeed_acry_done_task, (unsigned long)acry_dev); /* Set Data Memory to AHB(CPU) Access Mode */ ast_acry_write(acry_dev, ACRY_CMD_DMEM_AHB, ASPEED_ACRY_DMA_CMD); /* Initialize ACRY SRAM index */ aspeed_acry_sram_mapping(acry_dev); acry_dev->buf_addr = dmam_alloc_coherent(dev, ASPEED_ACRY_BUFF_SIZE, &acry_dev->buf_dma_addr, GFP_KERNEL); if (!acry_dev->buf_addr) { rc = -ENOMEM; goto err_engine_rsa_start; } aspeed_acry_register(acry_dev); dev_info(dev, "Aspeed ACRY Accelerator successfully registered\n"); return 0; err_engine_rsa_start: crypto_engine_exit(acry_dev->crypt_engine_rsa); clk_exit: clk_disable_unprepare(acry_dev->clk); return rc; } static int aspeed_acry_remove(struct 
platform_device *pdev) { struct aspeed_acry_dev *acry_dev = platform_get_drvdata(pdev); aspeed_acry_unregister(acry_dev); crypto_engine_exit(acry_dev->crypt_engine_rsa); tasklet_kill(&acry_dev->done_task); clk_disable_unprepare(acry_dev->clk); return 0; } MODULE_DEVICE_TABLE(of, aspeed_acry_of_matches); static struct platform_driver aspeed_acry_driver = { .probe = aspeed_acry_probe, .remove = aspeed_acry_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = aspeed_acry_of_matches, }, }; module_platform_driver(aspeed_acry_driver); MODULE_AUTHOR("Neal Liu <[email protected]>"); MODULE_DESCRIPTION("ASPEED ACRY driver for hardware RSA Engine"); MODULE_LICENSE("GPL");
linux-master
drivers/crypto/aspeed/aspeed-acry.c
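A note on the aspeed-acry.c row above: aspeed_acry_rsa_ctx_copy() strips leading zero bytes from the RSA modulus/exponent, computes the operand's bit length for the key-length register, and packs the remaining big-endian bytes into 32-bit words that are then scattered through the engine SRAM via exp_dw_mapping/mod_dw_mapping. The standalone C sketch below reproduces only the stripping, bit-count and word-packing steps; pack_bignum() and the sample value in main() are invented for illustration, and the SRAM index remapping and cpu_to_le32() conversion are deliberately left out.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define BYTES_PER_DWORD 4

/* Strip leading zero bytes, pack the remaining big-endian bytes into
 * 32-bit words (least-significant word at index 0), and return the bit
 * length of the value -- the quantity the driver reports back for the
 * RSA key-length register. Returns 0 for a zero value, -1 on overflow. */
static int pack_bignum(const uint8_t *src, size_t nbytes,
                       uint32_t *dw, size_t ndw_max)
{
    size_t ndw, i, j;
    int nbits;
    uint32_t data = 0;

    while (nbytes > 0 && src[0] == 0) {   /* drop leading zeros */
        src++;
        nbytes--;
    }
    if (nbytes == 0)
        return 0;

    /* bit length = 8 * nbytes minus the leading zero bits of the MSB */
    nbits = (int)nbytes * 8;
    for (uint8_t msb = src[0]; !(msb & 0x80); msb <<= 1)
        nbits--;

    ndw = (nbytes + BYTES_PER_DWORD - 1) / BYTES_PER_DWORD;
    if (ndw > ndw_max)
        return -1;

    /* Walk the source MSB-first; the most significant (possibly short)
     * word lands at the highest index, mirroring dw_buf[j - 1] above. */
    i = (BYTES_PER_DWORD - nbytes % BYTES_PER_DWORD) % BYTES_PER_DWORD;
    for (j = ndw; j > 0; j--) {
        for (; i < BYTES_PER_DWORD; i++) {
            data <<= 8;
            data |= *src++;
        }
        i = 0;
        dw[j - 1] = data;
    }
    return nbits;
}

int main(void)
{
    /* 0x00012345 -> leading zero byte stripped, 17 significant bits */
    const uint8_t n[] = { 0x00, 0x01, 0x23, 0x45 };
    uint32_t dw[4] = { 0 };
    int nbits = pack_bignum(n, sizeof(n), dw, 4);

    printf("nbits=%d dw[0]=0x%08x\n", nbits, (unsigned int)dw[0]);
    return 0;
}

Built as ordinary user-space C this prints nbits=17 dw[0]=0x00012345 for the sample operand, which is the bit count and packed word the driver-side loop would produce before remapping into SRAM.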
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (c) 2021 Aspeed Technology Inc. */ #include "aspeed-hace.h" #include <crypto/des.h> #include <crypto/engine.h> #include <crypto/internal/des.h> #include <crypto/internal/skcipher.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/string.h> #ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO_DEBUG #define CIPHER_DBG(h, fmt, ...) \ dev_info((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) #else #define CIPHER_DBG(h, fmt, ...) \ dev_dbg((h)->dev, "%s() " fmt, __func__, ##__VA_ARGS__) #endif static int aspeed_crypto_do_fallback(struct skcipher_request *areq) { struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq); struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(areq); struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); int err; skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); skcipher_request_set_callback(&rctx->fallback_req, areq->base.flags, areq->base.complete, areq->base.data); skcipher_request_set_crypt(&rctx->fallback_req, areq->src, areq->dst, areq->cryptlen, areq->iv); if (rctx->enc_cmd & HACE_CMD_ENCRYPT) err = crypto_skcipher_encrypt(&rctx->fallback_req); else err = crypto_skcipher_decrypt(&rctx->fallback_req); return err; } static bool aspeed_crypto_need_fallback(struct skcipher_request *areq) { struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(areq); if (areq->cryptlen == 0) return true; if ((rctx->enc_cmd & HACE_CMD_DES_SELECT) && !IS_ALIGNED(areq->cryptlen, DES_BLOCK_SIZE)) return true; if ((!(rctx->enc_cmd & HACE_CMD_DES_SELECT)) && !IS_ALIGNED(areq->cryptlen, AES_BLOCK_SIZE)) return true; return false; } static int aspeed_hace_crypto_handle_queue(struct aspeed_hace_dev *hace_dev, struct skcipher_request *req) { if (hace_dev->version == AST2500_VERSION && aspeed_crypto_need_fallback(req)) { CIPHER_DBG(hace_dev, "SW fallback\n"); return aspeed_crypto_do_fallback(req); } return crypto_transfer_skcipher_request_to_engine( hace_dev->crypt_engine_crypto, req); } static int aspeed_crypto_do_request(struct crypto_engine *engine, void *areq) { struct skcipher_request *req = skcipher_request_cast(areq); struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); struct aspeed_hace_dev *hace_dev = ctx->hace_dev; struct aspeed_engine_crypto *crypto_engine; int rc; crypto_engine = &hace_dev->crypto_engine; crypto_engine->req = req; crypto_engine->flags |= CRYPTO_FLAGS_BUSY; rc = ctx->start(hace_dev); if (rc != -EINPROGRESS) return -EIO; return 0; } static int aspeed_sk_complete(struct aspeed_hace_dev *hace_dev, int err) { struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; struct aspeed_cipher_reqctx *rctx; struct skcipher_request *req; CIPHER_DBG(hace_dev, "\n"); req = crypto_engine->req; rctx = skcipher_request_ctx(req); if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) { if (rctx->enc_cmd & HACE_CMD_DES_SELECT) memcpy(req->iv, crypto_engine->cipher_ctx + DES_KEY_SIZE, DES_KEY_SIZE); else memcpy(req->iv, crypto_engine->cipher_ctx, AES_BLOCK_SIZE); } crypto_engine->flags &= ~CRYPTO_FLAGS_BUSY; crypto_finalize_skcipher_request(hace_dev->crypt_engine_crypto, req, err); return err; } static int aspeed_sk_transfer_sg(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; struct device *dev = hace_dev->dev; struct aspeed_cipher_reqctx *rctx; struct 
skcipher_request *req; CIPHER_DBG(hace_dev, "\n"); req = crypto_engine->req; rctx = skcipher_request_ctx(req); if (req->src == req->dst) { dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_BIDIRECTIONAL); } else { dma_unmap_sg(dev, req->src, rctx->src_nents, DMA_TO_DEVICE); dma_unmap_sg(dev, req->dst, rctx->dst_nents, DMA_FROM_DEVICE); } return aspeed_sk_complete(hace_dev, 0); } static int aspeed_sk_transfer(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; struct aspeed_cipher_reqctx *rctx; struct skcipher_request *req; struct scatterlist *out_sg; int nbytes = 0; int rc = 0; req = crypto_engine->req; rctx = skcipher_request_ctx(req); out_sg = req->dst; /* Copy output buffer to dst scatter-gather lists */ nbytes = sg_copy_from_buffer(out_sg, rctx->dst_nents, crypto_engine->cipher_addr, req->cryptlen); if (!nbytes) { dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n", "nbytes", nbytes, "cryptlen", req->cryptlen); rc = -EINVAL; } CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n", "nbytes", nbytes, "req->cryptlen", req->cryptlen, "nb_out_sg", rctx->dst_nents, "cipher addr", crypto_engine->cipher_addr); return aspeed_sk_complete(hace_dev, rc); } static int aspeed_sk_start(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; struct aspeed_cipher_reqctx *rctx; struct skcipher_request *req; struct scatterlist *in_sg; int nbytes; req = crypto_engine->req; rctx = skcipher_request_ctx(req); in_sg = req->src; nbytes = sg_copy_to_buffer(in_sg, rctx->src_nents, crypto_engine->cipher_addr, req->cryptlen); CIPHER_DBG(hace_dev, "%s:%d, %s:%d, %s:%d, %s:%p\n", "nbytes", nbytes, "req->cryptlen", req->cryptlen, "nb_in_sg", rctx->src_nents, "cipher addr", crypto_engine->cipher_addr); if (!nbytes) { dev_warn(hace_dev->dev, "invalid sg copy, %s:0x%x, %s:0x%x\n", "nbytes", nbytes, "cryptlen", req->cryptlen); return -EINVAL; } crypto_engine->resume = aspeed_sk_transfer; /* Trigger engines */ ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr, ASPEED_HACE_SRC); ast_hace_write(hace_dev, crypto_engine->cipher_dma_addr, ASPEED_HACE_DEST); ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN); ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD); return -EINPROGRESS; } static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; struct aspeed_sg_list *src_list, *dst_list; dma_addr_t src_dma_addr, dst_dma_addr; struct aspeed_cipher_reqctx *rctx; struct skcipher_request *req; struct scatterlist *s; int src_sg_len; int dst_sg_len; int total, i; int rc; CIPHER_DBG(hace_dev, "\n"); req = crypto_engine->req; rctx = skcipher_request_ctx(req); rctx->enc_cmd |= HACE_CMD_DES_SG_CTRL | HACE_CMD_SRC_SG_CTRL | HACE_CMD_AES_KEY_HW_EXP | HACE_CMD_MBUS_REQ_SYNC_EN; /* BIDIRECTIONAL */ if (req->dst == req->src) { src_sg_len = dma_map_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_BIDIRECTIONAL); dst_sg_len = src_sg_len; if (!src_sg_len) { dev_warn(hace_dev->dev, "dma_map_sg() src error\n"); return -EINVAL; } } else { src_sg_len = dma_map_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); if (!src_sg_len) { dev_warn(hace_dev->dev, "dma_map_sg() src error\n"); return -EINVAL; } dst_sg_len = dma_map_sg(hace_dev->dev, req->dst, rctx->dst_nents, DMA_FROM_DEVICE); if (!dst_sg_len) { dev_warn(hace_dev->dev, "dma_map_sg() dst error\n"); rc = -EINVAL; goto free_req_src; } } src_list = (struct aspeed_sg_list 
*)crypto_engine->cipher_addr; src_dma_addr = crypto_engine->cipher_dma_addr; total = req->cryptlen; for_each_sg(req->src, s, src_sg_len, i) { u32 phy_addr = sg_dma_address(s); u32 len = sg_dma_len(s); if (total > len) total -= len; else { /* last sg list */ len = total; len |= BIT(31); total = 0; } src_list[i].phy_addr = cpu_to_le32(phy_addr); src_list[i].len = cpu_to_le32(len); } if (total != 0) { rc = -EINVAL; goto free_req; } if (req->dst == req->src) { dst_list = src_list; dst_dma_addr = src_dma_addr; } else { dst_list = (struct aspeed_sg_list *)crypto_engine->dst_sg_addr; dst_dma_addr = crypto_engine->dst_sg_dma_addr; total = req->cryptlen; for_each_sg(req->dst, s, dst_sg_len, i) { u32 phy_addr = sg_dma_address(s); u32 len = sg_dma_len(s); if (total > len) total -= len; else { /* last sg list */ len = total; len |= BIT(31); total = 0; } dst_list[i].phy_addr = cpu_to_le32(phy_addr); dst_list[i].len = cpu_to_le32(len); } dst_list[dst_sg_len].phy_addr = 0; dst_list[dst_sg_len].len = 0; } if (total != 0) { rc = -EINVAL; goto free_req; } crypto_engine->resume = aspeed_sk_transfer_sg; /* Memory barrier to ensure all data setup before engine starts */ mb(); /* Trigger engines */ ast_hace_write(hace_dev, src_dma_addr, ASPEED_HACE_SRC); ast_hace_write(hace_dev, dst_dma_addr, ASPEED_HACE_DEST); ast_hace_write(hace_dev, req->cryptlen, ASPEED_HACE_DATA_LEN); ast_hace_write(hace_dev, rctx->enc_cmd, ASPEED_HACE_CMD); return -EINPROGRESS; free_req: if (req->dst == req->src) { dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_BIDIRECTIONAL); } else { dma_unmap_sg(hace_dev->dev, req->dst, rctx->dst_nents, DMA_TO_DEVICE); dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); } return rc; free_req_src: dma_unmap_sg(hace_dev->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); return rc; } static int aspeed_hace_skcipher_trigger(struct aspeed_hace_dev *hace_dev) { struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine; struct aspeed_cipher_reqctx *rctx; struct crypto_skcipher *cipher; struct aspeed_cipher_ctx *ctx; struct skcipher_request *req; CIPHER_DBG(hace_dev, "\n"); req = crypto_engine->req; rctx = skcipher_request_ctx(req); cipher = crypto_skcipher_reqtfm(req); ctx = crypto_skcipher_ctx(cipher); /* enable interrupt */ rctx->enc_cmd |= HACE_CMD_ISR_EN; rctx->dst_nents = sg_nents(req->dst); rctx->src_nents = sg_nents(req->src); ast_hace_write(hace_dev, crypto_engine->cipher_ctx_dma, ASPEED_HACE_CONTEXT); if (rctx->enc_cmd & HACE_CMD_IV_REQUIRE) { if (rctx->enc_cmd & HACE_CMD_DES_SELECT) memcpy(crypto_engine->cipher_ctx + DES_BLOCK_SIZE, req->iv, DES_BLOCK_SIZE); else memcpy(crypto_engine->cipher_ctx, req->iv, AES_BLOCK_SIZE); } if (hace_dev->version == AST2600_VERSION) { memcpy(crypto_engine->cipher_ctx + 16, ctx->key, ctx->key_len); return aspeed_sk_start_sg(hace_dev); } memcpy(crypto_engine->cipher_ctx + 16, ctx->key, AES_MAX_KEYLENGTH); return aspeed_sk_start(hace_dev); } static int aspeed_des_crypt(struct skcipher_request *req, u32 cmd) { struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); struct aspeed_hace_dev *hace_dev = ctx->hace_dev; u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK; CIPHER_DBG(hace_dev, "\n"); if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) { if (!IS_ALIGNED(req->cryptlen, DES_BLOCK_SIZE)) return -EINVAL; } rctx->enc_cmd = cmd | HACE_CMD_DES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE | HACE_CMD_DES | 
HACE_CMD_CONTEXT_LOAD_ENABLE | HACE_CMD_CONTEXT_SAVE_ENABLE; return aspeed_hace_crypto_handle_queue(hace_dev, req); } static int aspeed_des_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); struct aspeed_hace_dev *hace_dev = ctx->hace_dev; int rc; CIPHER_DBG(hace_dev, "keylen: %d bits\n", keylen); if (keylen != DES_KEY_SIZE && keylen != DES3_EDE_KEY_SIZE) { dev_warn(hace_dev->dev, "invalid keylen: %d bits\n", keylen); return -EINVAL; } if (keylen == DES_KEY_SIZE) { rc = crypto_des_verify_key(tfm, key); if (rc) return rc; } else if (keylen == DES3_EDE_KEY_SIZE) { rc = crypto_des3_ede_verify_key(tfm, key); if (rc) return rc; } memcpy(ctx->key, key, keylen); ctx->key_len = keylen; crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int aspeed_tdes_ctr_decrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR | HACE_CMD_TRIPLE_DES); } static int aspeed_tdes_ctr_encrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR | HACE_CMD_TRIPLE_DES); } static int aspeed_tdes_ofb_decrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB | HACE_CMD_TRIPLE_DES); } static int aspeed_tdes_ofb_encrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB | HACE_CMD_TRIPLE_DES); } static int aspeed_tdes_cfb_decrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB | HACE_CMD_TRIPLE_DES); } static int aspeed_tdes_cfb_encrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB | HACE_CMD_TRIPLE_DES); } static int aspeed_tdes_cbc_decrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC | HACE_CMD_TRIPLE_DES); } static int aspeed_tdes_cbc_encrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC | HACE_CMD_TRIPLE_DES); } static int aspeed_tdes_ecb_decrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB | HACE_CMD_TRIPLE_DES); } static int aspeed_tdes_ecb_encrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB | HACE_CMD_TRIPLE_DES); } static int aspeed_des_ctr_decrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR | HACE_CMD_SINGLE_DES); } static int aspeed_des_ctr_encrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR | HACE_CMD_SINGLE_DES); } static int aspeed_des_ofb_decrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB | HACE_CMD_SINGLE_DES); } static int aspeed_des_ofb_encrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB | HACE_CMD_SINGLE_DES); } static int aspeed_des_cfb_decrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB | HACE_CMD_SINGLE_DES); } static int aspeed_des_cfb_encrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB | HACE_CMD_SINGLE_DES); } static int aspeed_des_cbc_decrypt(struct 
skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC | HACE_CMD_SINGLE_DES); } static int aspeed_des_cbc_encrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC | HACE_CMD_SINGLE_DES); } static int aspeed_des_ecb_decrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_ECB | HACE_CMD_SINGLE_DES); } static int aspeed_des_ecb_encrypt(struct skcipher_request *req) { return aspeed_des_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB | HACE_CMD_SINGLE_DES); } static int aspeed_aes_crypt(struct skcipher_request *req, u32 cmd) { struct aspeed_cipher_reqctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); struct aspeed_hace_dev *hace_dev = ctx->hace_dev; u32 crypto_alg = cmd & HACE_CMD_OP_MODE_MASK; if (crypto_alg == HACE_CMD_CBC || crypto_alg == HACE_CMD_ECB) { if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE)) return -EINVAL; } CIPHER_DBG(hace_dev, "%s\n", (cmd & HACE_CMD_ENCRYPT) ? "encrypt" : "decrypt"); cmd |= HACE_CMD_AES_SELECT | HACE_CMD_RI_WO_DATA_ENABLE | HACE_CMD_CONTEXT_LOAD_ENABLE | HACE_CMD_CONTEXT_SAVE_ENABLE; switch (ctx->key_len) { case AES_KEYSIZE_128: cmd |= HACE_CMD_AES128; break; case AES_KEYSIZE_192: cmd |= HACE_CMD_AES192; break; case AES_KEYSIZE_256: cmd |= HACE_CMD_AES256; break; default: return -EINVAL; } rctx->enc_cmd = cmd; return aspeed_hace_crypto_handle_queue(hace_dev, req); } static int aspeed_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(cipher); struct aspeed_hace_dev *hace_dev = ctx->hace_dev; struct crypto_aes_ctx gen_aes_key; CIPHER_DBG(hace_dev, "keylen: %d bits\n", (keylen * 8)); if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256) return -EINVAL; if (ctx->hace_dev->version == AST2500_VERSION) { aes_expandkey(&gen_aes_key, key, keylen); memcpy(ctx->key, gen_aes_key.key_enc, AES_MAX_KEYLENGTH); } else { memcpy(ctx->key, key, keylen); } ctx->key_len = keylen; crypto_skcipher_clear_flags(ctx->fallback_tfm, CRYPTO_TFM_REQ_MASK); crypto_skcipher_set_flags(ctx->fallback_tfm, cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK); return crypto_skcipher_setkey(ctx->fallback_tfm, key, keylen); } static int aspeed_aes_ctr_decrypt(struct skcipher_request *req) { return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CTR); } static int aspeed_aes_ctr_encrypt(struct skcipher_request *req) { return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CTR); } static int aspeed_aes_ofb_decrypt(struct skcipher_request *req) { return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_OFB); } static int aspeed_aes_ofb_encrypt(struct skcipher_request *req) { return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_OFB); } static int aspeed_aes_cfb_decrypt(struct skcipher_request *req) { return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CFB); } static int aspeed_aes_cfb_encrypt(struct skcipher_request *req) { return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CFB); } static int aspeed_aes_cbc_decrypt(struct skcipher_request *req) { return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | HACE_CMD_CBC); } static int aspeed_aes_cbc_encrypt(struct skcipher_request *req) { return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_CBC); } static int aspeed_aes_ecb_decrypt(struct skcipher_request *req) { return aspeed_aes_crypt(req, HACE_CMD_DECRYPT | 
HACE_CMD_ECB); } static int aspeed_aes_ecb_encrypt(struct skcipher_request *req) { return aspeed_aes_crypt(req, HACE_CMD_ENCRYPT | HACE_CMD_ECB); } static int aspeed_crypto_cra_init(struct crypto_skcipher *tfm) { struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct skcipher_alg *alg = crypto_skcipher_alg(tfm); const char *name = crypto_tfm_alg_name(&tfm->base); struct aspeed_hace_alg *crypto_alg; crypto_alg = container_of(alg, struct aspeed_hace_alg, alg.skcipher.base); ctx->hace_dev = crypto_alg->hace_dev; ctx->start = aspeed_hace_skcipher_trigger; CIPHER_DBG(ctx->hace_dev, "%s\n", name); ctx->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(ctx->fallback_tfm)) { dev_err(ctx->hace_dev->dev, "ERROR: Cannot allocate fallback for %s %ld\n", name, PTR_ERR(ctx->fallback_tfm)); return PTR_ERR(ctx->fallback_tfm); } crypto_skcipher_set_reqsize(tfm, sizeof(struct aspeed_cipher_reqctx) + crypto_skcipher_reqsize(ctx->fallback_tfm)); return 0; } static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm) { struct aspeed_cipher_ctx *ctx = crypto_skcipher_ctx(tfm); struct aspeed_hace_dev *hace_dev = ctx->hace_dev; CIPHER_DBG(hace_dev, "%s\n", crypto_tfm_alg_name(&tfm->base)); crypto_free_skcipher(ctx->fallback_tfm); } static struct aspeed_hace_alg aspeed_crypto_algs[] = { { .alg.skcipher.base = { .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = aspeed_aes_setkey, .encrypt = aspeed_aes_ecb_encrypt, .decrypt = aspeed_aes_ecb_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "ecb(aes)", .cra_driver_name = "aspeed-ecb-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = aspeed_aes_setkey, .encrypt = aspeed_aes_cbc_encrypt, .decrypt = aspeed_aes_cbc_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "cbc(aes)", .cra_driver_name = "aspeed-cbc-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = aspeed_aes_setkey, .encrypt = aspeed_aes_cfb_encrypt, .decrypt = aspeed_aes_cfb_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "cfb(aes)", .cra_driver_name = "aspeed-cfb-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = aspeed_aes_setkey, .encrypt = aspeed_aes_ofb_encrypt, .decrypt = aspeed_aes_ofb_decrypt, .init = 
aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "ofb(aes)", .cra_driver_name = "aspeed-ofb-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = aspeed_des_setkey, .encrypt = aspeed_des_ecb_encrypt, .decrypt = aspeed_des_ecb_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "ecb(des)", .cra_driver_name = "aspeed-ecb-des", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = aspeed_des_setkey, .encrypt = aspeed_des_cbc_encrypt, .decrypt = aspeed_des_cbc_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "cbc(des)", .cra_driver_name = "aspeed-cbc-des", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = aspeed_des_setkey, .encrypt = aspeed_des_cfb_encrypt, .decrypt = aspeed_des_cfb_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "cfb(des)", .cra_driver_name = "aspeed-cfb-des", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = aspeed_des_setkey, .encrypt = aspeed_des_ofb_encrypt, .decrypt = aspeed_des_ofb_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "ofb(des)", .cra_driver_name = "aspeed-ofb-des", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = aspeed_des_setkey, .encrypt = aspeed_tdes_ecb_encrypt, .decrypt = aspeed_tdes_ecb_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "aspeed-ecb-tdes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct 
aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = aspeed_des_setkey, .encrypt = aspeed_tdes_cbc_encrypt, .decrypt = aspeed_tdes_cbc_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "aspeed-cbc-tdes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = aspeed_des_setkey, .encrypt = aspeed_tdes_cfb_encrypt, .decrypt = aspeed_tdes_cfb_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "cfb(des3_ede)", .cra_driver_name = "aspeed-cfb-tdes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = aspeed_des_setkey, .encrypt = aspeed_tdes_ofb_encrypt, .decrypt = aspeed_tdes_ofb_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "ofb(des3_ede)", .cra_driver_name = "aspeed-ofb-tdes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = DES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, }; static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = { { .alg.skcipher.base = { .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = aspeed_aes_setkey, .encrypt = aspeed_aes_ctr_encrypt, .decrypt = aspeed_aes_ctr_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "ctr(aes)", .cra_driver_name = "aspeed-ctr-aes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .setkey = aspeed_des_setkey, .encrypt = aspeed_des_ctr_encrypt, .decrypt = aspeed_des_ctr_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "ctr(des)", .cra_driver_name = "aspeed-ctr-des", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, { .alg.skcipher.base = { .ivsize = DES_BLOCK_SIZE, .min_keysize = 
DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .setkey = aspeed_des_setkey, .encrypt = aspeed_tdes_ctr_encrypt, .decrypt = aspeed_tdes_ctr_decrypt, .init = aspeed_crypto_cra_init, .exit = aspeed_crypto_cra_exit, .base = { .cra_name = "ctr(des3_ede)", .cra_driver_name = "aspeed-ctr-tdes", .cra_priority = 300, .cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct aspeed_cipher_ctx), .cra_alignmask = 0x0f, .cra_module = THIS_MODULE, } }, .alg.skcipher.op = { .do_one_request = aspeed_crypto_do_request, }, }, }; void aspeed_unregister_hace_crypto_algs(struct aspeed_hace_dev *hace_dev) { int i; for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) crypto_engine_unregister_skcipher(&aspeed_crypto_algs[i].alg.skcipher); if (hace_dev->version != AST2600_VERSION) return; for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) crypto_engine_unregister_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher); } void aspeed_register_hace_crypto_algs(struct aspeed_hace_dev *hace_dev) { int rc, i; CIPHER_DBG(hace_dev, "\n"); for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs); i++) { aspeed_crypto_algs[i].hace_dev = hace_dev; rc = crypto_engine_register_skcipher(&aspeed_crypto_algs[i].alg.skcipher); if (rc) { CIPHER_DBG(hace_dev, "Failed to register %s\n", aspeed_crypto_algs[i].alg.skcipher.base.base.cra_name); } } if (hace_dev->version != AST2600_VERSION) return; for (i = 0; i < ARRAY_SIZE(aspeed_crypto_algs_g6); i++) { aspeed_crypto_algs_g6[i].hace_dev = hace_dev; rc = crypto_engine_register_skcipher(&aspeed_crypto_algs_g6[i].alg.skcipher); if (rc) { CIPHER_DBG(hace_dev, "Failed to register %s\n", aspeed_crypto_algs_g6[i].alg.skcipher.base.base.cra_name); } } }
linux-master
drivers/crypto/aspeed/aspeed-hace-crypto.c
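A note on the aspeed-hace-crypto.c row above: in aspeed_sk_start_sg() the driver converts the DMA-mapped scatterlist into the engine's own descriptor format, trimming the final segment to the remaining byte count and setting bit 31 of its length field as the end-of-list marker; if any bytes remain uncovered the request is failed with -EINVAL. The sketch below models only that termination rule. struct hw_sg_entry, SG_LAST_ENTRY and build_sg_list() are made-up names (the driver's descriptor type is struct aspeed_sg_list), plain uint32_t values stand in for bus addresses, and unlike the driver the loop returns as soon as the requested length is covered instead of walking the remaining segments.

#include <stdint.h>
#include <stdio.h>

struct hw_sg_entry {
    uint32_t phy_addr;  /* bus address of the segment (le32 in hardware) */
    uint32_t len;       /* segment length; bit 31 marks the last entry */
};

#define SG_LAST_ENTRY (1u << 31)

/* Fill 'list' from (addr, len) pairs until 'total' bytes are described.
 * The final descriptor is trimmed to the remaining byte count and tagged
 * with SG_LAST_ENTRY, mirroring the "last sg list" branch above.
 * Returns the number of descriptors used, or -1 if the segments do not
 * cover 'total' bytes. */
static int build_sg_list(struct hw_sg_entry *list, int max_entries,
                         const uint32_t *addr, const uint32_t *len,
                         int nsegs, uint32_t total)
{
    for (int i = 0; i < nsegs && i < max_entries; i++) {
        uint32_t l = len[i];

        if (total > l) {
            total -= l;
        } else {
            l = total | SG_LAST_ENTRY;  /* last descriptor */
            total = 0;
        }
        list[i].phy_addr = addr[i];
        list[i].len = l;
        if (total == 0)
            return i + 1;
    }
    return -1;  /* segments shorter than the requested length */
}

int main(void)
{
    struct hw_sg_entry list[4];
    uint32_t addr[] = { 0x80000000u, 0x80001000u, 0x80002000u };
    uint32_t len[]  = { 0x1000, 0x1000, 0x1000 };
    int n = build_sg_list(list, 4, addr, len, 3, 0x2800);

    /* expect 3 entries; the last is 0x800 bytes with bit 31 set */
    printf("entries=%d last_len=0x%08x\n", n, (unsigned int)list[n - 1].len);
    return 0;
}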
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 Cavium, Inc. */ #include <linux/interrupt.h> #include <linux/module.h> #include "cptvf.h" #define DRV_NAME "thunder-cptvf" #define DRV_VERSION "1.0" struct cptvf_wqe { struct tasklet_struct twork; void *cptvf; u32 qno; }; struct cptvf_wqe_info { struct cptvf_wqe vq_wqe[CPT_NUM_QS_PER_VF]; }; static void vq_work_handler(unsigned long data) { struct cptvf_wqe_info *cwqe_info = (struct cptvf_wqe_info *)data; struct cptvf_wqe *cwqe = &cwqe_info->vq_wqe[0]; vq_post_process(cwqe->cptvf, cwqe->qno); } static int init_worker_threads(struct cpt_vf *cptvf) { struct pci_dev *pdev = cptvf->pdev; struct cptvf_wqe_info *cwqe_info; int i; cwqe_info = kzalloc(sizeof(*cwqe_info), GFP_KERNEL); if (!cwqe_info) return -ENOMEM; if (cptvf->nr_queues) { dev_info(&pdev->dev, "Creating VQ worker threads (%d)\n", cptvf->nr_queues); } for (i = 0; i < cptvf->nr_queues; i++) { tasklet_init(&cwqe_info->vq_wqe[i].twork, vq_work_handler, (u64)cwqe_info); cwqe_info->vq_wqe[i].qno = i; cwqe_info->vq_wqe[i].cptvf = cptvf; } cptvf->wqe_info = cwqe_info; return 0; } static void cleanup_worker_threads(struct cpt_vf *cptvf) { struct cptvf_wqe_info *cwqe_info; struct pci_dev *pdev = cptvf->pdev; int i; cwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info; if (!cwqe_info) return; if (cptvf->nr_queues) { dev_info(&pdev->dev, "Cleaning VQ worker threads (%u)\n", cptvf->nr_queues); } for (i = 0; i < cptvf->nr_queues; i++) tasklet_kill(&cwqe_info->vq_wqe[i].twork); kfree_sensitive(cwqe_info); cptvf->wqe_info = NULL; } static void free_pending_queues(struct pending_qinfo *pqinfo) { int i; struct pending_queue *queue; for_each_pending_queue(pqinfo, queue, i) { if (!queue->head) continue; /* free single queue */ kfree_sensitive((queue->head)); queue->front = 0; queue->rear = 0; return; } pqinfo->qlen = 0; pqinfo->nr_queues = 0; } static int alloc_pending_queues(struct pending_qinfo *pqinfo, u32 qlen, u32 nr_queues) { u32 i; int ret; struct pending_queue *queue = NULL; pqinfo->nr_queues = nr_queues; pqinfo->qlen = qlen; for_each_pending_queue(pqinfo, queue, i) { queue->head = kcalloc(qlen, sizeof(*queue->head), GFP_KERNEL); if (!queue->head) { ret = -ENOMEM; goto pending_qfail; } queue->front = 0; queue->rear = 0; atomic64_set((&queue->pending_count), (0)); /* init queue spin lock */ spin_lock_init(&queue->lock); } return 0; pending_qfail: free_pending_queues(pqinfo); return ret; } static int init_pending_queues(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues) { struct pci_dev *pdev = cptvf->pdev; int ret; if (!nr_queues) return 0; ret = alloc_pending_queues(&cptvf->pqinfo, qlen, nr_queues); if (ret) { dev_err(&pdev->dev, "failed to setup pending queues (%u)\n", nr_queues); return ret; } return 0; } static void cleanup_pending_queues(struct cpt_vf *cptvf) { struct pci_dev *pdev = cptvf->pdev; if (!cptvf->nr_queues) return; dev_info(&pdev->dev, "Cleaning VQ pending queue (%u)\n", cptvf->nr_queues); free_pending_queues(&cptvf->pqinfo); } static void free_command_queues(struct cpt_vf *cptvf, struct command_qinfo *cqinfo) { int i; struct command_queue *queue = NULL; struct command_chunk *chunk = NULL; struct pci_dev *pdev = cptvf->pdev; struct hlist_node *node; /* clean up for each queue */ for (i = 0; i < cptvf->nr_queues; i++) { queue = &cqinfo->queue[i]; if (hlist_empty(&cqinfo->queue[i].chead)) continue; hlist_for_each_entry_safe(chunk, node, &cqinfo->queue[i].chead, nextchunk) { dma_free_coherent(&pdev->dev, chunk->size, chunk->head, chunk->dma_addr); chunk->head = NULL; 
chunk->dma_addr = 0; hlist_del(&chunk->nextchunk); kfree_sensitive(chunk); } queue->nchunks = 0; queue->idx = 0; } /* common cleanup */ cqinfo->cmd_size = 0; } static int alloc_command_queues(struct cpt_vf *cptvf, struct command_qinfo *cqinfo, size_t cmd_size, u32 qlen) { int i; size_t q_size; struct command_queue *queue = NULL; struct pci_dev *pdev = cptvf->pdev; /* common init */ cqinfo->cmd_size = cmd_size; /* Qsize in dwords, needed for SADDR config, 1-next chunk pointer */ cptvf->qsize = min(qlen, cqinfo->qchunksize) * CPT_NEXT_CHUNK_PTR_SIZE + 1; /* Qsize in bytes to create space for alignment */ q_size = qlen * cqinfo->cmd_size; /* per queue initialization */ for (i = 0; i < cptvf->nr_queues; i++) { size_t c_size = 0; size_t rem_q_size = q_size; struct command_chunk *curr = NULL, *first = NULL, *last = NULL; u32 qcsize_bytes = cqinfo->qchunksize * cqinfo->cmd_size; queue = &cqinfo->queue[i]; INIT_HLIST_HEAD(&cqinfo->queue[i].chead); do { curr = kzalloc(sizeof(*curr), GFP_KERNEL); if (!curr) goto cmd_qfail; c_size = (rem_q_size > qcsize_bytes) ? qcsize_bytes : rem_q_size; curr->head = dma_alloc_coherent(&pdev->dev, c_size + CPT_NEXT_CHUNK_PTR_SIZE, &curr->dma_addr, GFP_KERNEL); if (!curr->head) { dev_err(&pdev->dev, "Command Q (%d) chunk (%d) allocation failed\n", i, queue->nchunks); kfree(curr); goto cmd_qfail; } curr->size = c_size; if (queue->nchunks == 0) { hlist_add_head(&curr->nextchunk, &cqinfo->queue[i].chead); first = curr; } else { hlist_add_behind(&curr->nextchunk, &last->nextchunk); } queue->nchunks++; rem_q_size -= c_size; if (last) *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr; last = curr; } while (rem_q_size); /* Make the queue circular */ /* Tie back last chunk entry to head */ curr = first; *((u64 *)(&last->head[last->size])) = (u64)curr->dma_addr; queue->qhead = curr; spin_lock_init(&queue->lock); } return 0; cmd_qfail: free_command_queues(cptvf, cqinfo); return -ENOMEM; } static int init_command_queues(struct cpt_vf *cptvf, u32 qlen) { struct pci_dev *pdev = cptvf->pdev; int ret; /* setup AE command queues */ ret = alloc_command_queues(cptvf, &cptvf->cqinfo, CPT_INST_SIZE, qlen); if (ret) { dev_err(&pdev->dev, "failed to allocate AE command queues (%u)\n", cptvf->nr_queues); return ret; } return ret; } static void cleanup_command_queues(struct cpt_vf *cptvf) { struct pci_dev *pdev = cptvf->pdev; if (!cptvf->nr_queues) return; dev_info(&pdev->dev, "Cleaning VQ command queue (%u)\n", cptvf->nr_queues); free_command_queues(cptvf, &cptvf->cqinfo); } static void cptvf_sw_cleanup(struct cpt_vf *cptvf) { cleanup_worker_threads(cptvf); cleanup_pending_queues(cptvf); cleanup_command_queues(cptvf); } static int cptvf_sw_init(struct cpt_vf *cptvf, u32 qlen, u32 nr_queues) { struct pci_dev *pdev = cptvf->pdev; int ret = 0; u32 max_dev_queues = 0; max_dev_queues = CPT_NUM_QS_PER_VF; /* possible cpus */ nr_queues = min_t(u32, nr_queues, max_dev_queues); cptvf->nr_queues = nr_queues; ret = init_command_queues(cptvf, qlen); if (ret) { dev_err(&pdev->dev, "Failed to setup command queues (%u)\n", nr_queues); return ret; } ret = init_pending_queues(cptvf, qlen, nr_queues); if (ret) { dev_err(&pdev->dev, "Failed to setup pending queues (%u)\n", nr_queues); goto setup_pqfail; } /* Create worker threads for BH processing */ ret = init_worker_threads(cptvf); if (ret) { dev_err(&pdev->dev, "Failed to setup worker threads\n"); goto init_work_fail; } return 0; init_work_fail: cleanup_worker_threads(cptvf); cleanup_pending_queues(cptvf); setup_pqfail: 
cleanup_command_queues(cptvf); return ret; } static void cptvf_free_irq_affinity(struct cpt_vf *cptvf, int vec) { irq_set_affinity_hint(pci_irq_vector(cptvf->pdev, vec), NULL); free_cpumask_var(cptvf->affinity_mask[vec]); } static void cptvf_write_vq_ctl(struct cpt_vf *cptvf, bool val) { union cptx_vqx_ctl vqx_ctl; vqx_ctl.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0)); vqx_ctl.s.ena = val; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_CTL(0, 0), vqx_ctl.u); } void cptvf_write_vq_doorbell(struct cpt_vf *cptvf, u32 val) { union cptx_vqx_doorbell vqx_dbell; vqx_dbell.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0)); vqx_dbell.s.dbell_cnt = val * 8; /* Num of Instructions * 8 words */ cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DOORBELL(0, 0), vqx_dbell.u); } static void cptvf_write_vq_inprog(struct cpt_vf *cptvf, u8 val) { union cptx_vqx_inprog vqx_inprg; vqx_inprg.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0)); vqx_inprg.s.inflight = val; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_INPROG(0, 0), vqx_inprg.u); } static void cptvf_write_vq_done_numwait(struct cpt_vf *cptvf, u32 val) { union cptx_vqx_done_wait vqx_dwait; vqx_dwait.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0)); vqx_dwait.s.num_wait = val; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u); } static void cptvf_write_vq_done_timewait(struct cpt_vf *cptvf, u16 time) { union cptx_vqx_done_wait vqx_dwait; vqx_dwait.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0)); vqx_dwait.s.time_wait = time; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_WAIT(0, 0), vqx_dwait.u); } static void cptvf_enable_swerr_interrupts(struct cpt_vf *cptvf) { union cptx_vqx_misc_ena_w1s vqx_misc_ena; vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0)); /* Set mbox(0) interupts for the requested vf */ vqx_misc_ena.s.swerr = 1; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0), vqx_misc_ena.u); } static void cptvf_enable_mbox_interrupts(struct cpt_vf *cptvf) { union cptx_vqx_misc_ena_w1s vqx_misc_ena; vqx_misc_ena.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0)); /* Set mbox(0) interupts for the requested vf */ vqx_misc_ena.s.mbox = 1; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_ENA_W1S(0, 0), vqx_misc_ena.u); } static void cptvf_enable_done_interrupts(struct cpt_vf *cptvf) { union cptx_vqx_done_ena_w1s vqx_done_ena; vqx_done_ena.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0)); /* Set DONE interrupt for the requested vf */ vqx_done_ena.s.done = 1; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ENA_W1S(0, 0), vqx_done_ena.u); } static void cptvf_clear_dovf_intr(struct cpt_vf *cptvf) { union cptx_vqx_misc_int vqx_misc_int; vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0)); /* W1C for the VF */ vqx_misc_int.s.dovf = 1; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); } static void cptvf_clear_irde_intr(struct cpt_vf *cptvf) { union cptx_vqx_misc_int vqx_misc_int; vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0)); /* W1C for the VF */ vqx_misc_int.s.irde = 1; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); } static void cptvf_clear_nwrp_intr(struct cpt_vf *cptvf) { union cptx_vqx_misc_int vqx_misc_int; vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0)); /* W1C for the VF */ vqx_misc_int.s.nwrp = 1; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); } static void 
cptvf_clear_mbox_intr(struct cpt_vf *cptvf) { union cptx_vqx_misc_int vqx_misc_int; vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0)); /* W1C for the VF */ vqx_misc_int.s.mbox = 1; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); } static void cptvf_clear_swerr_intr(struct cpt_vf *cptvf) { union cptx_vqx_misc_int vqx_misc_int; vqx_misc_int.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0)); /* W1C for the VF */ vqx_misc_int.s.swerr = 1; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0), vqx_misc_int.u); } static u64 cptvf_read_vf_misc_intr_status(struct cpt_vf *cptvf) { return cpt_read_csr64(cptvf->reg_base, CPTX_VQX_MISC_INT(0, 0)); } static irqreturn_t cptvf_misc_intr_handler(int irq, void *cptvf_irq) { struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq; struct pci_dev *pdev = cptvf->pdev; u64 intr; intr = cptvf_read_vf_misc_intr_status(cptvf); /*Check for MISC interrupt types*/ if (likely(intr & CPT_VF_INTR_MBOX_MASK)) { dev_dbg(&pdev->dev, "Mailbox interrupt 0x%llx on CPT VF %d\n", intr, cptvf->vfid); cptvf_handle_mbox_intr(cptvf); cptvf_clear_mbox_intr(cptvf); } else if (unlikely(intr & CPT_VF_INTR_DOVF_MASK)) { cptvf_clear_dovf_intr(cptvf); /*Clear doorbell count*/ cptvf_write_vq_doorbell(cptvf, 0); dev_err(&pdev->dev, "Doorbell overflow error interrupt 0x%llx on CPT VF %d\n", intr, cptvf->vfid); } else if (unlikely(intr & CPT_VF_INTR_IRDE_MASK)) { cptvf_clear_irde_intr(cptvf); dev_err(&pdev->dev, "Instruction NCB read error interrupt 0x%llx on CPT VF %d\n", intr, cptvf->vfid); } else if (unlikely(intr & CPT_VF_INTR_NWRP_MASK)) { cptvf_clear_nwrp_intr(cptvf); dev_err(&pdev->dev, "NCB response write error interrupt 0x%llx on CPT VF %d\n", intr, cptvf->vfid); } else if (unlikely(intr & CPT_VF_INTR_SERR_MASK)) { cptvf_clear_swerr_intr(cptvf); dev_err(&pdev->dev, "Software error interrupt 0x%llx on CPT VF %d\n", intr, cptvf->vfid); } else { dev_err(&pdev->dev, "Unhandled interrupt in CPT VF %d\n", cptvf->vfid); } return IRQ_HANDLED; } static inline struct cptvf_wqe *get_cptvf_vq_wqe(struct cpt_vf *cptvf, int qno) { struct cptvf_wqe_info *nwqe_info; if (unlikely(qno >= cptvf->nr_queues)) return NULL; nwqe_info = (struct cptvf_wqe_info *)cptvf->wqe_info; return &nwqe_info->vq_wqe[qno]; } static inline u32 cptvf_read_vq_done_count(struct cpt_vf *cptvf) { union cptx_vqx_done vqx_done; vqx_done.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE(0, 0)); return vqx_done.s.done; } static inline void cptvf_write_vq_done_ack(struct cpt_vf *cptvf, u32 ackcnt) { union cptx_vqx_done_ack vqx_dack_cnt; vqx_dack_cnt.u = cpt_read_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0)); vqx_dack_cnt.s.done_ack = ackcnt; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_DONE_ACK(0, 0), vqx_dack_cnt.u); } static irqreturn_t cptvf_done_intr_handler(int irq, void *cptvf_irq) { struct cpt_vf *cptvf = (struct cpt_vf *)cptvf_irq; struct pci_dev *pdev = cptvf->pdev; /* Read the number of completions */ u32 intr = cptvf_read_vq_done_count(cptvf); if (intr) { struct cptvf_wqe *wqe; /* Acknowledge the number of * scheduled completions for processing */ cptvf_write_vq_done_ack(cptvf, intr); wqe = get_cptvf_vq_wqe(cptvf, 0); if (unlikely(!wqe)) { dev_err(&pdev->dev, "No work to schedule for VF (%d)", cptvf->vfid); return IRQ_NONE; } tasklet_hi_schedule(&wqe->twork); } return IRQ_HANDLED; } static void cptvf_set_irq_affinity(struct cpt_vf *cptvf, int vec) { struct pci_dev *pdev = cptvf->pdev; int cpu; if (!zalloc_cpumask_var(&cptvf->affinity_mask[vec], GFP_KERNEL)) { 
dev_err(&pdev->dev, "Allocation failed for affinity_mask for VF %d", cptvf->vfid); return; } cpu = cptvf->vfid % num_online_cpus(); cpumask_set_cpu(cpumask_local_spread(cpu, cptvf->node), cptvf->affinity_mask[vec]); irq_set_affinity_hint(pci_irq_vector(pdev, vec), cptvf->affinity_mask[vec]); } static void cptvf_write_vq_saddr(struct cpt_vf *cptvf, u64 val) { union cptx_vqx_saddr vqx_saddr; vqx_saddr.u = val; cpt_write_csr64(cptvf->reg_base, CPTX_VQX_SADDR(0, 0), vqx_saddr.u); } static void cptvf_device_init(struct cpt_vf *cptvf) { u64 base_addr = 0; /* Disable the VQ */ cptvf_write_vq_ctl(cptvf, 0); /* Reset the doorbell */ cptvf_write_vq_doorbell(cptvf, 0); /* Clear inflight */ cptvf_write_vq_inprog(cptvf, 0); /* Write VQ SADDR */ /* TODO: for now only one queue, so hard coded */ base_addr = (u64)(cptvf->cqinfo.queue[0].qhead->dma_addr); cptvf_write_vq_saddr(cptvf, base_addr); /* Configure timerhold / coalescence */ cptvf_write_vq_done_timewait(cptvf, CPT_TIMER_THOLD); cptvf_write_vq_done_numwait(cptvf, 1); /* Enable the VQ */ cptvf_write_vq_ctl(cptvf, 1); /* Flag the VF ready */ cptvf->flags |= CPT_FLAG_DEVICE_READY; } static int cptvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct cpt_vf *cptvf; int err; cptvf = devm_kzalloc(dev, sizeof(*cptvf), GFP_KERNEL); if (!cptvf) return -ENOMEM; pci_set_drvdata(pdev, cptvf); cptvf->pdev = pdev; err = pci_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); return err; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); goto cptvf_err_disable_device; } /* Mark as VF driver */ cptvf->flags |= CPT_FLAG_VF_DRIVER; err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "Unable to get usable 48-bit DMA configuration\n"); goto cptvf_err_release_regions; } /* MAP PF's configuration registers */ cptvf->reg_base = pcim_iomap(pdev, 0, 0); if (!cptvf->reg_base) { dev_err(dev, "Cannot map config register space, aborting\n"); err = -ENOMEM; goto cptvf_err_release_regions; } cptvf->node = dev_to_node(&pdev->dev); err = pci_alloc_irq_vectors(pdev, CPT_VF_MSIX_VECTORS, CPT_VF_MSIX_VECTORS, PCI_IRQ_MSIX); if (err < 0) { dev_err(dev, "Request for #%d msix vectors failed\n", CPT_VF_MSIX_VECTORS); goto cptvf_err_release_regions; } err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf_misc_intr_handler, 0, "CPT VF misc intr", cptvf); if (err) { dev_err(dev, "Request misc irq failed"); goto cptvf_free_vectors; } /* Enable mailbox interrupt */ cptvf_enable_mbox_interrupts(cptvf); cptvf_enable_swerr_interrupts(cptvf); /* Check ready with PF */ /* Gets chip ID / device Id from PF if ready */ err = cptvf_check_pf_ready(cptvf); if (err) { dev_err(dev, "PF not responding to READY msg"); goto cptvf_free_misc_irq; } /* CPT VF software resources initialization */ cptvf->cqinfo.qchunksize = CPT_CMD_QCHUNK_SIZE; err = cptvf_sw_init(cptvf, CPT_CMD_QLEN, CPT_NUM_QS_PER_VF); if (err) { dev_err(dev, "cptvf_sw_init() failed"); goto cptvf_free_misc_irq; } /* Convey VQ LEN to PF */ err = cptvf_send_vq_size_msg(cptvf); if (err) { dev_err(dev, "PF not responding to QLEN msg"); goto cptvf_free_misc_irq; } /* CPT VF device initialization */ cptvf_device_init(cptvf); /* Send msg to PF to assign currnet Q to required group */ cptvf->vfgrp = 1; err = cptvf_send_vf_to_grp_msg(cptvf); if (err) { dev_err(dev, "PF not responding to VF_GRP msg"); goto cptvf_free_misc_irq; } 
cptvf->priority = 1; err = cptvf_send_vf_priority_msg(cptvf); if (err) { dev_err(dev, "PF not responding to VF_PRIO msg"); goto cptvf_free_misc_irq; } err = request_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf_done_intr_handler, 0, "CPT VF done intr", cptvf); if (err) { dev_err(dev, "Request done irq failed\n"); goto cptvf_free_misc_irq; } /* Enable mailbox interrupt */ cptvf_enable_done_interrupts(cptvf); /* Set irq affinity masks */ cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC); cptvf_set_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE); err = cptvf_send_vf_up(cptvf); if (err) { dev_err(dev, "PF not responding to UP msg"); goto cptvf_free_irq_affinity; } err = cvm_crypto_init(cptvf); if (err) { dev_err(dev, "Algorithm register failed\n"); goto cptvf_free_irq_affinity; } return 0; cptvf_free_irq_affinity: cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE); cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC); cptvf_free_misc_irq: free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf); cptvf_free_vectors: pci_free_irq_vectors(cptvf->pdev); cptvf_err_release_regions: pci_release_regions(pdev); cptvf_err_disable_device: pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; } static void cptvf_remove(struct pci_dev *pdev) { struct cpt_vf *cptvf = pci_get_drvdata(pdev); if (!cptvf) { dev_err(&pdev->dev, "Invalid CPT-VF device\n"); return; } /* Convey DOWN to PF */ if (cptvf_send_vf_down(cptvf)) { dev_err(&pdev->dev, "PF not responding to DOWN msg"); } else { cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_DONE); cptvf_free_irq_affinity(cptvf, CPT_VF_INT_VEC_E_MISC); free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_DONE), cptvf); free_irq(pci_irq_vector(pdev, CPT_VF_INT_VEC_E_MISC), cptvf); pci_free_irq_vectors(cptvf->pdev); cptvf_sw_cleanup(cptvf); pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); pci_disable_device(pdev); cvm_crypto_exit(); } } static void cptvf_shutdown(struct pci_dev *pdev) { cptvf_remove(pdev); } /* Supported devices */ static const struct pci_device_id cptvf_id_table[] = { {PCI_VDEVICE(CAVIUM, CPT_81XX_PCI_VF_DEVICE_ID), 0}, { 0, } /* end of table */ }; static struct pci_driver cptvf_pci_driver = { .name = DRV_NAME, .id_table = cptvf_id_table, .probe = cptvf_probe, .remove = cptvf_remove, .shutdown = cptvf_shutdown, }; module_pci_driver(cptvf_pci_driver); MODULE_AUTHOR("George Cherian <[email protected]>"); MODULE_DESCRIPTION("Cavium Thunder CPT Virtual Function Driver"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cptvf_id_table);
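/*
 * Illustration only: a user-space sketch of how cptvf_misc_intr_handler()
 * above decodes the MISC interrupt status word, mailbox first and the
 * error causes after it.  The EX_INTR_* masks below are placeholder bit
 * positions chosen for the example, not the driver's CPT_VF_INTR_*_MASK
 * definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_INTR_MBOX  (1ull << 0)	/* placeholder bit positions */
#define EX_INTR_DOVF  (1ull << 1)
#define EX_INTR_IRDE  (1ull << 2)
#define EX_INTR_NWRP  (1ull << 3)
#define EX_INTR_SERR  (1ull << 4)

static const char *decode_misc_intr(uint64_t intr)
{
	/* Same precedence as the handler: mailbox first, then the errors. */
	if (intr & EX_INTR_MBOX)
		return "mailbox";
	if (intr & EX_INTR_DOVF)
		return "doorbell overflow";
	if (intr & EX_INTR_IRDE)
		return "instruction NCB read error";
	if (intr & EX_INTR_NWRP)
		return "NCB response write error";
	if (intr & EX_INTR_SERR)
		return "software error";
	return "unhandled";
}

int main(void)
{
	printf("%s\n", decode_misc_intr(EX_INTR_MBOX | EX_INTR_SERR));
	return 0;
}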
linux-master
drivers/crypto/cavium/cpt/cptvf_main.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 Cavium, Inc. */ #include <crypto/aes.h> #include <crypto/algapi.h> #include <crypto/authenc.h> #include <crypto/internal/des.h> #include <crypto/xts.h> #include <linux/crypto.h> #include <linux/err.h> #include <linux/list.h> #include <linux/scatterlist.h> #include "cptvf.h" #include "cptvf_algs.h" struct cpt_device_handle { void *cdev[MAX_DEVICES]; u32 dev_count; }; static struct cpt_device_handle dev_handle; static void cvm_callback(u32 status, void *arg) { struct crypto_async_request *req = (struct crypto_async_request *)arg; crypto_request_complete(req, !status); } static inline void update_input_iv(struct cpt_request_info *req_info, u8 *iv, u32 enc_iv_len, u32 *argcnt) { /* Setting the iv information */ req_info->in[*argcnt].vptr = (void *)iv; req_info->in[*argcnt].size = enc_iv_len; req_info->req.dlen += enc_iv_len; ++(*argcnt); } static inline void update_output_iv(struct cpt_request_info *req_info, u8 *iv, u32 enc_iv_len, u32 *argcnt) { /* Setting the iv information */ req_info->out[*argcnt].vptr = (void *)iv; req_info->out[*argcnt].size = enc_iv_len; req_info->rlen += enc_iv_len; ++(*argcnt); } static inline void update_input_data(struct cpt_request_info *req_info, struct scatterlist *inp_sg, u32 nbytes, u32 *argcnt) { req_info->req.dlen += nbytes; while (nbytes) { u32 len = min(nbytes, inp_sg->length); u8 *ptr = sg_virt(inp_sg); req_info->in[*argcnt].vptr = (void *)ptr; req_info->in[*argcnt].size = len; nbytes -= len; ++(*argcnt); ++inp_sg; } } static inline void update_output_data(struct cpt_request_info *req_info, struct scatterlist *outp_sg, u32 nbytes, u32 *argcnt) { req_info->rlen += nbytes; while (nbytes) { u32 len = min(nbytes, outp_sg->length); u8 *ptr = sg_virt(outp_sg); req_info->out[*argcnt].vptr = (void *)ptr; req_info->out[*argcnt].size = len; nbytes -= len; ++(*argcnt); ++outp_sg; } } static inline u32 create_ctx_hdr(struct skcipher_request *req, u32 enc, u32 *argcnt) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(tfm); struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req); struct fc_context *fctx = &rctx->fctx; u32 enc_iv_len = crypto_skcipher_ivsize(tfm); struct cpt_request_info *req_info = &rctx->cpt_req; __be64 *ctrl_flags = NULL; __be64 *offset_control; req_info->ctrl.s.grp = 0; req_info->ctrl.s.dma_mode = DMA_GATHER_SCATTER; req_info->ctrl.s.se_req = SE_CORE_REQ; req_info->req.opcode.s.major = MAJOR_OP_FC | DMA_MODE_FLAG(DMA_GATHER_SCATTER); if (enc) req_info->req.opcode.s.minor = 2; else req_info->req.opcode.s.minor = 3; req_info->req.param1 = req->cryptlen; /* Encryption Data length */ req_info->req.param2 = 0; /*Auth data length */ fctx->enc.enc_ctrl.e.enc_cipher = ctx->cipher_type; fctx->enc.enc_ctrl.e.aes_key = ctx->key_type; fctx->enc.enc_ctrl.e.iv_source = FROM_DPTR; if (ctx->cipher_type == AES_XTS) memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len * 2); else memcpy(fctx->enc.encr_key, ctx->enc_key, ctx->key_len); ctrl_flags = (__be64 *)&fctx->enc.enc_ctrl.flags; *ctrl_flags = cpu_to_be64(fctx->enc.enc_ctrl.flags); offset_control = (__be64 *)&rctx->control_word; *offset_control = cpu_to_be64(((u64)(enc_iv_len) << 16)); /* Storing Packet Data Information in offset * Control Word First 8 bytes */ req_info->in[*argcnt].vptr = (u8 *)offset_control; req_info->in[*argcnt].size = CONTROL_WORD_LEN; req_info->req.dlen += CONTROL_WORD_LEN; ++(*argcnt); req_info->in[*argcnt].vptr = (u8 *)fctx; req_info->in[*argcnt].size = sizeof(struct 
fc_context); req_info->req.dlen += sizeof(struct fc_context); ++(*argcnt); return 0; } static inline u32 create_input_list(struct skcipher_request *req, u32 enc, u32 enc_iv_len) { struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req); struct cpt_request_info *req_info = &rctx->cpt_req; u32 argcnt = 0; create_ctx_hdr(req, enc, &argcnt); update_input_iv(req_info, req->iv, enc_iv_len, &argcnt); update_input_data(req_info, req->src, req->cryptlen, &argcnt); req_info->incnt = argcnt; return 0; } static inline void store_cb_info(struct skcipher_request *req, struct cpt_request_info *req_info) { req_info->callback = (void *)cvm_callback; req_info->callback_arg = (void *)&req->base; } static inline void create_output_list(struct skcipher_request *req, u32 enc_iv_len) { struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req); struct cpt_request_info *req_info = &rctx->cpt_req; u32 argcnt = 0; /* OUTPUT Buffer Processing * AES encryption/decryption output would be * received in the following format * * ------IV--------|------ENCRYPTED/DECRYPTED DATA-----| * [ 16 Bytes/ [ Request Enc/Dec/ DATA Len AES CBC ] */ /* Reading IV information */ update_output_iv(req_info, req->iv, enc_iv_len, &argcnt); update_output_data(req_info, req->dst, req->cryptlen, &argcnt); req_info->outcnt = argcnt; } static inline int cvm_enc_dec(struct skcipher_request *req, u32 enc) { struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); struct cvm_req_ctx *rctx = skcipher_request_ctx_dma(req); u32 enc_iv_len = crypto_skcipher_ivsize(tfm); struct fc_context *fctx = &rctx->fctx; struct cpt_request_info *req_info = &rctx->cpt_req; void *cdev = NULL; int status; memset(req_info, 0, sizeof(struct cpt_request_info)); req_info->may_sleep = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) != 0; memset(fctx, 0, sizeof(struct fc_context)); create_input_list(req, enc, enc_iv_len); create_output_list(req, enc_iv_len); store_cb_info(req, req_info); cdev = dev_handle.cdev[smp_processor_id()]; status = cptvf_do_request(cdev, req_info); /* We perform an asynchronous send and once * the request is completed the driver would * intimate through registered call back functions */ if (status) return status; else return -EINPROGRESS; } static int cvm_encrypt(struct skcipher_request *req) { return cvm_enc_dec(req, true); } static int cvm_decrypt(struct skcipher_request *req) { return cvm_enc_dec(req, false); } static int cvm_xts_setkey(struct crypto_skcipher *cipher, const u8 *key, u32 keylen) { struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(cipher); int err; const u8 *key1 = key; const u8 *key2 = key + (keylen / 2); err = xts_verify_key(cipher, key, keylen); if (err) return err; ctx->key_len = keylen; memcpy(ctx->enc_key, key1, keylen / 2); memcpy(ctx->enc_key + KEY2_OFFSET, key2, keylen / 2); ctx->cipher_type = AES_XTS; switch (ctx->key_len) { case 32: ctx->key_type = AES_128_BIT; break; case 64: ctx->key_type = AES_256_BIT; break; default: return -EINVAL; } return 0; } static int cvm_validate_keylen(struct cvm_enc_ctx *ctx, u32 keylen) { if ((keylen == 16) || (keylen == 24) || (keylen == 32)) { ctx->key_len = keylen; switch (ctx->key_len) { case 16: ctx->key_type = AES_128_BIT; break; case 24: ctx->key_type = AES_192_BIT; break; case 32: ctx->key_type = AES_256_BIT; break; default: return -EINVAL; } if (ctx->cipher_type == DES3_CBC) ctx->key_type = 0; return 0; } return -EINVAL; } static int cvm_setkey(struct crypto_skcipher *cipher, const u8 *key, u32 keylen, u8 cipher_type) { struct cvm_enc_ctx *ctx = crypto_skcipher_ctx(cipher); 
ctx->cipher_type = cipher_type; if (!cvm_validate_keylen(ctx, keylen)) { memcpy(ctx->enc_key, key, keylen); return 0; } else { return -EINVAL; } } static int cvm_cbc_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, u32 keylen) { return cvm_setkey(cipher, key, keylen, AES_CBC); } static int cvm_ecb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, u32 keylen) { return cvm_setkey(cipher, key, keylen, AES_ECB); } static int cvm_cfb_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, u32 keylen) { return cvm_setkey(cipher, key, keylen, AES_CFB); } static int cvm_cbc_des3_setkey(struct crypto_skcipher *cipher, const u8 *key, u32 keylen) { return verify_skcipher_des3_key(cipher, key) ?: cvm_setkey(cipher, key, keylen, DES3_CBC); } static int cvm_ecb_des3_setkey(struct crypto_skcipher *cipher, const u8 *key, u32 keylen) { return verify_skcipher_des3_key(cipher, key) ?: cvm_setkey(cipher, key, keylen, DES3_ECB); } static int cvm_enc_dec_init(struct crypto_skcipher *tfm) { crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct cvm_req_ctx)); return 0; } static struct skcipher_alg algs[] = { { .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, .base.cra_priority = 4001, .base.cra_name = "xts(aes)", .base.cra_driver_name = "cavium-xts-aes", .base.cra_module = THIS_MODULE, .ivsize = AES_BLOCK_SIZE, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .setkey = cvm_xts_setkey, .encrypt = cvm_encrypt, .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, .base.cra_priority = 4001, .base.cra_name = "cbc(aes)", .base.cra_driver_name = "cavium-cbc-aes", .base.cra_module = THIS_MODULE, .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = cvm_cbc_aes_setkey, .encrypt = cvm_encrypt, .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, .base.cra_priority = 4001, .base.cra_name = "ecb(aes)", .base.cra_driver_name = "cavium-ecb-aes", .base.cra_module = THIS_MODULE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = cvm_ecb_aes_setkey, .encrypt = cvm_encrypt, .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = AES_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_enc_ctx), .base.cra_alignmask = 7, .base.cra_priority = 4001, .base.cra_name = "cfb(aes)", .base.cra_driver_name = "cavium-cfb-aes", .base.cra_module = THIS_MODULE, .ivsize = AES_BLOCK_SIZE, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .setkey = cvm_cfb_aes_setkey, .encrypt = cvm_encrypt, .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_des3_ctx), .base.cra_alignmask = 7, .base.cra_priority = 4001, .base.cra_name = "cbc(des3_ede)", .base.cra_driver_name = "cavium-cbc-des3_ede", .base.cra_module = THIS_MODULE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, .setkey 
= cvm_cbc_des3_setkey, .encrypt = cvm_encrypt, .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, }, { .base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, .base.cra_ctxsize = sizeof(struct cvm_des3_ctx), .base.cra_alignmask = 7, .base.cra_priority = 4001, .base.cra_name = "ecb(des3_ede)", .base.cra_driver_name = "cavium-ecb-des3_ede", .base.cra_module = THIS_MODULE, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, .setkey = cvm_ecb_des3_setkey, .encrypt = cvm_encrypt, .decrypt = cvm_decrypt, .init = cvm_enc_dec_init, } }; static inline int cav_register_algs(void) { return crypto_register_skciphers(algs, ARRAY_SIZE(algs)); } static inline void cav_unregister_algs(void) { crypto_unregister_skciphers(algs, ARRAY_SIZE(algs)); } int cvm_crypto_init(struct cpt_vf *cptvf) { struct pci_dev *pdev = cptvf->pdev; u32 dev_count; dev_count = dev_handle.dev_count; dev_handle.cdev[dev_count] = cptvf; dev_handle.dev_count++; if (dev_count == 3) { if (cav_register_algs()) { dev_err(&pdev->dev, "Error in registering crypto algorithms\n"); return -EINVAL; } } return 0; } void cvm_crypto_exit(void) { u32 dev_count; dev_count = --dev_handle.dev_count; if (!dev_count) cav_unregister_algs(); }
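/*
 * Illustration only: a user-space sketch of the key-length checks done by
 * cvm_validate_keylen() and cvm_xts_setkey() above.  The enum values are
 * placeholders standing in for the driver's AES_*_BIT constants.
 */
#include <stdio.h>

enum ex_key_type { EX_AES_128, EX_AES_192, EX_AES_256, EX_KEY_INVALID };

/* Plain AES modes accept 16-, 24- or 32-byte keys. */
static enum ex_key_type ex_aes_key_type(unsigned int keylen)
{
	switch (keylen) {
	case 16: return EX_AES_128;
	case 24: return EX_AES_192;
	case 32: return EX_AES_256;
	default: return EX_KEY_INVALID;
	}
}

/* XTS carries two keys back to back, so only 32 or 64 bytes are valid. */
static enum ex_key_type ex_xts_key_type(unsigned int keylen)
{
	switch (keylen) {
	case 32: return EX_AES_128;
	case 64: return EX_AES_256;
	default: return EX_KEY_INVALID;
	}
}

int main(void)
{
	printf("aes keylen 24 -> type %d\n", ex_aes_key_type(24));
	printf("xts keylen 64 -> type %d\n", ex_xts_key_type(64));
	return 0;
}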
linux-master
drivers/crypto/cavium/cpt/cptvf_algs.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 Cavium, Inc. */ #include <linux/module.h> #include "cptpf.h" static void cpt_send_msg_to_vf(struct cpt_device *cpt, int vf, struct cpt_mbox *mbx) { /* Writing mbox(0) causes interrupt */ cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1), mbx->data); cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0), mbx->msg); } /* ACKs VF's mailbox message * @vf: VF to which ACK to be sent */ static void cpt_mbox_send_ack(struct cpt_device *cpt, int vf, struct cpt_mbox *mbx) { mbx->data = 0ull; mbx->msg = CPT_MBOX_MSG_TYPE_ACK; cpt_send_msg_to_vf(cpt, vf, mbx); } static void cpt_clear_mbox_intr(struct cpt_device *cpt, u32 vf) { /* W1C for the VF */ cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0), (1 << vf)); } /* * Configure QLEN/Chunk sizes for VF */ static void cpt_cfg_qlen_for_vf(struct cpt_device *cpt, int vf, u32 size) { union cptx_pf_qx_ctl pf_qx_ctl; pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf)); pf_qx_ctl.s.size = size; pf_qx_ctl.s.cont_err = true; cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u); } /* * Configure VQ priority */ static void cpt_cfg_vq_priority(struct cpt_device *cpt, int vf, u32 pri) { union cptx_pf_qx_ctl pf_qx_ctl; pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf)); pf_qx_ctl.s.pri = pri; cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u); } static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp) { struct microcode *mcode = cpt->mcode; union cptx_pf_qx_ctl pf_qx_ctl; struct device *dev = &cpt->pdev->dev; if (q >= CPT_MAX_VF_NUM) { dev_err(dev, "Queues are more than cores in the group"); return -EINVAL; } if (grp >= CPT_MAX_CORE_GROUPS) { dev_err(dev, "Request group is more than possible groups"); return -EINVAL; } if (grp >= cpt->next_mc_idx) { dev_err(dev, "Request group is higher than available functional groups"); return -EINVAL; } pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q)); pf_qx_ctl.s.grp = mcode[grp].group; cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q), pf_qx_ctl.u); dev_dbg(dev, "VF %d TYPE %s", q, (mcode[grp].is_ae ? "AE" : "SE")); return mcode[grp].is_ae ? 
AE_TYPES : SE_TYPES; } /* Interrupt handler to handle mailbox messages from VFs */ static void cpt_handle_mbox_intr(struct cpt_device *cpt, int vf) { struct cpt_vf_info *vfx = &cpt->vfinfo[vf]; struct cpt_mbox mbx = {}; int vftype; struct device *dev = &cpt->pdev->dev; /* * MBOX[0] contains msg * MBOX[1] contains data */ mbx.msg = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0)); mbx.data = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1)); dev_dbg(dev, "%s: Mailbox msg 0x%llx from VF%d", __func__, mbx.msg, vf); switch (mbx.msg) { case CPT_MSG_VF_UP: vfx->state = VF_STATE_UP; try_module_get(THIS_MODULE); cpt_mbox_send_ack(cpt, vf, &mbx); break; case CPT_MSG_READY: mbx.msg = CPT_MSG_READY; mbx.data = vf; cpt_send_msg_to_vf(cpt, vf, &mbx); break; case CPT_MSG_VF_DOWN: /* First msg in VF teardown sequence */ vfx->state = VF_STATE_DOWN; module_put(THIS_MODULE); cpt_mbox_send_ack(cpt, vf, &mbx); break; case CPT_MSG_QLEN: vfx->qlen = mbx.data; cpt_cfg_qlen_for_vf(cpt, vf, vfx->qlen); cpt_mbox_send_ack(cpt, vf, &mbx); break; case CPT_MSG_QBIND_GRP: vftype = cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data); if ((vftype != AE_TYPES) && (vftype != SE_TYPES)) dev_err(dev, "Queue %d binding to group %llu failed", vf, mbx.data); else { dev_dbg(dev, "Queue %d binding to group %llu successful", vf, mbx.data); mbx.msg = CPT_MSG_QBIND_GRP; mbx.data = vftype; cpt_send_msg_to_vf(cpt, vf, &mbx); } break; case CPT_MSG_VQ_PRIORITY: vfx->priority = mbx.data; cpt_cfg_vq_priority(cpt, vf, vfx->priority); cpt_mbox_send_ack(cpt, vf, &mbx); break; default: dev_err(&cpt->pdev->dev, "Invalid msg from VF%d, msg 0x%llx\n", vf, mbx.msg); break; } } void cpt_mbox_intr_handler (struct cpt_device *cpt, int mbx) { u64 intr; u8 vf; intr = cpt_read_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0)); dev_dbg(&cpt->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr); for (vf = 0; vf < CPT_MAX_VF_NUM; vf++) { if (intr & (1ULL << vf)) { dev_dbg(&cpt->pdev->dev, "Intr from VF %d\n", vf); cpt_handle_mbox_intr(cpt, vf); cpt_clear_mbox_intr(cpt, vf); } } }
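/*
 * Illustration only: cpt_mbox_intr_handler() above walks a 64-bit mailbox
 * interrupt summary register and services one VF per set bit.  This
 * user-space sketch shows the same walk; handle_vf() stands in for
 * cpt_handle_mbox_intr() followed by cpt_clear_mbox_intr().
 */
#include <stdint.h>
#include <stdio.h>

static void handle_vf(unsigned int vf)
{
	printf("mailbox pending from VF %u\n", vf);
}

static void walk_mbox_summary(uint64_t intr, unsigned int max_vfs)
{
	unsigned int vf;

	for (vf = 0; vf < max_vfs; vf++)
		if (intr & (1ULL << vf))
			handle_vf(vf);
}

int main(void)
{
	walk_mbox_summary((1ULL << 3) | (1ULL << 12), 16);
	return 0;
}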
linux-master
drivers/crypto/cavium/cpt/cptpf_mbox.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 Cavium, Inc. */ #include "cptvf.h" #include "cptvf_algs.h" #include "request_manager.h" /** * get_free_pending_entry - get free entry from pending queue * @q: pending queue * @qlen: queue length */ static struct pending_entry *get_free_pending_entry(struct pending_queue *q, int qlen) { struct pending_entry *ent = NULL; ent = &q->head[q->rear]; if (unlikely(ent->busy)) { ent = NULL; goto no_free_entry; } q->rear++; if (unlikely(q->rear == qlen)) q->rear = 0; no_free_entry: return ent; } static inline void pending_queue_inc_front(struct pending_qinfo *pqinfo, int qno) { struct pending_queue *queue = &pqinfo->queue[qno]; queue->front++; if (unlikely(queue->front == pqinfo->qlen)) queue->front = 0; } static int setup_sgio_components(struct cpt_vf *cptvf, struct buf_ptr *list, int buf_count, u8 *buffer) { int ret = 0, i, j; int components; struct sglist_component *sg_ptr = NULL; struct pci_dev *pdev = cptvf->pdev; if (unlikely(!list)) { dev_err(&pdev->dev, "Input List pointer is NULL\n"); return -EFAULT; } for (i = 0; i < buf_count; i++) { if (likely(list[i].vptr)) { list[i].dma_addr = dma_map_single(&pdev->dev, list[i].vptr, list[i].size, DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(&pdev->dev, list[i].dma_addr))) { dev_err(&pdev->dev, "DMA map kernel buffer failed for component: %d\n", i); ret = -EIO; goto sg_cleanup; } } } components = buf_count / 4; sg_ptr = (struct sglist_component *)buffer; for (i = 0; i < components; i++) { sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size); sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size); sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size); sg_ptr->u.s.len3 = cpu_to_be16(list[i * 4 + 3].size); sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr); sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr); sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr); sg_ptr->ptr3 = cpu_to_be64(list[i * 4 + 3].dma_addr); sg_ptr++; } components = buf_count % 4; switch (components) { case 3: sg_ptr->u.s.len2 = cpu_to_be16(list[i * 4 + 2].size); sg_ptr->ptr2 = cpu_to_be64(list[i * 4 + 2].dma_addr); fallthrough; case 2: sg_ptr->u.s.len1 = cpu_to_be16(list[i * 4 + 1].size); sg_ptr->ptr1 = cpu_to_be64(list[i * 4 + 1].dma_addr); fallthrough; case 1: sg_ptr->u.s.len0 = cpu_to_be16(list[i * 4 + 0].size); sg_ptr->ptr0 = cpu_to_be64(list[i * 4 + 0].dma_addr); break; default: break; } return ret; sg_cleanup: /* unmap only the entries mapped so far, using the loop index j */ for (j = 0; j < i; j++) { if (list[j].dma_addr) { dma_unmap_single(&pdev->dev, list[j].dma_addr, list[j].size, DMA_BIDIRECTIONAL); } list[j].dma_addr = 0; } return ret; } static inline int setup_sgio_list(struct cpt_vf *cptvf, struct cpt_info_buffer *info, struct cpt_request_info *req) { u16 g_sz_bytes = 0, s_sz_bytes = 0; int ret = 0; struct pci_dev *pdev = cptvf->pdev; if (req->incnt > MAX_SG_IN_CNT || req->outcnt > MAX_SG_OUT_CNT) { dev_err(&pdev->dev, "Request SG components are higher than supported\n"); ret = -EINVAL; goto scatter_gather_clean; } /* Setup gather (input) components */ g_sz_bytes = ((req->incnt + 3) / 4) * sizeof(struct sglist_component); info->gather_components = kzalloc(g_sz_bytes, req->may_sleep ?
GFP_KERNEL : GFP_ATOMIC); if (!info->gather_components) { ret = -ENOMEM; goto scatter_gather_clean; } ret = setup_sgio_components(cptvf, req->in, req->incnt, info->gather_components); if (ret) { dev_err(&pdev->dev, "Failed to setup gather list\n"); ret = -EFAULT; goto scatter_gather_clean; } /* Setup scatter (output) components */ s_sz_bytes = ((req->outcnt + 3) / 4) * sizeof(struct sglist_component); info->scatter_components = kzalloc(s_sz_bytes, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->scatter_components) { ret = -ENOMEM; goto scatter_gather_clean; } ret = setup_sgio_components(cptvf, req->out, req->outcnt, info->scatter_components); if (ret) { dev_err(&pdev->dev, "Failed to setup gather list\n"); ret = -EFAULT; goto scatter_gather_clean; } /* Create and initialize DPTR */ info->dlen = g_sz_bytes + s_sz_bytes + SG_LIST_HDR_SIZE; info->in_buffer = kzalloc(info->dlen, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->in_buffer) { ret = -ENOMEM; goto scatter_gather_clean; } ((__be16 *)info->in_buffer)[0] = cpu_to_be16(req->outcnt); ((__be16 *)info->in_buffer)[1] = cpu_to_be16(req->incnt); ((__be16 *)info->in_buffer)[2] = 0; ((__be16 *)info->in_buffer)[3] = 0; memcpy(&info->in_buffer[8], info->gather_components, g_sz_bytes); memcpy(&info->in_buffer[8 + g_sz_bytes], info->scatter_components, s_sz_bytes); info->dptr_baddr = dma_map_single(&pdev->dev, (void *)info->in_buffer, info->dlen, DMA_BIDIRECTIONAL); if (dma_mapping_error(&pdev->dev, info->dptr_baddr)) { dev_err(&pdev->dev, "Mapping DPTR Failed %d\n", info->dlen); ret = -EIO; goto scatter_gather_clean; } /* Create and initialize RPTR */ info->out_buffer = kzalloc(COMPLETION_CODE_SIZE, req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!info->out_buffer) { ret = -ENOMEM; goto scatter_gather_clean; } *((u64 *)info->out_buffer) = ~((u64)COMPLETION_CODE_INIT); info->alternate_caddr = (u64 *)info->out_buffer; info->rptr_baddr = dma_map_single(&pdev->dev, (void *)info->out_buffer, COMPLETION_CODE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(&pdev->dev, info->rptr_baddr)) { dev_err(&pdev->dev, "Mapping RPTR Failed %d\n", COMPLETION_CODE_SIZE); ret = -EIO; goto scatter_gather_clean; } return 0; scatter_gather_clean: return ret; } static int send_cpt_command(struct cpt_vf *cptvf, union cpt_inst_s *cmd, u32 qno) { struct pci_dev *pdev = cptvf->pdev; struct command_qinfo *qinfo = NULL; struct command_queue *queue; struct command_chunk *chunk; u8 *ent; int ret = 0; if (unlikely(qno >= cptvf->nr_queues)) { dev_err(&pdev->dev, "Invalid queue (qno: %d, nr_queues: %d)\n", qno, cptvf->nr_queues); return -EINVAL; } qinfo = &cptvf->cqinfo; queue = &qinfo->queue[qno]; /* lock commad queue */ spin_lock(&queue->lock); ent = &queue->qhead->head[queue->idx * qinfo->cmd_size]; memcpy(ent, (void *)cmd, qinfo->cmd_size); if (++queue->idx >= queue->qhead->size / 64) { hlist_for_each_entry(chunk, &queue->chead, nextchunk) { if (chunk == queue->qhead) { continue; } else { queue->qhead = chunk; break; } } queue->idx = 0; } /* make sure all memory stores are done before ringing doorbell */ smp_wmb(); cptvf_write_vq_doorbell(cptvf, 1); /* unlock command queue */ spin_unlock(&queue->lock); return ret; } static void do_request_cleanup(struct cpt_vf *cptvf, struct cpt_info_buffer *info) { int i; struct pci_dev *pdev = cptvf->pdev; struct cpt_request_info *req; if (info->dptr_baddr) dma_unmap_single(&pdev->dev, info->dptr_baddr, info->dlen, DMA_BIDIRECTIONAL); if (info->rptr_baddr) dma_unmap_single(&pdev->dev, info->rptr_baddr, COMPLETION_CODE_SIZE, 
DMA_BIDIRECTIONAL); if (info->comp_baddr) dma_unmap_single(&pdev->dev, info->comp_baddr, sizeof(union cpt_res_s), DMA_BIDIRECTIONAL); if (info->req) { req = info->req; for (i = 0; i < req->outcnt; i++) { if (req->out[i].dma_addr) dma_unmap_single(&pdev->dev, req->out[i].dma_addr, req->out[i].size, DMA_BIDIRECTIONAL); } for (i = 0; i < req->incnt; i++) { if (req->in[i].dma_addr) dma_unmap_single(&pdev->dev, req->in[i].dma_addr, req->in[i].size, DMA_BIDIRECTIONAL); } } kfree_sensitive(info->scatter_components); kfree_sensitive(info->gather_components); kfree_sensitive(info->out_buffer); kfree_sensitive(info->in_buffer); kfree_sensitive((void *)info->completion_addr); kfree_sensitive(info); } static void do_post_process(struct cpt_vf *cptvf, struct cpt_info_buffer *info) { struct pci_dev *pdev = cptvf->pdev; if (!info) { dev_err(&pdev->dev, "incorrect cpt_info_buffer for post processing\n"); return; } do_request_cleanup(cptvf, info); } static inline void process_pending_queue(struct cpt_vf *cptvf, struct pending_qinfo *pqinfo, int qno) { struct pci_dev *pdev = cptvf->pdev; struct pending_queue *pqueue = &pqinfo->queue[qno]; struct pending_entry *pentry = NULL; struct cpt_info_buffer *info = NULL; union cpt_res_s *status = NULL; unsigned char ccode; while (1) { spin_lock_bh(&pqueue->lock); pentry = &pqueue->head[pqueue->front]; if (unlikely(!pentry->busy)) { spin_unlock_bh(&pqueue->lock); break; } info = (struct cpt_info_buffer *)pentry->post_arg; if (unlikely(!info)) { dev_err(&pdev->dev, "Pending Entry post arg NULL\n"); pending_queue_inc_front(pqinfo, qno); spin_unlock_bh(&pqueue->lock); continue; } status = (union cpt_res_s *)pentry->completion_addr; ccode = status->s.compcode; if ((status->s.compcode == CPT_COMP_E_FAULT) || (status->s.compcode == CPT_COMP_E_SWERR)) { dev_err(&pdev->dev, "Request failed with %s\n", (status->s.compcode == CPT_COMP_E_FAULT) ? 
"DMA Fault" : "Software error"); pentry->completion_addr = NULL; pentry->busy = false; atomic64_dec((&pqueue->pending_count)); pentry->post_arg = NULL; pending_queue_inc_front(pqinfo, qno); do_request_cleanup(cptvf, info); spin_unlock_bh(&pqueue->lock); break; } else if (status->s.compcode == COMPLETION_CODE_INIT) { /* check for timeout */ if (time_after_eq(jiffies, (info->time_in + (CPT_COMMAND_TIMEOUT * HZ)))) { dev_err(&pdev->dev, "Request timed out"); pentry->completion_addr = NULL; pentry->busy = false; atomic64_dec((&pqueue->pending_count)); pentry->post_arg = NULL; pending_queue_inc_front(pqinfo, qno); do_request_cleanup(cptvf, info); spin_unlock_bh(&pqueue->lock); break; } else if ((*info->alternate_caddr == (~COMPLETION_CODE_INIT)) && (info->extra_time < TIME_IN_RESET_COUNT)) { info->time_in = jiffies; info->extra_time++; spin_unlock_bh(&pqueue->lock); break; } } pentry->completion_addr = NULL; pentry->busy = false; pentry->post_arg = NULL; atomic64_dec((&pqueue->pending_count)); pending_queue_inc_front(pqinfo, qno); spin_unlock_bh(&pqueue->lock); do_post_process(info->cptvf, info); /* * Calling callback after we find * that the request has been serviced */ pentry->callback(ccode, pentry->callback_arg); } } int process_request(struct cpt_vf *cptvf, struct cpt_request_info *req) { int ret = 0, clear = 0, queue = 0; struct cpt_info_buffer *info = NULL; struct cptvf_request *cpt_req = NULL; union ctrl_info *ctrl = NULL; union cpt_res_s *result = NULL; struct pending_entry *pentry = NULL; struct pending_queue *pqueue = NULL; struct pci_dev *pdev = cptvf->pdev; u8 group = 0; struct cpt_vq_command vq_cmd; union cpt_inst_s cptinst; info = kzalloc(sizeof(*info), req->may_sleep ? GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info)) { dev_err(&pdev->dev, "Unable to allocate memory for info_buffer\n"); return -ENOMEM; } cpt_req = (struct cptvf_request *)&req->req; ctrl = (union ctrl_info *)&req->ctrl; info->cptvf = cptvf; group = ctrl->s.grp; ret = setup_sgio_list(cptvf, info, req); if (ret) { dev_err(&pdev->dev, "Setting up SG list failed"); goto request_cleanup; } cpt_req->dlen = info->dlen; /* * Get buffer for union cpt_res_s response * structure and its physical address */ info->completion_addr = kzalloc(sizeof(union cpt_res_s), req->may_sleep ? 
GFP_KERNEL : GFP_ATOMIC); if (unlikely(!info->completion_addr)) { dev_err(&pdev->dev, "Unable to allocate memory for completion_addr\n"); ret = -ENOMEM; goto request_cleanup; } result = (union cpt_res_s *)info->completion_addr; result->s.compcode = COMPLETION_CODE_INIT; info->comp_baddr = dma_map_single(&pdev->dev, (void *)info->completion_addr, sizeof(union cpt_res_s), DMA_BIDIRECTIONAL); if (dma_mapping_error(&pdev->dev, info->comp_baddr)) { dev_err(&pdev->dev, "mapping compptr Failed %lu\n", sizeof(union cpt_res_s)); ret = -EFAULT; goto request_cleanup; } /* Fill the VQ command */ vq_cmd.cmd.u64 = 0; vq_cmd.cmd.s.opcode = cpu_to_be16(cpt_req->opcode.flags); vq_cmd.cmd.s.param1 = cpu_to_be16(cpt_req->param1); vq_cmd.cmd.s.param2 = cpu_to_be16(cpt_req->param2); vq_cmd.cmd.s.dlen = cpu_to_be16(cpt_req->dlen); vq_cmd.dptr = info->dptr_baddr; vq_cmd.rptr = info->rptr_baddr; vq_cmd.cptr.u64 = 0; vq_cmd.cptr.s.grp = group; /* Get Pending Entry to submit command */ /* Always queue 0, because 1 queue per VF */ queue = 0; pqueue = &cptvf->pqinfo.queue[queue]; if (atomic64_read(&pqueue->pending_count) > PENDING_THOLD) { dev_err(&pdev->dev, "pending threshold reached\n"); process_pending_queue(cptvf, &cptvf->pqinfo, queue); } get_pending_entry: spin_lock_bh(&pqueue->lock); pentry = get_free_pending_entry(pqueue, cptvf->pqinfo.qlen); if (unlikely(!pentry)) { spin_unlock_bh(&pqueue->lock); if (clear == 0) { process_pending_queue(cptvf, &cptvf->pqinfo, queue); clear = 1; goto get_pending_entry; } dev_err(&pdev->dev, "Get free entry failed\n"); dev_err(&pdev->dev, "queue: %d, rear: %d, front: %d\n", queue, pqueue->rear, pqueue->front); ret = -EFAULT; goto request_cleanup; } pentry->completion_addr = info->completion_addr; pentry->post_arg = (void *)info; pentry->callback = req->callback; pentry->callback_arg = req->callback_arg; info->pentry = pentry; pentry->busy = true; atomic64_inc(&pqueue->pending_count); /* Send CPT command */ info->pentry = pentry; info->time_in = jiffies; info->req = req; /* Create the CPT_INST_S type command for HW intrepretation */ cptinst.s.doneint = true; cptinst.s.res_addr = (u64)info->comp_baddr; cptinst.s.tag = 0; cptinst.s.grp = 0; cptinst.s.wq_ptr = 0; cptinst.s.ei0 = vq_cmd.cmd.u64; cptinst.s.ei1 = vq_cmd.dptr; cptinst.s.ei2 = vq_cmd.rptr; cptinst.s.ei3 = vq_cmd.cptr.u64; ret = send_cpt_command(cptvf, &cptinst, queue); spin_unlock_bh(&pqueue->lock); if (unlikely(ret)) { dev_err(&pdev->dev, "Send command failed for AE\n"); ret = -EFAULT; goto request_cleanup; } return 0; request_cleanup: dev_dbg(&pdev->dev, "Failed to submit CPT command\n"); do_request_cleanup(cptvf, info); return ret; } void vq_post_process(struct cpt_vf *cptvf, u32 qno) { struct pci_dev *pdev = cptvf->pdev; if (unlikely(qno > cptvf->nr_queues)) { dev_err(&pdev->dev, "Request for post processing on invalid pending queue: %u\n", qno); return; } process_pending_queue(cptvf, &cptvf->pqinfo, qno); } int cptvf_do_request(void *vfdev, struct cpt_request_info *req) { struct cpt_vf *cptvf = (struct cpt_vf *)vfdev; struct pci_dev *pdev = cptvf->pdev; if (!cpt_device_ready(cptvf)) { dev_err(&pdev->dev, "CPT Device is not ready"); return -ENODEV; } if ((cptvf->vftype == SE_TYPES) && (!req->ctrl.s.se_req)) { dev_err(&pdev->dev, "CPTVF-%d of SE TYPE got AE request", cptvf->vfid); return -EINVAL; } else if ((cptvf->vftype == AE_TYPES) && (req->ctrl.s.se_req)) { dev_err(&pdev->dev, "CPTVF-%d of AE TYPE got SE request", cptvf->vfid); return -EINVAL; } return process_request(cptvf, req); }
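/*
 * Illustration only: a user-space sketch of the DPTR sizing performed by
 * setup_sgio_list() above.  Each scatter/gather component describes up to
 * four buffers, and the DPTR buffer is a small header (out-count, in-count,
 * two reserved shorts) followed by the gather and then scatter component
 * arrays.  EX_COMPONENT_SIZE and EX_HDR_SIZE are placeholder values, not
 * the driver's sizeof(struct sglist_component) or SG_LIST_HDR_SIZE.
 */
#include <stdio.h>

#define EX_COMPONENT_SIZE 40u	/* placeholder: 4 x (16-bit len + 64-bit ptr) */
#define EX_HDR_SIZE       8u	/* placeholder header length */

static unsigned int ex_component_bytes(unsigned int nbufs)
{
	/* Round the buffer count up to a multiple of four components. */
	return ((nbufs + 3) / 4) * EX_COMPONENT_SIZE;
}

static unsigned int ex_dptr_len(unsigned int incnt, unsigned int outcnt)
{
	return EX_HDR_SIZE + ex_component_bytes(incnt) +
	       ex_component_bytes(outcnt);
}

int main(void)
{
	/* e.g. 5 gather buffers and 3 scatter buffers */
	printf("dlen = %u bytes\n", ex_dptr_len(5, 3));
	return 0;
}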
linux-master
drivers/crypto/cavium/cpt/cptvf_reqmanager.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 Cavium, Inc. */ #include <linux/device.h> #include <linux/firmware.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/pci.h> #include <linux/printk.h> #include "cptpf.h" #define DRV_NAME "thunder-cpt" #define DRV_VERSION "1.0" static u32 num_vfs = 4; /* Default 4 VF enabled */ module_param(num_vfs, uint, 0444); MODULE_PARM_DESC(num_vfs, "Number of VFs to enable(1-16)"); /* * Disable cores specified by coremask */ static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask, u8 type, u8 grp) { u64 pf_exe_ctl; u32 timeout = 100; u64 grpmask = 0; struct device *dev = &cpt->pdev->dev; if (type == AE_TYPES) coremask = (coremask << cpt->max_se_cores); /* Disengage the cores from groups */ grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp)); cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), (grpmask & ~coremask)); udelay(CSR_DELAY); grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0)); while (grp & coremask) { dev_err(dev, "Cores still busy %llx", coremask); grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0)); if (timeout--) break; udelay(CSR_DELAY); } /* Disable the cores */ pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0)); cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), (pf_exe_ctl & ~coremask)); udelay(CSR_DELAY); } /* * Enable cores specified by coremask */ static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask, u8 type) { u64 pf_exe_ctl; if (type == AE_TYPES) coremask = (coremask << cpt->max_se_cores); pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0)); cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), (pf_exe_ctl | coremask)); udelay(CSR_DELAY); } static void cpt_configure_group(struct cpt_device *cpt, u8 grp, u64 coremask, u8 type) { u64 pf_gx_en = 0; if (type == AE_TYPES) coremask = (coremask << cpt->max_se_cores); pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp)); cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), (pf_gx_en | coremask)); udelay(CSR_DELAY); } static void cpt_disable_mbox_interrupts(struct cpt_device *cpt) { /* Clear mbox(0) interupts for all vfs */ cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull); } static void cpt_disable_ecc_interrupts(struct cpt_device *cpt) { /* Clear ecc(0) interupts for all vfs */ cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull); } static void cpt_disable_exec_interrupts(struct cpt_device *cpt) { /* Clear exec interupts for all vfs */ cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull); } static void cpt_disable_all_interrupts(struct cpt_device *cpt) { cpt_disable_mbox_interrupts(cpt); cpt_disable_ecc_interrupts(cpt); cpt_disable_exec_interrupts(cpt); } static void cpt_enable_mbox_interrupts(struct cpt_device *cpt) { /* Set mbox(0) interupts for all vfs */ cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull); } static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode) { int ret = 0, core = 0, shift = 0; u32 total_cores = 0; struct device *dev = &cpt->pdev->dev; if (!mcode || !mcode->code) { dev_err(dev, "Either the mcode is null or data is NULL\n"); return -EINVAL; } if (mcode->code_size == 0) { dev_err(dev, "microcode size is 0\n"); return -EINVAL; } /* Assumes 0-9 are SE cores for UCODE_BASE registers and * AE core bases follow */ if (mcode->is_ae) { core = CPT_MAX_SE_CORES; /* start couting from 10 */ total_cores = CPT_MAX_TOTAL_CORES; /* upto 15 */ } else { core = 
0; /* start couting from 0 */ total_cores = CPT_MAX_SE_CORES; /* upto 9 */ } /* Point to microcode for each core of the group */ for (; core < total_cores ; core++, shift++) { if (mcode->core_mask & (1 << shift)) { cpt_write_csr64(cpt->reg_base, CPTX_PF_ENGX_UCODE_BASE(0, core), (u64)mcode->phys_base); } } return ret; } static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode) { int ret = 0; struct device *dev = &cpt->pdev->dev; /* Make device not ready */ cpt->flags &= ~CPT_FLAG_DEVICE_READY; /* Disable All PF interrupts */ cpt_disable_all_interrupts(cpt); /* Calculate mcode group and coremasks */ if (mcode->is_ae) { if (mcode->num_cores > cpt->max_ae_cores) { dev_err(dev, "Requested for more cores than available AE cores\n"); ret = -EINVAL; goto cpt_init_fail; } if (cpt->next_group >= CPT_MAX_CORE_GROUPS) { dev_err(dev, "Can't load, all eight microcode groups in use"); return -ENFILE; } mcode->group = cpt->next_group; /* Convert requested cores to mask */ mcode->core_mask = GENMASK(mcode->num_cores, 0); cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES, mcode->group); /* Load microcode for AE engines */ ret = cpt_load_microcode(cpt, mcode); if (ret) { dev_err(dev, "Microcode load Failed for %s\n", mcode->version); goto cpt_init_fail; } cpt->next_group++; /* Configure group mask for the mcode */ cpt_configure_group(cpt, mcode->group, mcode->core_mask, AE_TYPES); /* Enable AE cores for the group mask */ cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES); } else { if (mcode->num_cores > cpt->max_se_cores) { dev_err(dev, "Requested for more cores than available SE cores\n"); ret = -EINVAL; goto cpt_init_fail; } if (cpt->next_group >= CPT_MAX_CORE_GROUPS) { dev_err(dev, "Can't load, all eight microcode groups in use"); return -ENFILE; } mcode->group = cpt->next_group; /* Covert requested cores to mask */ mcode->core_mask = GENMASK(mcode->num_cores, 0); cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES, mcode->group); /* Load microcode for SE engines */ ret = cpt_load_microcode(cpt, mcode); if (ret) { dev_err(dev, "Microcode load Failed for %s\n", mcode->version); goto cpt_init_fail; } cpt->next_group++; /* Configure group mask for the mcode */ cpt_configure_group(cpt, mcode->group, mcode->core_mask, SE_TYPES); /* Enable SE cores for the group mask */ cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES); } /* Enabled PF mailbox interrupts */ cpt_enable_mbox_interrupts(cpt); cpt->flags |= CPT_FLAG_DEVICE_READY; return ret; cpt_init_fail: /* Enabled PF mailbox interrupts */ cpt_enable_mbox_interrupts(cpt); return ret; } struct ucode_header { u8 version[CPT_UCODE_VERSION_SZ]; __be32 code_length; u32 data_length; u64 sram_address; }; static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) { const struct firmware *fw_entry; struct device *dev = &cpt->pdev->dev; struct ucode_header *ucode; unsigned int code_length; struct microcode *mcode; int j, ret = 0; ret = request_firmware(&fw_entry, fw, dev); if (ret) return ret; ucode = (struct ucode_header *)fw_entry->data; mcode = &cpt->mcode[cpt->next_mc_idx]; memcpy(mcode->version, (u8 *)fw_entry->data, CPT_UCODE_VERSION_SZ); code_length = ntohl(ucode->code_length); if (code_length == 0 || code_length >= INT_MAX / 2) { ret = -EINVAL; goto fw_release; } mcode->code_size = code_length * 2; mcode->is_ae = is_ae; mcode->core_mask = 0ULL; mcode->num_cores = is_ae ? 
6 : 10; /* Allocate DMAable space */ mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size, &mcode->phys_base, GFP_KERNEL); if (!mcode->code) { dev_err(dev, "Unable to allocate space for microcode"); ret = -ENOMEM; goto fw_release; } memcpy((void *)mcode->code, (void *)(fw_entry->data + sizeof(*ucode)), mcode->code_size); /* Byte swap 64-bit */ for (j = 0; j < (mcode->code_size / 8); j++) ((__be64 *)mcode->code)[j] = cpu_to_be64(((u64 *)mcode->code)[j]); /* MC needs 16-bit swap */ for (j = 0; j < (mcode->code_size / 2); j++) ((__be16 *)mcode->code)[j] = cpu_to_be16(((u16 *)mcode->code)[j]); dev_dbg(dev, "mcode->code_size = %u\n", mcode->code_size); dev_dbg(dev, "mcode->is_ae = %u\n", mcode->is_ae); dev_dbg(dev, "mcode->num_cores = %u\n", mcode->num_cores); dev_dbg(dev, "mcode->code = %llx\n", (u64)mcode->code); dev_dbg(dev, "mcode->phys_base = %llx\n", mcode->phys_base); ret = do_cpt_init(cpt, mcode); if (ret) { dev_err(dev, "do_cpt_init failed with ret: %d\n", ret); goto fw_release; } dev_info(dev, "Microcode Loaded %s\n", mcode->version); mcode->is_mc_valid = 1; cpt->next_mc_idx++; fw_release: release_firmware(fw_entry); return ret; } static int cpt_ucode_load(struct cpt_device *cpt) { int ret = 0; struct device *dev = &cpt->pdev->dev; ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true); if (ret) { dev_err(dev, "ae:cpt_ucode_load failed with ret: %d\n", ret); return ret; } ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false); if (ret) { dev_err(dev, "se:cpt_ucode_load failed with ret: %d\n", ret); return ret; } return ret; } static irqreturn_t cpt_mbx0_intr_handler(int irq, void *cpt_irq) { struct cpt_device *cpt = (struct cpt_device *)cpt_irq; cpt_mbox_intr_handler(cpt, 0); return IRQ_HANDLED; } static void cpt_reset(struct cpt_device *cpt) { cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1); } static void cpt_find_max_enabled_cores(struct cpt_device *cpt) { union cptx_pf_constants pf_cnsts = {0}; pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0)); cpt->max_se_cores = pf_cnsts.s.se; cpt->max_ae_cores = pf_cnsts.s.ae; } static u32 cpt_check_bist_status(struct cpt_device *cpt) { union cptx_pf_bist_status bist_sts = {0}; bist_sts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_BIST_STATUS(0)); return bist_sts.u; } static u64 cpt_check_exe_bist_status(struct cpt_device *cpt) { union cptx_pf_exe_bist_status bist_sts = {0}; bist_sts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_BIST_STATUS(0)); return bist_sts.u; } static void cpt_disable_all_cores(struct cpt_device *cpt) { u32 grp, timeout = 100; struct device *dev = &cpt->pdev->dev; /* Disengage the cores from groups */ for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) { cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0); udelay(CSR_DELAY); } grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0)); while (grp) { dev_err(dev, "Cores still busy"); grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0)); if (timeout--) break; udelay(CSR_DELAY); } /* Disable the cores */ cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0); } /* * Ensure all cores are disengaged from all groups by * calling cpt_disable_all_cores() before calling this * function. 
*/ static void cpt_unload_microcode(struct cpt_device *cpt) { u32 grp = 0, core; /* Free microcode bases and reset group masks */ for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++) { struct microcode *mcode = &cpt->mcode[grp]; if (cpt->mcode[grp].code) dma_free_coherent(&cpt->pdev->dev, mcode->code_size, mcode->code, mcode->phys_base); mcode->code = NULL; } /* Clear UCODE_BASE registers for all engines */ for (core = 0; core < CPT_MAX_TOTAL_CORES; core++) cpt_write_csr64(cpt->reg_base, CPTX_PF_ENGX_UCODE_BASE(0, core), 0ull); } static int cpt_device_init(struct cpt_device *cpt) { u64 bist; struct device *dev = &cpt->pdev->dev; /* Reset the PF when probed first */ cpt_reset(cpt); msleep(100); /*Check BIST status*/ bist = (u64)cpt_check_bist_status(cpt); if (bist) { dev_err(dev, "RAM BIST failed with code 0x%llx", bist); return -ENODEV; } bist = cpt_check_exe_bist_status(cpt); if (bist) { dev_err(dev, "Engine BIST failed with code 0x%llx", bist); return -ENODEV; } /*Get CLK frequency*/ /*Get max enabled cores */ cpt_find_max_enabled_cores(cpt); /*Disable all cores*/ cpt_disable_all_cores(cpt); /*Reset device parameters*/ cpt->next_mc_idx = 0; cpt->next_group = 0; /* PF is ready */ cpt->flags |= CPT_FLAG_DEVICE_READY; return 0; } static int cpt_register_interrupts(struct cpt_device *cpt) { int ret; struct device *dev = &cpt->pdev->dev; /* Enable MSI-X */ ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS, CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX); if (ret < 0) { dev_err(&cpt->pdev->dev, "Request for #%d msix vectors failed\n", CPT_PF_MSIX_VECTORS); return ret; } /* Register mailbox interrupt handlers */ ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt); if (ret) goto fail; /* Enable mailbox interrupt */ cpt_enable_mbox_interrupts(cpt); return 0; fail: dev_err(dev, "Request irq failed\n"); pci_disable_msix(cpt->pdev); return ret; } static void cpt_unregister_interrupts(struct cpt_device *cpt) { free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt); pci_disable_msix(cpt->pdev); } static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs) { int pos = 0; int err; u16 total_vf_cnt; struct pci_dev *pdev = cpt->pdev; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); if (!pos) { dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n"); return -ENODEV; } cpt->num_vf_en = num_vfs; /* User requested VFs */ pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt); if (total_vf_cnt < cpt->num_vf_en) cpt->num_vf_en = total_vf_cnt; if (!total_vf_cnt) return 0; /*Enabled the available VFs */ err = pci_enable_sriov(pdev, cpt->num_vf_en); if (err) { dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n", cpt->num_vf_en); cpt->num_vf_en = 0; return err; } /* TODO: Optionally enable static VQ priorities feature */ dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n", cpt->num_vf_en); cpt->flags |= CPT_FLAG_SRIOV_ENABLED; return 0; } static int cpt_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct cpt_device *cpt; int err; if (num_vfs > 16 || num_vfs < 4) { dev_warn(dev, "Invalid vf count %d, Resetting it to 4(default)\n", num_vfs); num_vfs = 4; } cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL); if (!cpt) return -ENOMEM; pci_set_drvdata(pdev, cpt); cpt->pdev = pdev; err = pci_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device\n"); pci_set_drvdata(pdev, NULL); return err; } err = 
pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x\n", err); goto cpt_err_disable_device; } err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "Unable to get usable 48-bit DMA configuration\n"); goto cpt_err_release_regions; } /* MAP PF's configuration registers */ cpt->reg_base = pcim_iomap(pdev, 0, 0); if (!cpt->reg_base) { dev_err(dev, "Cannot map config register space, aborting\n"); err = -ENOMEM; goto cpt_err_release_regions; } /* CPT device HW initialization */ cpt_device_init(cpt); /* Register interrupts */ err = cpt_register_interrupts(cpt); if (err) goto cpt_err_release_regions; err = cpt_ucode_load(cpt); if (err) goto cpt_err_unregister_interrupts; /* Configure SRIOV */ err = cpt_sriov_init(cpt, num_vfs); if (err) goto cpt_err_unregister_interrupts; return 0; cpt_err_unregister_interrupts: cpt_unregister_interrupts(cpt); cpt_err_release_regions: pci_release_regions(pdev); cpt_err_disable_device: pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return err; } static void cpt_remove(struct pci_dev *pdev) { struct cpt_device *cpt = pci_get_drvdata(pdev); /* Disengage SE and AE cores from all groups*/ cpt_disable_all_cores(cpt); /* Unload microcodes */ cpt_unload_microcode(cpt); cpt_unregister_interrupts(cpt); pci_disable_sriov(pdev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } static void cpt_shutdown(struct pci_dev *pdev) { struct cpt_device *cpt = pci_get_drvdata(pdev); if (!cpt) return; dev_info(&pdev->dev, "Shutdown device %x:%x.\n", (u32)pdev->vendor, (u32)pdev->device); cpt_unregister_interrupts(cpt); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } /* Supported devices */ static const struct pci_device_id cpt_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, CPT_81XX_PCI_PF_DEVICE_ID) }, { 0, } /* end of table */ }; static struct pci_driver cpt_pci_driver = { .name = DRV_NAME, .id_table = cpt_id_table, .probe = cpt_probe, .remove = cpt_remove, .shutdown = cpt_shutdown, }; module_pci_driver(cpt_pci_driver); MODULE_AUTHOR("George Cherian <[email protected]>"); MODULE_DESCRIPTION("Cavium Thunder CPT Physical Function Driver"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, cpt_id_table);
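/*
 * Illustration only: a sketch of how the PF code above places engine
 * groups.  SE engines occupy the low engine numbers, so an AE coremask is
 * shifted up by the number of SE cores before it is written to the
 * group-enable register.  The mask construction and core counts here are
 * example values, not the driver's GENMASK() usage or the counts read
 * from CPTX_PF_CONSTANTS.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t ex_group_mask(unsigned int num_cores,
			      unsigned int max_se_cores, int is_ae)
{
	/* Mask covering num_cores consecutive engines starting at bit 0. */
	uint64_t mask = (num_cores >= 64) ? ~0ull : ((1ull << num_cores) - 1);

	return is_ae ? mask << max_se_cores : mask;
}

int main(void)
{
	/* e.g. 10 SE cores, so a 6-core AE group lands on engines 10..15 */
	printf("SE mask 0x%llx\n", (unsigned long long)ex_group_mask(10, 10, 0));
	printf("AE mask 0x%llx\n", (unsigned long long)ex_group_mask(6, 10, 1));
	return 0;
}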
linux-master
drivers/crypto/cavium/cpt/cptpf_main.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2016 Cavium, Inc. */ #include "cptvf.h" static void cptvf_send_msg_to_pf(struct cpt_vf *cptvf, struct cpt_mbox *mbx) { /* Writing mbox(1) causes interrupt */ cpt_write_csr64(cptvf->reg_base, CPTX_VFX_PF_MBOXX(0, 0, 0), mbx->msg); cpt_write_csr64(cptvf->reg_base, CPTX_VFX_PF_MBOXX(0, 0, 1), mbx->data); } /* Interrupt handler to handle mailbox messages from VFs */ void cptvf_handle_mbox_intr(struct cpt_vf *cptvf) { struct cpt_mbox mbx = {}; /* * MBOX[0] contains msg * MBOX[1] contains data */ mbx.msg = cpt_read_csr64(cptvf->reg_base, CPTX_VFX_PF_MBOXX(0, 0, 0)); mbx.data = cpt_read_csr64(cptvf->reg_base, CPTX_VFX_PF_MBOXX(0, 0, 1)); dev_dbg(&cptvf->pdev->dev, "%s: Mailbox msg 0x%llx from PF\n", __func__, mbx.msg); switch (mbx.msg) { case CPT_MSG_READY: { cptvf->pf_acked = true; cptvf->vfid = mbx.data; dev_dbg(&cptvf->pdev->dev, "Received VFID %d\n", cptvf->vfid); break; } case CPT_MSG_QBIND_GRP: cptvf->pf_acked = true; cptvf->vftype = mbx.data; dev_dbg(&cptvf->pdev->dev, "VF %d type %s group %d\n", cptvf->vfid, ((mbx.data == SE_TYPES) ? "SE" : "AE"), cptvf->vfgrp); break; case CPT_MBOX_MSG_TYPE_ACK: cptvf->pf_acked = true; break; case CPT_MBOX_MSG_TYPE_NACK: cptvf->pf_nacked = true; break; default: dev_err(&cptvf->pdev->dev, "Invalid msg from PF, msg 0x%llx\n", mbx.msg); break; } } static int cptvf_send_msg_to_pf_timeout(struct cpt_vf *cptvf, struct cpt_mbox *mbx) { int timeout = CPT_MBOX_MSG_TIMEOUT; int sleep = 10; cptvf->pf_acked = false; cptvf->pf_nacked = false; cptvf_send_msg_to_pf(cptvf, mbx); /* Wait for previous message to be acked, timeout 2sec */ while (!cptvf->pf_acked) { if (cptvf->pf_nacked) return -EINVAL; msleep(sleep); if (cptvf->pf_acked) break; timeout -= sleep; if (!timeout) { dev_err(&cptvf->pdev->dev, "PF didn't ack to mbox msg %llx from VF%u\n", (mbx->msg & 0xFF), cptvf->vfid); return -EBUSY; } } return 0; } /* * Checks if VF is able to comminicate with PF * and also gets the CPT number this VF is associated to. */ int cptvf_check_pf_ready(struct cpt_vf *cptvf) { struct pci_dev *pdev = cptvf->pdev; struct cpt_mbox mbx = {}; mbx.msg = CPT_MSG_READY; if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) { dev_err(&pdev->dev, "PF didn't respond to READY msg\n"); return -EBUSY; } return 0; } /* * Communicate VQs size to PF to program CPT(0)_PF_Q(0-15)_CTL of the VF. * Must be ACKed. 
*/ int cptvf_send_vq_size_msg(struct cpt_vf *cptvf) { struct pci_dev *pdev = cptvf->pdev; struct cpt_mbox mbx = {}; mbx.msg = CPT_MSG_QLEN; mbx.data = cptvf->qsize; if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) { dev_err(&pdev->dev, "PF didn't respond to vq_size msg\n"); return -EBUSY; } return 0; } /* * Communicate VF group required to PF and get the VQ binded to that group */ int cptvf_send_vf_to_grp_msg(struct cpt_vf *cptvf) { struct pci_dev *pdev = cptvf->pdev; struct cpt_mbox mbx = {}; mbx.msg = CPT_MSG_QBIND_GRP; /* Convey group of the VF */ mbx.data = cptvf->vfgrp; if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) { dev_err(&pdev->dev, "PF didn't respond to vf_type msg\n"); return -EBUSY; } return 0; } /* * Communicate VF group required to PF and get the VQ binded to that group */ int cptvf_send_vf_priority_msg(struct cpt_vf *cptvf) { struct pci_dev *pdev = cptvf->pdev; struct cpt_mbox mbx = {}; mbx.msg = CPT_MSG_VQ_PRIORITY; /* Convey group of the VF */ mbx.data = cptvf->priority; if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) { dev_err(&pdev->dev, "PF didn't respond to vf_type msg\n"); return -EBUSY; } return 0; } /* * Communicate to PF that VF is UP and running */ int cptvf_send_vf_up(struct cpt_vf *cptvf) { struct pci_dev *pdev = cptvf->pdev; struct cpt_mbox mbx = {}; mbx.msg = CPT_MSG_VF_UP; if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) { dev_err(&pdev->dev, "PF didn't respond to UP msg\n"); return -EBUSY; } return 0; } /* * Communicate to PF that VF is DOWN and running */ int cptvf_send_vf_down(struct cpt_vf *cptvf) { struct pci_dev *pdev = cptvf->pdev; struct cpt_mbox mbx = {}; mbx.msg = CPT_MSG_VF_DOWN; if (cptvf_send_msg_to_pf_timeout(cptvf, &mbx)) { dev_err(&pdev->dev, "PF didn't respond to DOWN msg\n"); return -EBUSY; } return 0; }
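/*
 * Illustration only: a user-space sketch of the ack-polling pattern used
 * by cptvf_send_msg_to_pf_timeout() above - send the message, then poll
 * an "acked" flag in small sleep steps until a total timeout expires.
 * The flag, timeout and step values here are stand-ins for the driver's
 * pf_acked/pf_nacked state and CPT_MBOX_MSG_TIMEOUT.
 */
#include <stdbool.h>
#include <stdio.h>

#define EX_TIMEOUT_MS 2000
#define EX_STEP_MS    10

/* Pretend the PF answers on the fifth poll. */
static int polls;
static bool ex_check_acked(void)
{
	return ++polls >= 5;
}

static int ex_wait_for_ack(void)
{
	int remaining = EX_TIMEOUT_MS;

	while (!ex_check_acked()) {
		/* the driver msleep()s here; omitted to keep the sketch portable */
		remaining -= EX_STEP_MS;
		if (remaining <= 0)
			return -1;	/* timed out; the driver returns -EBUSY */
	}
	return 0;
}

int main(void)
{
	printf("ack wait %s\n", ex_wait_for_ack() ? "timed out" : "ok");
	return 0;
}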
linux-master
drivers/crypto/cavium/cpt/cptvf_mbox.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/delay.h> #include <linux/firmware.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include "nitrox_dev.h" #include "nitrox_common.h" #include "nitrox_csr.h" #include "nitrox_hal.h" #include "nitrox_isr.h" #include "nitrox_debugfs.h" #define CNN55XX_DEV_ID 0x12 #define UCODE_HLEN 48 #define DEFAULT_SE_GROUP 0 #define DEFAULT_AE_GROUP 0 #define DRIVER_VERSION "1.2" #define CNN55XX_UCD_BLOCK_SIZE 32768 #define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2) #define FW_DIR "cavium/" /* SE microcode */ #define SE_FW FW_DIR "cnn55xx_se.fw" /* AE microcode */ #define AE_FW FW_DIR "cnn55xx_ae.fw" static const char nitrox_driver_name[] = "CNN55XX"; static LIST_HEAD(ndevlist); static DEFINE_MUTEX(devlist_lock); static unsigned int num_devices; /* * nitrox_pci_tbl - PCI Device ID Table */ static const struct pci_device_id nitrox_pci_tbl[] = { {PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0}, /* required last entry */ {0, } }; MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl); static unsigned int qlen = DEFAULT_CMD_QLEN; module_param(qlen, uint, 0644); MODULE_PARM_DESC(qlen, "Command queue length - default 2048"); /** * struct ucode - Firmware Header * @id: microcode ID * @version: firmware version * @code_size: code section size * @raz: alignment * @code: code section */ struct ucode { u8 id; char version[VERSION_LEN - 1]; __be32 code_size; u8 raz[12]; u64 code[]; }; /* * write_to_ucd_unit - Write Firmware to NITROX UCD unit */ static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size, u64 *ucode_data, int block_num) { u32 code_size; u64 offset, data; int i = 0; /* * UCD structure * * ------------- * | BLK 7 | * ------------- * | BLK 6 | * ------------- * | ... 
| * ------------- * | BLK 0 | * ------------- * Total of 8 blocks, each size 32KB */ /* set the block number */ offset = UCD_UCODE_LOAD_BLOCK_NUM; nitrox_write_csr(ndev, offset, block_num); code_size = roundup(ucode_size, 16); while (code_size) { data = ucode_data[i]; /* write 8 bytes at a time */ offset = UCD_UCODE_LOAD_IDX_DATAX(i); nitrox_write_csr(ndev, offset, data); code_size -= 8; i++; } usleep_range(300, 400); } static int nitrox_load_fw(struct nitrox_device *ndev) { const struct firmware *fw; const char *fw_name; struct ucode *ucode; u64 *ucode_data; u64 offset; union ucd_core_eid_ucode_block_num core_2_eid_val; union aqm_grp_execmsk_lo aqm_grp_execmask_lo; union aqm_grp_execmsk_hi aqm_grp_execmask_hi; u32 ucode_size; int ret, i = 0; fw_name = SE_FW; dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name); ret = request_firmware(&fw, fw_name, DEV(ndev)); if (ret < 0) { dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name); return ret; } ucode = (struct ucode *)fw->data; ucode_size = be32_to_cpu(ucode->code_size) * 2; if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) { dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n", ucode_size, fw_name); release_firmware(fw); return -EINVAL; } ucode_data = ucode->code; /* copy the firmware version */ memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2)); ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0'; /* Load SE Firmware on UCD Block 0 */ write_to_ucd_unit(ndev, ucode_size, ucode_data, 0); release_firmware(fw); /* put all SE cores in DEFAULT_SE_GROUP */ offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP); nitrox_write_csr(ndev, offset, (~0ULL)); /* write block number and firmware length * bit:<2:0> block number * bit:3 is set SE uses 32KB microcode * bit:3 is clear SE uses 64KB microcode */ core_2_eid_val.value = 0ULL; core_2_eid_val.ucode_blk = 0; if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE) core_2_eid_val.ucode_len = 1; else core_2_eid_val.ucode_len = 0; for (i = 0; i < ndev->hw.se_cores; i++) { offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i); nitrox_write_csr(ndev, offset, core_2_eid_val.value); } fw_name = AE_FW; dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name); ret = request_firmware(&fw, fw_name, DEV(ndev)); if (ret < 0) { dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name); return ret; } ucode = (struct ucode *)fw->data; ucode_size = be32_to_cpu(ucode->code_size) * 2; if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) { dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n", ucode_size, fw_name); release_firmware(fw); return -EINVAL; } ucode_data = ucode->code; /* copy the firmware version */ memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2)); ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0'; /* Load AE Firmware on UCD Block 2 */ write_to_ucd_unit(ndev, ucode_size, ucode_data, 2); release_firmware(fw); /* put all AE cores in DEFAULT_AE_GROUP */ offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP); aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL; nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value); offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP); aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL; nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value); /* write block number and firmware length * bit:<2:0> block number * bit:3 is set AE uses 32KB microcode * bit:3 is clear AE uses 64KB microcode */ core_2_eid_val.value = 0ULL; core_2_eid_val.ucode_blk = 2; if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE) core_2_eid_val.ucode_len = 1; else core_2_eid_val.ucode_len = 0; for (i = 0; i < 
ndev->hw.ae_cores; i++) { offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i); nitrox_write_csr(ndev, offset, core_2_eid_val.value); } return 0; } /** * nitrox_add_to_devlist - add NITROX device to global device list * @ndev: NITROX device */ static int nitrox_add_to_devlist(struct nitrox_device *ndev) { struct nitrox_device *dev; int ret = 0; INIT_LIST_HEAD(&ndev->list); refcount_set(&ndev->refcnt, 1); mutex_lock(&devlist_lock); list_for_each_entry(dev, &ndevlist, list) { if (dev == ndev) { ret = -EEXIST; goto unlock; } } ndev->idx = num_devices++; list_add_tail(&ndev->list, &ndevlist); unlock: mutex_unlock(&devlist_lock); return ret; } /** * nitrox_remove_from_devlist - remove NITROX device from * global device list * @ndev: NITROX device */ static void nitrox_remove_from_devlist(struct nitrox_device *ndev) { mutex_lock(&devlist_lock); list_del(&ndev->list); num_devices--; mutex_unlock(&devlist_lock); } struct nitrox_device *nitrox_get_first_device(void) { struct nitrox_device *ndev = NULL, *iter; mutex_lock(&devlist_lock); list_for_each_entry(iter, &ndevlist, list) { if (nitrox_ready(iter)) { ndev = iter; break; } } mutex_unlock(&devlist_lock); if (!ndev) return NULL; refcount_inc(&ndev->refcnt); /* barrier to sync with other cpus */ smp_mb__after_atomic(); return ndev; } void nitrox_put_device(struct nitrox_device *ndev) { if (!ndev) return; refcount_dec(&ndev->refcnt); /* barrier to sync with other cpus */ smp_mb__after_atomic(); } static int nitrox_device_flr(struct pci_dev *pdev) { int pos = 0; pos = pci_save_state(pdev); if (pos) { dev_err(&pdev->dev, "Failed to save pci state\n"); return -ENOMEM; } pcie_reset_flr(pdev, PCI_RESET_DO_RESET); pci_restore_state(pdev); return 0; } static int nitrox_pf_sw_init(struct nitrox_device *ndev) { int err; err = nitrox_common_sw_init(ndev); if (err) return err; err = nitrox_register_interrupts(ndev); if (err) nitrox_common_sw_cleanup(ndev); return err; } static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev) { nitrox_unregister_interrupts(ndev); nitrox_common_sw_cleanup(ndev); } /** * nitrox_bist_check - Check NITROX BIST registers status * @ndev: NITROX device */ static int nitrox_bist_check(struct nitrox_device *ndev) { u64 value = 0; int i; for (i = 0; i < NR_CLUSTERS; i++) { value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i)); value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i)); } value += nitrox_read_csr(ndev, UCD_BIST_STATUS); value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG); value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG); value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG); value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG); value += nitrox_read_csr(ndev, POM_BIST_REG); value += nitrox_read_csr(ndev, BMI_BIST_REG); value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT); value += nitrox_read_csr(ndev, BMO_BIST_REG); value += nitrox_read_csr(ndev, LBC_BIST_STATUS); value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0)); if (value) return -EIO; return 0; } static int nitrox_pf_hw_init(struct nitrox_device *ndev) { int err; err = nitrox_bist_check(ndev); if (err) { dev_err(&ndev->pdev->dev, "BIST check failed\n"); return err; } /* get cores information */ nitrox_get_hwinfo(ndev); nitrox_config_nps_core_unit(ndev); nitrox_config_aqm_unit(ndev); nitrox_config_nps_pkt_unit(ndev); nitrox_config_pom_unit(ndev); nitrox_config_efl_unit(ndev); /* configure IO units */ nitrox_config_bmi_unit(ndev); nitrox_config_bmo_unit(ndev); /* configure Local Buffer Cache */ nitrox_config_lbc_unit(ndev); nitrox_config_rand_unit(ndev); /* load firmware on cores */ 
err = nitrox_load_fw(ndev); if (err) return err; nitrox_config_emu_unit(ndev); return 0; } /** * nitrox_probe - NITROX Initialization function. * @pdev: PCI device information struct * @id: entry in nitrox_pci_tbl * * Return: 0, if the driver is bound to the device, or * a negative error if there is failure. */ static int nitrox_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct nitrox_device *ndev; int err; dev_info_once(&pdev->dev, "%s driver version %s\n", nitrox_driver_name, DRIVER_VERSION); err = pci_enable_device_mem(pdev); if (err) return err; /* do FLR */ err = nitrox_device_flr(pdev); if (err) { dev_err(&pdev->dev, "FLR failed\n"); goto flr_fail; } if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { dev_dbg(&pdev->dev, "DMA to 64-BIT address\n"); } else { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "DMA configuration failed\n"); goto flr_fail; } } err = pci_request_mem_regions(pdev, nitrox_driver_name); if (err) goto flr_fail; pci_set_master(pdev); ndev = kzalloc(sizeof(*ndev), GFP_KERNEL); if (!ndev) { err = -ENOMEM; goto ndev_fail; } pci_set_drvdata(pdev, ndev); ndev->pdev = pdev; /* add to device list */ nitrox_add_to_devlist(ndev); ndev->hw.vendor_id = pdev->vendor; ndev->hw.device_id = pdev->device; ndev->hw.revision_id = pdev->revision; /* command timeout in jiffies */ ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT); ndev->node = dev_to_node(&pdev->dev); if (ndev->node == NUMA_NO_NODE) ndev->node = 0; ndev->bar_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!ndev->bar_addr) { err = -EIO; goto ioremap_err; } /* allocate command queus based on cpus, max queues are 64 */ ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus()); ndev->qlen = qlen; err = nitrox_pf_sw_init(ndev); if (err) goto pf_sw_fail; err = nitrox_pf_hw_init(ndev); if (err) goto pf_hw_fail; nitrox_debugfs_init(ndev); /* clear the statistics */ atomic64_set(&ndev->stats.posted, 0); atomic64_set(&ndev->stats.completed, 0); atomic64_set(&ndev->stats.dropped, 0); atomic_set(&ndev->state, __NDEV_READY); /* barrier to sync with other cpus */ smp_mb__after_atomic(); err = nitrox_crypto_register(); if (err) goto crypto_fail; return 0; crypto_fail: nitrox_debugfs_exit(ndev); atomic_set(&ndev->state, __NDEV_NOT_READY); /* barrier to sync with other cpus */ smp_mb__after_atomic(); pf_hw_fail: nitrox_pf_sw_cleanup(ndev); pf_sw_fail: iounmap(ndev->bar_addr); ioremap_err: nitrox_remove_from_devlist(ndev); kfree(ndev); pci_set_drvdata(pdev, NULL); ndev_fail: pci_release_mem_regions(pdev); flr_fail: pci_disable_device(pdev); return err; } /** * nitrox_remove - Unbind the driver from the device. 
* @pdev: PCI device information struct */ static void nitrox_remove(struct pci_dev *pdev) { struct nitrox_device *ndev = pci_get_drvdata(pdev); if (!ndev) return; if (!refcount_dec_and_test(&ndev->refcnt)) { dev_err(DEV(ndev), "Device refcnt not zero (%d)\n", refcount_read(&ndev->refcnt)); return; } dev_info(DEV(ndev), "Removing Device %x:%x\n", ndev->hw.vendor_id, ndev->hw.device_id); atomic_set(&ndev->state, __NDEV_NOT_READY); /* barrier to sync with other cpus */ smp_mb__after_atomic(); nitrox_remove_from_devlist(ndev); /* disable SR-IOV */ nitrox_sriov_configure(pdev, 0); nitrox_crypto_unregister(); nitrox_debugfs_exit(ndev); nitrox_pf_sw_cleanup(ndev); iounmap(ndev->bar_addr); kfree(ndev); pci_set_drvdata(pdev, NULL); pci_release_mem_regions(pdev); pci_disable_device(pdev); } static void nitrox_shutdown(struct pci_dev *pdev) { pci_set_drvdata(pdev, NULL); pci_release_mem_regions(pdev); pci_disable_device(pdev); } static struct pci_driver nitrox_driver = { .name = nitrox_driver_name, .id_table = nitrox_pci_tbl, .probe = nitrox_probe, .remove = nitrox_remove, .shutdown = nitrox_shutdown, .sriov_configure = nitrox_sriov_configure, }; module_pci_driver(nitrox_driver); MODULE_AUTHOR("Srikanth Jampala <[email protected]>"); MODULE_DESCRIPTION("Cavium CNN55XX PF Driver" DRIVER_VERSION " "); MODULE_LICENSE("GPL"); MODULE_VERSION(DRIVER_VERSION); MODULE_FIRMWARE(SE_FW);
linux-master
drivers/crypto/cavium/nitrox/nitrox_main.c
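The firmware load path in nitrox_main.c above parses a struct ucode header, doubles the big-endian code_size field, and rejects images that would not fit in two 32 KB UCD blocks. Below is a minimal stand-alone C sketch of just that size check; ucd_image_ok() and be32_decode() are hypothetical helpers written for this illustration, and the VERSION_LEN value of 32 is an assumption (the real constant lives in nitrox_dev.h), so this is a sketch of the check, not driver code.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define UCD_BLOCK_SIZE   32768u               /* one UCD block, 32 KB */
#define MAX_UCODE_SIZE   (UCD_BLOCK_SIZE * 2) /* firmware may span two blocks */
#define VERSION_LEN      32                   /* assumed value for the demo */

/* Layout mirroring the driver's firmware header: id, version, big-endian size. */
struct ucode_hdr {
	uint8_t id;
	char    version[VERSION_LEN - 1];
	uint8_t code_size_be[4];              /* __be32 in the driver */
	uint8_t raz[12];
};

/* Decode a big-endian 32-bit value byte by byte. */
static uint32_t be32_decode(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Return the usable microcode size in bytes, or 0 if the image is invalid. */
static uint32_t ucd_image_ok(const void *fw, size_t fw_len)
{
	const struct ucode_hdr *hdr = fw;
	uint32_t ucode_size;

	if (fw_len < sizeof(*hdr))
		return 0;

	/* The driver multiplies code_size by 2 to get the byte count. */
	ucode_size = be32_decode(hdr->code_size_be) * 2;
	if (!ucode_size || ucode_size > MAX_UCODE_SIZE)
		return 0;

	return ucode_size;
}

int main(void)
{
	uint8_t image[sizeof(struct ucode_hdr)] = { 0 };
	uint32_t size;

	/* pretend code_size = 0x2000, i.e. 16 KB of microcode after doubling */
	image[offsetof(struct ucode_hdr, code_size_be) + 2] = 0x20;

	size = ucd_image_ok(image, sizeof(image));
	printf("ucode size: %u bytes, %s\n", size,
	       size && size <= UCD_BLOCK_SIZE ? "fits one UCD block" : "needs two blocks");
	return 0;
}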
// SPDX-License-Identifier: GPL-2.0 #include <linux/bitmap.h> #include <linux/workqueue.h> #include "nitrox_csr.h" #include "nitrox_hal.h" #include "nitrox_dev.h" #include "nitrox_mbx.h" #define RING_TO_VFNO(_x, _y) ((_x) / (_y)) /* * mbx_msg_type - Mailbox message types */ enum mbx_msg_type { MBX_MSG_TYPE_NOP, MBX_MSG_TYPE_REQ, MBX_MSG_TYPE_ACK, MBX_MSG_TYPE_NACK, }; /* * mbx_msg_opcode - Mailbox message opcodes */ enum mbx_msg_opcode { MSG_OP_VF_MODE = 1, MSG_OP_VF_UP, MSG_OP_VF_DOWN, MSG_OP_CHIPID_VFID, MSG_OP_MCODE_INFO = 11, }; struct pf2vf_work { struct nitrox_vfdev *vfdev; struct nitrox_device *ndev; struct work_struct pf2vf_resp; }; static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring) { u64 reg_addr; reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring); return nitrox_read_csr(ndev, reg_addr); } static inline void pf2vf_write_mbox(struct nitrox_device *ndev, u64 value, int ring) { u64 reg_addr; reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring); nitrox_write_csr(ndev, reg_addr, value); } static void pf2vf_send_response(struct nitrox_device *ndev, struct nitrox_vfdev *vfdev) { union mbox_msg msg; msg.value = vfdev->msg.value; switch (vfdev->msg.opcode) { case MSG_OP_VF_MODE: msg.data = ndev->mode; break; case MSG_OP_VF_UP: vfdev->nr_queues = vfdev->msg.data; atomic_set(&vfdev->state, __NDEV_READY); break; case MSG_OP_CHIPID_VFID: msg.id.chipid = ndev->idx; msg.id.vfid = vfdev->vfno; break; case MSG_OP_VF_DOWN: vfdev->nr_queues = 0; atomic_set(&vfdev->state, __NDEV_NOT_READY); break; case MSG_OP_MCODE_INFO: msg.data = 0; msg.mcode_info.count = 2; msg.mcode_info.info = MCODE_TYPE_SE_SSL | (MCODE_TYPE_AE << 5); msg.mcode_info.next_se_grp = 1; msg.mcode_info.next_ae_grp = 1; break; default: msg.type = MBX_MSG_TYPE_NOP; break; } if (msg.type == MBX_MSG_TYPE_NOP) return; /* send ACK to VF */ msg.type = MBX_MSG_TYPE_ACK; pf2vf_write_mbox(ndev, msg.value, vfdev->ring); vfdev->msg.value = 0; atomic64_inc(&vfdev->mbx_resp); } static void pf2vf_resp_handler(struct work_struct *work) { struct pf2vf_work *pf2vf_resp = container_of(work, struct pf2vf_work, pf2vf_resp); struct nitrox_vfdev *vfdev = pf2vf_resp->vfdev; struct nitrox_device *ndev = pf2vf_resp->ndev; switch (vfdev->msg.type) { case MBX_MSG_TYPE_REQ: /* process the request from VF */ pf2vf_send_response(ndev, vfdev); break; case MBX_MSG_TYPE_ACK: case MBX_MSG_TYPE_NACK: break; } kfree(pf2vf_resp); } void nitrox_pf2vf_mbox_handler(struct nitrox_device *ndev) { DECLARE_BITMAP(csr, BITS_PER_TYPE(u64)); struct nitrox_vfdev *vfdev; struct pf2vf_work *pfwork; u64 value, reg_addr; u32 i; int vfno; /* loop for VF(0..63) */ reg_addr = NPS_PKT_MBOX_INT_LO; value = nitrox_read_csr(ndev, reg_addr); bitmap_from_u64(csr, value); for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) { /* get the vfno from ring */ vfno = RING_TO_VFNO(i, ndev->iov.max_vf_queues); vfdev = ndev->iov.vfdev + vfno; vfdev->ring = i; /* fill the vf mailbox data */ vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring); pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC); if (!pfwork) continue; pfwork->vfdev = vfdev; pfwork->ndev = ndev; INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler); queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); /* clear the corresponding vf bit */ nitrox_write_csr(ndev, reg_addr, BIT_ULL(i)); } /* loop for VF(64..127) */ reg_addr = NPS_PKT_MBOX_INT_HI; value = nitrox_read_csr(ndev, reg_addr); bitmap_from_u64(csr, value); for_each_set_bit(i, csr, BITS_PER_TYPE(csr)) { /* get the vfno from ring */ vfno = RING_TO_VFNO(i + 64, ndev->iov.max_vf_queues); vfdev = 
ndev->iov.vfdev + vfno; vfdev->ring = (i + 64); /* fill the vf mailbox data */ vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring); pfwork = kzalloc(sizeof(*pfwork), GFP_ATOMIC); if (!pfwork) continue; pfwork->vfdev = vfdev; pfwork->ndev = ndev; INIT_WORK(&pfwork->pf2vf_resp, pf2vf_resp_handler); queue_work(ndev->iov.pf2vf_wq, &pfwork->pf2vf_resp); /* clear the corresponding vf bit */ nitrox_write_csr(ndev, reg_addr, BIT_ULL(i)); } } int nitrox_mbox_init(struct nitrox_device *ndev) { struct nitrox_vfdev *vfdev; int i; ndev->iov.vfdev = kcalloc(ndev->iov.num_vfs, sizeof(struct nitrox_vfdev), GFP_KERNEL); if (!ndev->iov.vfdev) return -ENOMEM; for (i = 0; i < ndev->iov.num_vfs; i++) { vfdev = ndev->iov.vfdev + i; vfdev->vfno = i; } /* allocate pf2vf response workqueue */ ndev->iov.pf2vf_wq = alloc_workqueue("nitrox_pf2vf", 0, 0); if (!ndev->iov.pf2vf_wq) { kfree(ndev->iov.vfdev); ndev->iov.vfdev = NULL; return -ENOMEM; } /* enable pf2vf mailbox interrupts */ enable_pf2vf_mbox_interrupts(ndev); return 0; } void nitrox_mbox_cleanup(struct nitrox_device *ndev) { /* disable pf2vf mailbox interrupts */ disable_pf2vf_mbox_interrupts(ndev); /* destroy workqueue */ if (ndev->iov.pf2vf_wq) destroy_workqueue(ndev->iov.pf2vf_wq); kfree(ndev->iov.vfdev); ndev->iov.pf2vf_wq = NULL; ndev->iov.vfdev = NULL; }
linux-master
drivers/crypto/cavium/nitrox/nitrox_mbx.c
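nitrox_pf2vf_mbox_handler() above reads the NPS_PKT_MBOX_INT_LO/HI CSRs, walks every set bit, and derives the owning VF from the ring with RING_TO_VFNO(). The sketch below reproduces only that bit-walking and ring-to-VF mapping in user space; handle_mbox_word() and the sample CSR values are hypothetical, and it assumes the GCC/Clang __builtin_ctzll() builtin.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the driver's RING_TO_VFNO(): rings are grouped per VF. */
#define RING_TO_VFNO(ring, qs_per_vf)  ((ring) / (qs_per_vf))

/*
 * Walk every set bit of a 64-bit "mailbox interrupt" word, the way the
 * handler walks NPS_PKT_MBOX_INT_LO (rings 0..63) and _HI (rings 64..127).
 */
static void handle_mbox_word(uint64_t csr, unsigned int base_ring,
			     unsigned int qs_per_vf)
{
	while (csr) {
		/* index of the lowest set bit */
		unsigned int bit = (unsigned int)__builtin_ctzll(csr);
		unsigned int ring = base_ring + bit;
		unsigned int vfno = RING_TO_VFNO(ring, qs_per_vf);

		printf("ring %u -> VF %u\n", ring, vfno);

		csr &= csr - 1;		/* clear the bit just serviced */
	}
}

int main(void)
{
	/* made-up example: rings 0, 3 and 9 raised interrupts; 8 queues per VF */
	handle_mbox_word((1ULL << 0) | (1ULL << 3) | (1ULL << 9), 0, 8);
	/* the HI register covers rings 64..127 */
	handle_mbox_word(1ULL << 2, 64, 8);
	return 0;
}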
// SPDX-License-Identifier: GPL-2.0 #include <linux/delay.h> #include "nitrox_dev.h" #include "nitrox_csr.h" #include "nitrox_hal.h" #define PLL_REF_CLK 50 #define MAX_CSR_RETRIES 10 /** * emu_enable_cores - Enable EMU cluster cores. * @ndev: NITROX device */ static void emu_enable_cores(struct nitrox_device *ndev) { union emu_se_enable emu_se; union emu_ae_enable emu_ae; int i; /* AE cores 20 per cluster */ emu_ae.value = 0; emu_ae.s.enable = 0xfffff; /* SE cores 16 per cluster */ emu_se.value = 0; emu_se.s.enable = 0xffff; /* enable per cluster cores */ for (i = 0; i < NR_CLUSTERS; i++) { nitrox_write_csr(ndev, EMU_AE_ENABLEX(i), emu_ae.value); nitrox_write_csr(ndev, EMU_SE_ENABLEX(i), emu_se.value); } } /** * nitrox_config_emu_unit - configure EMU unit. * @ndev: NITROX device */ void nitrox_config_emu_unit(struct nitrox_device *ndev) { union emu_wd_int_ena_w1s emu_wd_int; union emu_ge_int_ena_w1s emu_ge_int; u64 offset; int i; /* enable cores */ emu_enable_cores(ndev); /* enable general error and watch dog interrupts */ emu_ge_int.value = 0; emu_ge_int.s.se_ge = 0xffff; emu_ge_int.s.ae_ge = 0xfffff; emu_wd_int.value = 0; emu_wd_int.s.se_wd = 1; for (i = 0; i < NR_CLUSTERS; i++) { offset = EMU_WD_INT_ENA_W1SX(i); nitrox_write_csr(ndev, offset, emu_wd_int.value); offset = EMU_GE_INT_ENA_W1SX(i); nitrox_write_csr(ndev, offset, emu_ge_int.value); } } static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring) { union nps_pkt_in_instr_ctl pkt_in_ctl; union nps_pkt_in_done_cnts pkt_in_cnts; int max_retries = MAX_CSR_RETRIES; u64 offset; /* step 1: disable the ring, clear enable bit */ offset = NPS_PKT_IN_INSTR_CTLX(ring); pkt_in_ctl.value = nitrox_read_csr(ndev, offset); pkt_in_ctl.s.enb = 0; nitrox_write_csr(ndev, offset, pkt_in_ctl.value); /* step 2: wait to clear [ENB] */ usleep_range(100, 150); do { pkt_in_ctl.value = nitrox_read_csr(ndev, offset); if (!pkt_in_ctl.s.enb) break; udelay(50); } while (max_retries--); /* step 3: clear done counts */ offset = NPS_PKT_IN_DONE_CNTSX(ring); pkt_in_cnts.value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, pkt_in_cnts.value); usleep_range(50, 100); } void enable_pkt_input_ring(struct nitrox_device *ndev, int ring) { union nps_pkt_in_instr_ctl pkt_in_ctl; int max_retries = MAX_CSR_RETRIES; u64 offset; /* 64-byte instruction size */ offset = NPS_PKT_IN_INSTR_CTLX(ring); pkt_in_ctl.value = nitrox_read_csr(ndev, offset); pkt_in_ctl.s.is64b = 1; pkt_in_ctl.s.enb = 1; nitrox_write_csr(ndev, offset, pkt_in_ctl.value); /* wait for set [ENB] */ do { pkt_in_ctl.value = nitrox_read_csr(ndev, offset); if (pkt_in_ctl.s.enb) break; udelay(50); } while (max_retries--); } /** * nitrox_config_pkt_input_rings - configure Packet Input Rings * @ndev: NITROX device */ void nitrox_config_pkt_input_rings(struct nitrox_device *ndev) { int i; for (i = 0; i < ndev->nr_queues; i++) { struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i]; union nps_pkt_in_instr_rsize pkt_in_rsize; union nps_pkt_in_instr_baoff_dbell pkt_in_dbell; u64 offset; reset_pkt_input_ring(ndev, i); /** * step 4: * configure ring base address 16-byte aligned, * size and interrupt threshold. 
*/ offset = NPS_PKT_IN_INSTR_BADDRX(i); nitrox_write_csr(ndev, offset, cmdq->dma); /* configure ring size */ offset = NPS_PKT_IN_INSTR_RSIZEX(i); pkt_in_rsize.value = 0; pkt_in_rsize.s.rsize = ndev->qlen; nitrox_write_csr(ndev, offset, pkt_in_rsize.value); /* set high threshold for pkt input ring interrupts */ offset = NPS_PKT_IN_INT_LEVELSX(i); nitrox_write_csr(ndev, offset, 0xffffffff); /* step 5: clear off door bell counts */ offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i); pkt_in_dbell.value = 0; pkt_in_dbell.s.dbell = 0xffffffff; nitrox_write_csr(ndev, offset, pkt_in_dbell.value); /* enable the ring */ enable_pkt_input_ring(ndev, i); } } static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port) { union nps_pkt_slc_ctl pkt_slc_ctl; union nps_pkt_slc_cnts pkt_slc_cnts; int max_retries = MAX_CSR_RETRIES; u64 offset; /* step 1: disable slc port */ offset = NPS_PKT_SLC_CTLX(port); pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); pkt_slc_ctl.s.enb = 0; nitrox_write_csr(ndev, offset, pkt_slc_ctl.value); /* step 2 */ usleep_range(100, 150); /* wait to clear [ENB] */ do { pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); if (!pkt_slc_ctl.s.enb) break; udelay(50); } while (max_retries--); /* step 3: clear slc counters */ offset = NPS_PKT_SLC_CNTSX(port); pkt_slc_cnts.value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, pkt_slc_cnts.value); usleep_range(50, 100); } void enable_pkt_solicit_port(struct nitrox_device *ndev, int port) { union nps_pkt_slc_ctl pkt_slc_ctl; int max_retries = MAX_CSR_RETRIES; u64 offset; offset = NPS_PKT_SLC_CTLX(port); pkt_slc_ctl.value = 0; pkt_slc_ctl.s.enb = 1; /* * 8 trailing 0x00 bytes will be added * to the end of the outgoing packet. */ pkt_slc_ctl.s.z = 1; /* enable response header */ pkt_slc_ctl.s.rh = 1; nitrox_write_csr(ndev, offset, pkt_slc_ctl.value); /* wait to set [ENB] */ do { pkt_slc_ctl.value = nitrox_read_csr(ndev, offset); if (pkt_slc_ctl.s.enb) break; udelay(50); } while (max_retries--); } static void config_pkt_solicit_port(struct nitrox_device *ndev, int port) { union nps_pkt_slc_int_levels pkt_slc_int; u64 offset; reset_pkt_solicit_port(ndev, port); /* step 4: configure interrupt levels */ offset = NPS_PKT_SLC_INT_LEVELSX(port); pkt_slc_int.value = 0; /* time interrupt threshold */ pkt_slc_int.s.timet = 0x3fffff; nitrox_write_csr(ndev, offset, pkt_slc_int.value); /* enable the solicit port */ enable_pkt_solicit_port(ndev, port); } void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev) { int i; for (i = 0; i < ndev->nr_queues; i++) config_pkt_solicit_port(ndev, i); } /** * enable_nps_core_interrupts - enable NPS core interrutps * @ndev: NITROX device. * * This includes NPS core interrupts. 
*/ static void enable_nps_core_interrupts(struct nitrox_device *ndev) { union nps_core_int_ena_w1s core_int; /* NPS core interrutps */ core_int.value = 0; core_int.s.host_wr_err = 1; core_int.s.host_wr_timeout = 1; core_int.s.exec_wr_timeout = 1; core_int.s.npco_dma_malform = 1; core_int.s.host_nps_wr_err = 1; nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value); } void nitrox_config_nps_core_unit(struct nitrox_device *ndev) { union nps_core_gbl_vfcfg core_gbl_vfcfg; /* endian control information */ nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL); /* disable ILK interface */ core_gbl_vfcfg.value = 0; core_gbl_vfcfg.s.ilk_disable = 1; core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF; nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value); /* enable nps core interrupts */ enable_nps_core_interrupts(ndev); } /** * enable_nps_pkt_interrupts - enable NPS packet interrutps * @ndev: NITROX device. * * This includes NPS packet in and slc interrupts. */ static void enable_nps_pkt_interrupts(struct nitrox_device *ndev) { /* NPS packet in ring interrupts */ nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL)); nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL)); nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL)); /* NPS packet slc port interrupts */ nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL)); nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL)); nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL)); } void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev) { /* config input and solicit ports */ nitrox_config_pkt_input_rings(ndev); nitrox_config_pkt_solicit_ports(ndev); /* enable nps packet interrupts */ enable_nps_pkt_interrupts(ndev); } static void reset_aqm_ring(struct nitrox_device *ndev, int ring) { union aqmq_en aqmq_en_reg; union aqmq_activity_stat activity_stat; union aqmq_cmp_cnt cmp_cnt; int max_retries = MAX_CSR_RETRIES; u64 offset; /* step 1: disable the queue */ offset = AQMQ_ENX(ring); aqmq_en_reg.value = 0; aqmq_en_reg.queue_enable = 0; nitrox_write_csr(ndev, offset, aqmq_en_reg.value); /* step 2: wait for AQMQ_ACTIVITY_STATX[QUEUE_ACTIVE] to clear */ usleep_range(100, 150); offset = AQMQ_ACTIVITY_STATX(ring); do { activity_stat.value = nitrox_read_csr(ndev, offset); if (!activity_stat.queue_active) break; udelay(50); } while (max_retries--); /* step 3: clear commands completed count */ offset = AQMQ_CMP_CNTX(ring); cmp_cnt.value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, cmp_cnt.value); usleep_range(50, 100); } void enable_aqm_ring(struct nitrox_device *ndev, int ring) { union aqmq_en aqmq_en_reg; u64 offset; offset = AQMQ_ENX(ring); aqmq_en_reg.value = 0; aqmq_en_reg.queue_enable = 1; nitrox_write_csr(ndev, offset, aqmq_en_reg.value); usleep_range(50, 100); } void nitrox_config_aqm_rings(struct nitrox_device *ndev) { int ring; for (ring = 0; ring < ndev->nr_queues; ring++) { struct nitrox_cmdq *cmdq = ndev->aqmq[ring]; union aqmq_drbl drbl; union aqmq_qsz qsize; union aqmq_cmp_thr cmp_thr; u64 offset; /* steps 1 - 3 */ reset_aqm_ring(ndev, ring); /* step 4: clear doorbell count of ring */ offset = AQMQ_DRBLX(ring); drbl.value = 0; drbl.dbell_count = 0xFFFFFFFF; nitrox_write_csr(ndev, offset, drbl.value); /* step 5: configure host ring details */ /* set host address for next command of ring */ offset = AQMQ_NXT_CMDX(ring); nitrox_write_csr(ndev, offset, 0ULL); /* set host address of ring base */ offset = AQMQ_BADRX(ring); nitrox_write_csr(ndev, offset, cmdq->dma); /* set ring size */ offset = 
AQMQ_QSZX(ring); qsize.value = 0; qsize.host_queue_size = ndev->qlen; nitrox_write_csr(ndev, offset, qsize.value); /* set command completion threshold */ offset = AQMQ_CMP_THRX(ring); cmp_thr.value = 0; cmp_thr.commands_completed_threshold = 1; nitrox_write_csr(ndev, offset, cmp_thr.value); /* step 6: enable the queue */ enable_aqm_ring(ndev, ring); } } static void enable_aqm_interrupts(struct nitrox_device *ndev) { /* clear interrupt enable bits */ nitrox_write_csr(ndev, AQM_DBELL_OVF_LO_ENA_W1S, (~0ULL)); nitrox_write_csr(ndev, AQM_DBELL_OVF_HI_ENA_W1S, (~0ULL)); nitrox_write_csr(ndev, AQM_DMA_RD_ERR_LO_ENA_W1S, (~0ULL)); nitrox_write_csr(ndev, AQM_DMA_RD_ERR_HI_ENA_W1S, (~0ULL)); nitrox_write_csr(ndev, AQM_EXEC_NA_LO_ENA_W1S, (~0ULL)); nitrox_write_csr(ndev, AQM_EXEC_NA_HI_ENA_W1S, (~0ULL)); nitrox_write_csr(ndev, AQM_EXEC_ERR_LO_ENA_W1S, (~0ULL)); nitrox_write_csr(ndev, AQM_EXEC_ERR_HI_ENA_W1S, (~0ULL)); } void nitrox_config_aqm_unit(struct nitrox_device *ndev) { /* config aqm command queues */ nitrox_config_aqm_rings(ndev); /* enable aqm interrupts */ enable_aqm_interrupts(ndev); } void nitrox_config_pom_unit(struct nitrox_device *ndev) { union pom_int_ena_w1s pom_int; int i; /* enable pom interrupts */ pom_int.value = 0; pom_int.s.illegal_dport = 1; nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value); /* enable perf counters */ for (i = 0; i < ndev->hw.se_cores; i++) nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i)); } /** * nitrox_config_rand_unit - enable NITROX random number unit * @ndev: NITROX device */ void nitrox_config_rand_unit(struct nitrox_device *ndev) { union efl_rnm_ctl_status efl_rnm_ctl; u64 offset; offset = EFL_RNM_CTL_STATUS; efl_rnm_ctl.value = nitrox_read_csr(ndev, offset); efl_rnm_ctl.s.ent_en = 1; efl_rnm_ctl.s.rng_en = 1; nitrox_write_csr(ndev, offset, efl_rnm_ctl.value); } void nitrox_config_efl_unit(struct nitrox_device *ndev) { int i; for (i = 0; i < NR_CLUSTERS; i++) { union efl_core_int_ena_w1s efl_core_int; u64 offset; /* EFL core interrupts */ offset = EFL_CORE_INT_ENA_W1SX(i); efl_core_int.value = 0; efl_core_int.s.len_ovr = 1; efl_core_int.s.d_left = 1; efl_core_int.s.epci_decode_err = 1; nitrox_write_csr(ndev, offset, efl_core_int.value); offset = EFL_CORE_VF_ERR_INT0_ENA_W1SX(i); nitrox_write_csr(ndev, offset, (~0ULL)); offset = EFL_CORE_VF_ERR_INT1_ENA_W1SX(i); nitrox_write_csr(ndev, offset, (~0ULL)); } } void nitrox_config_bmi_unit(struct nitrox_device *ndev) { union bmi_ctl bmi_ctl; union bmi_int_ena_w1s bmi_int_ena; u64 offset; /* no threshold limits for PCIe */ offset = BMI_CTL; bmi_ctl.value = nitrox_read_csr(ndev, offset); bmi_ctl.s.max_pkt_len = 0xff; bmi_ctl.s.nps_free_thrsh = 0xff; bmi_ctl.s.nps_hdrq_thrsh = 0x7a; nitrox_write_csr(ndev, offset, bmi_ctl.value); /* enable interrupts */ offset = BMI_INT_ENA_W1S; bmi_int_ena.value = 0; bmi_int_ena.s.max_len_err_nps = 1; bmi_int_ena.s.pkt_rcv_err_nps = 1; bmi_int_ena.s.fpf_undrrn = 1; nitrox_write_csr(ndev, offset, bmi_int_ena.value); } void nitrox_config_bmo_unit(struct nitrox_device *ndev) { union bmo_ctl2 bmo_ctl2; u64 offset; /* no threshold limits for PCIe */ offset = BMO_CTL2; bmo_ctl2.value = nitrox_read_csr(ndev, offset); bmo_ctl2.s.nps_slc_buf_thrsh = 0xff; nitrox_write_csr(ndev, offset, bmo_ctl2.value); } void invalidate_lbc(struct nitrox_device *ndev) { union lbc_inval_ctl lbc_ctl; union lbc_inval_status lbc_stat; int max_retries = MAX_CSR_RETRIES; u64 offset; /* invalidate LBC */ offset = LBC_INVAL_CTL; lbc_ctl.value = nitrox_read_csr(ndev, offset); lbc_ctl.s.cam_inval_start = 1; 
nitrox_write_csr(ndev, offset, lbc_ctl.value); offset = LBC_INVAL_STATUS; do { lbc_stat.value = nitrox_read_csr(ndev, offset); if (lbc_stat.s.done) break; udelay(50); } while (max_retries--); } void nitrox_config_lbc_unit(struct nitrox_device *ndev) { union lbc_int_ena_w1s lbc_int_ena; u64 offset; invalidate_lbc(ndev); /* enable interrupts */ offset = LBC_INT_ENA_W1S; lbc_int_ena.value = 0; lbc_int_ena.s.dma_rd_err = 1; lbc_int_ena.s.over_fetch_err = 1; lbc_int_ena.s.cam_inval_abort = 1; lbc_int_ena.s.cam_hard_err = 1; nitrox_write_csr(ndev, offset, lbc_int_ena.value); offset = LBC_PLM_VF1_64_INT_ENA_W1S; nitrox_write_csr(ndev, offset, (~0ULL)); offset = LBC_PLM_VF65_128_INT_ENA_W1S; nitrox_write_csr(ndev, offset, (~0ULL)); offset = LBC_ELM_VF1_64_INT_ENA_W1S; nitrox_write_csr(ndev, offset, (~0ULL)); offset = LBC_ELM_VF65_128_INT_ENA_W1S; nitrox_write_csr(ndev, offset, (~0ULL)); } void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode) { union nps_core_gbl_vfcfg vfcfg; vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG); vfcfg.s.cfg = mode & 0x7; nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value); } static const char *get_core_option(u8 se_cores, u8 ae_cores) { const char *option = ""; if (ae_cores == AE_MAX_CORES) { switch (se_cores) { case SE_MAX_CORES: option = "60"; break; case 40: option = "60s"; break; } } else if (ae_cores == (AE_MAX_CORES / 2)) { option = "30"; } else { option = "60i"; } return option; } static const char *get_feature_option(u8 zip_cores, int core_freq) { if (zip_cores == 0) return ""; else if (zip_cores < ZIP_MAX_CORES) return "-C15"; if (core_freq >= 850) return "-C45"; else if (core_freq >= 750) return "-C35"; else if (core_freq >= 550) return "-C25"; return ""; } void nitrox_get_hwinfo(struct nitrox_device *ndev) { union emu_fuse_map emu_fuse; union rst_boot rst_boot; union fus_dat1 fus_dat1; unsigned char name[IFNAMSIZ * 2] = {}; int i, dead_cores; u64 offset; /* get core frequency */ offset = RST_BOOT; rst_boot.value = nitrox_read_csr(ndev, offset); ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK; for (i = 0; i < NR_CLUSTERS; i++) { offset = EMU_FUSE_MAPX(i); emu_fuse.value = nitrox_read_csr(ndev, offset); if (emu_fuse.s.valid) { dead_cores = hweight32(emu_fuse.s.ae_fuse); ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores; dead_cores = hweight16(emu_fuse.s.se_fuse); ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores; } } /* find zip hardware availability */ offset = FUS_DAT1; fus_dat1.value = nitrox_read_csr(ndev, offset); if (!fus_dat1.nozip) { dead_cores = hweight8(fus_dat1.zip_info); ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores; } /* determine the partname * CNN55<core option>-<freq><pincount>-<feature option>-<rev> */ snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u", get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores), ndev->hw.freq, get_feature_option(ndev->hw.zip_cores, ndev->hw.freq), ndev->hw.revision_id); /* copy partname */ strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname)); } void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev) { u64 value = ~0ULL; u64 reg_addr; /* Mailbox interrupt low enable set register */ reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1S; nitrox_write_csr(ndev, reg_addr, value); /* Mailbox interrupt high enable set register */ reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1S; nitrox_write_csr(ndev, reg_addr, value); } void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev) { u64 value = ~0ULL; u64 reg_addr; /* Mailbox interrupt low enable clear register */ reg_addr = 
NPS_PKT_MBOX_INT_LO_ENA_W1C; nitrox_write_csr(ndev, reg_addr, value); /* Mailbox interrupt high enable clear register */ reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1C; nitrox_write_csr(ndev, reg_addr, value); }
linux-master
drivers/crypto/cavium/nitrox/nitrox_hal.c
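nitrox_get_hwinfo() above derives the core clock from the RST_BOOT multiplier, (pnr_mul + 3) * PLL_REF_CLK, and subtracts fused-off cores (the population count of the fuse bits) from the per-cluster maximum. The small user-space sketch below walks through that arithmetic; the NR_CLUSTERS value, the fuse words and pnr_mul are made up for the demonstration, the valid-fuse check of the real driver is omitted, and the GCC/Clang __builtin_popcount() builtin is assumed.

#include <stdint.h>
#include <stdio.h>

#define PLL_REF_CLK		50	/* MHz, from nitrox_hal.c */
#define SE_CORES_PER_CLUSTER	16	/* per the emu_enable_cores() comment */
#define NR_CLUSTERS		4	/* assumed cluster count for the demo */

/* Count usable SE cores: each set fuse bit marks a dead core in a cluster. */
static unsigned int count_se_cores(const uint16_t *se_fuse)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < NR_CLUSTERS; i++)
		total += SE_CORES_PER_CLUSTER - __builtin_popcount(se_fuse[i]);
	return total;
}

int main(void)
{
	/* made-up fuse words: two dead cores in cluster 0, one in cluster 2 */
	uint16_t se_fuse[NR_CLUSTERS] = { 0x0003, 0x0000, 0x0100, 0x0000 };
	unsigned int pnr_mul = 14;	/* made-up RST_BOOT multiplier */

	printf("core clock: %u MHz\n", (pnr_mul + 3) * PLL_REF_CLK);
	printf("usable SE cores: %u\n", count_se_cores(se_fuse));
	return 0;
}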
#include "nitrox_common.h" int nitrox_crypto_register(void) { int err; err = nitrox_register_skciphers(); if (err) return err; err = nitrox_register_aeads(); if (err) { nitrox_unregister_skciphers(); return err; } return 0; } void nitrox_crypto_unregister(void) { nitrox_unregister_aeads(); nitrox_unregister_skciphers(); }
linux-master
drivers/crypto/cavium/nitrox/nitrox_algs.c
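nitrox_crypto_register() above registers the skciphers first and unregisters them again if AEAD registration fails, while nitrox_crypto_unregister() tears everything down in reverse order. The sketch below shows the same register-then-unwind idiom with hypothetical stand-in functions; the failure in register_aeads() is injected on purpose to exercise the rollback path.

#include <stdio.h>

/* Hypothetical stand-ins for the driver's register/unregister helpers. */
static int  register_skciphers(void)   { puts("skciphers registered");   return 0; }
static void unregister_skciphers(void) { puts("skciphers unregistered"); }
static int  register_aeads(void)       { puts("aead registration failed"); return -1; }
static void unregister_aeads(void)     { puts("aeads unregistered"); }

/* Same shape as nitrox_crypto_register(): unwind step 1 if step 2 fails. */
static int crypto_register(void)
{
	int err;

	err = register_skciphers();
	if (err)
		return err;

	err = register_aeads();
	if (err) {
		unregister_skciphers();	/* roll back the earlier step */
		return err;
	}
	return 0;
}

/* Teardown runs in reverse order of registration. */
static void crypto_unregister(void)
{
	unregister_aeads();
	unregister_skciphers();
}

int main(void)
{
	if (!crypto_register())
		crypto_unregister();
	else
		puts("registration rolled back cleanly");
	return 0;
}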
// SPDX-License-Identifier: GPL-2.0 #include <linux/gfp.h> #include <linux/workqueue.h> #include <crypto/internal/skcipher.h> #include "nitrox_common.h" #include "nitrox_dev.h" #include "nitrox_req.h" #include "nitrox_csr.h" /* SLC_STORE_INFO */ #define MIN_UDD_LEN 16 /* PKT_IN_HDR + SLC_STORE_INFO */ #define FDATA_SIZE 32 /* Base destination port for the solicited requests */ #define SOLICIT_BASE_DPORT 256 #define REQ_NOT_POSTED 1 #define REQ_BACKLOG 2 #define REQ_POSTED 3 /* * Response codes from SE microcode * 0x00 - Success * Completion with no error * 0x43 - ERR_GC_DATA_LEN_INVALID * Invalid Data length if Encryption Data length is * less than 16 bytes for AES-XTS and AES-CTS. * 0x45 - ERR_GC_CTX_LEN_INVALID * Invalid context length: CTXL != 23 words. * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID * DOCSIS support is enabled with other than * AES/DES-CBC mode encryption. * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID * Authentication offset is other than 0 with * Encryption IV source = 0. * Authentication offset is other than 8 (DES)/16 (AES) * with Encryption IV source = 1 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION * CRC32 is enabled for other than DOCSIS encryption. * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID * Invalid flag options in AES-CCM IV. */ static inline int incr_index(int index, int count, int max) { if ((index + count) >= max) index = index + count - max; else index += count; return index; } static void softreq_unmap_sgbufs(struct nitrox_softreq *sr) { struct nitrox_device *ndev = sr->ndev; struct device *dev = DEV(ndev); dma_unmap_sg(dev, sr->in.sg, sg_nents(sr->in.sg), DMA_BIDIRECTIONAL); dma_unmap_single(dev, sr->in.sgcomp_dma, sr->in.sgcomp_len, DMA_TO_DEVICE); kfree(sr->in.sgcomp); sr->in.sg = NULL; sr->in.sgmap_cnt = 0; dma_unmap_sg(dev, sr->out.sg, sg_nents(sr->out.sg), DMA_BIDIRECTIONAL); dma_unmap_single(dev, sr->out.sgcomp_dma, sr->out.sgcomp_len, DMA_TO_DEVICE); kfree(sr->out.sgcomp); sr->out.sg = NULL; sr->out.sgmap_cnt = 0; } static void softreq_destroy(struct nitrox_softreq *sr) { softreq_unmap_sgbufs(sr); kfree(sr); } /** * create_sg_component - create SG componets for N5 device. * @sr: Request structure * @sgtbl: SG table * @map_nents: number of dma mapped entries * * Component structure * * 63 48 47 32 31 16 15 0 * -------------------------------------- * | LEN0 | LEN1 | LEN2 | LEN3 | * |------------------------------------- * | PTR0 | * -------------------------------------- * | PTR1 | * -------------------------------------- * | PTR2 | * -------------------------------------- * | PTR3 | * -------------------------------------- * * Returns 0 if success or a negative errno code on error. 
*/ static int create_sg_component(struct nitrox_softreq *sr, struct nitrox_sgtable *sgtbl, int map_nents) { struct nitrox_device *ndev = sr->ndev; struct nitrox_sgcomp *sgcomp; struct scatterlist *sg; dma_addr_t dma; size_t sz_comp; int i, j, nr_sgcomp; nr_sgcomp = roundup(map_nents, 4) / 4; /* each component holds 4 dma pointers */ sz_comp = nr_sgcomp * sizeof(*sgcomp); sgcomp = kzalloc(sz_comp, sr->gfp); if (!sgcomp) return -ENOMEM; sgtbl->sgcomp = sgcomp; sg = sgtbl->sg; /* populate device sg component */ for (i = 0; i < nr_sgcomp; i++) { for (j = 0; j < 4 && sg; j++) { sgcomp[i].len[j] = cpu_to_be16(sg_dma_len(sg)); sgcomp[i].dma[j] = cpu_to_be64(sg_dma_address(sg)); sg = sg_next(sg); } } /* map the device sg component */ dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE); if (dma_mapping_error(DEV(ndev), dma)) { kfree(sgtbl->sgcomp); sgtbl->sgcomp = NULL; return -ENOMEM; } sgtbl->sgcomp_dma = dma; sgtbl->sgcomp_len = sz_comp; return 0; } /** * dma_map_inbufs - DMA map input sglist and creates sglist component * for N5 device. * @sr: Request structure * @req: Crypto request structre * * Returns 0 if successful or a negative errno code on error. */ static int dma_map_inbufs(struct nitrox_softreq *sr, struct se_crypto_request *req) { struct device *dev = DEV(sr->ndev); struct scatterlist *sg; int i, nents, ret = 0; nents = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); if (!nents) return -EINVAL; for_each_sg(req->src, sg, nents, i) sr->in.total_bytes += sg_dma_len(sg); sr->in.sg = req->src; sr->in.sgmap_cnt = nents; ret = create_sg_component(sr, &sr->in, sr->in.sgmap_cnt); if (ret) goto incomp_err; return 0; incomp_err: dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL); sr->in.sgmap_cnt = 0; return ret; } static int dma_map_outbufs(struct nitrox_softreq *sr, struct se_crypto_request *req) { struct device *dev = DEV(sr->ndev); int nents, ret = 0; nents = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL); if (!nents) return -EINVAL; sr->out.sg = req->dst; sr->out.sgmap_cnt = nents; ret = create_sg_component(sr, &sr->out, sr->out.sgmap_cnt); if (ret) goto outcomp_map_err; return 0; outcomp_map_err: dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_BIDIRECTIONAL); sr->out.sgmap_cnt = 0; sr->out.sg = NULL; return ret; } static inline int softreq_map_iobuf(struct nitrox_softreq *sr, struct se_crypto_request *creq) { int ret; ret = dma_map_inbufs(sr, creq); if (ret) return ret; ret = dma_map_outbufs(sr, creq); if (ret) softreq_unmap_sgbufs(sr); return ret; } static inline void backlog_list_add(struct nitrox_softreq *sr, struct nitrox_cmdq *cmdq) { INIT_LIST_HEAD(&sr->backlog); spin_lock_bh(&cmdq->backlog_qlock); list_add_tail(&sr->backlog, &cmdq->backlog_head); atomic_inc(&cmdq->backlog_count); atomic_set(&sr->status, REQ_BACKLOG); spin_unlock_bh(&cmdq->backlog_qlock); } static inline void response_list_add(struct nitrox_softreq *sr, struct nitrox_cmdq *cmdq) { INIT_LIST_HEAD(&sr->response); spin_lock_bh(&cmdq->resp_qlock); list_add_tail(&sr->response, &cmdq->response_head); spin_unlock_bh(&cmdq->resp_qlock); } static inline void response_list_del(struct nitrox_softreq *sr, struct nitrox_cmdq *cmdq) { spin_lock_bh(&cmdq->resp_qlock); list_del(&sr->response); spin_unlock_bh(&cmdq->resp_qlock); } static struct nitrox_softreq * get_first_response_entry(struct nitrox_cmdq *cmdq) { return list_first_entry_or_null(&cmdq->response_head, struct nitrox_softreq, response); } static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int 
qlen) { if (atomic_inc_return(&cmdq->pending_count) > qlen) { atomic_dec(&cmdq->pending_count); /* sync with other cpus */ smp_mb__after_atomic(); return true; } /* sync with other cpus */ smp_mb__after_atomic(); return false; } /** * post_se_instr - Post SE instruction to Packet Input ring * @sr: Request structure * @cmdq: Command queue structure * * Returns 0 if successful or a negative error code, * if no space in ring. */ static void post_se_instr(struct nitrox_softreq *sr, struct nitrox_cmdq *cmdq) { struct nitrox_device *ndev = sr->ndev; int idx; u8 *ent; spin_lock_bh(&cmdq->cmd_qlock); idx = cmdq->write_idx; /* copy the instruction */ ent = cmdq->base + (idx * cmdq->instr_size); memcpy(ent, &sr->instr, cmdq->instr_size); atomic_set(&sr->status, REQ_POSTED); response_list_add(sr, cmdq); sr->tstamp = jiffies; /* flush the command queue updates */ dma_wmb(); /* Ring doorbell with count 1 */ writeq(1, cmdq->dbell_csr_addr); cmdq->write_idx = incr_index(idx, 1, ndev->qlen); spin_unlock_bh(&cmdq->cmd_qlock); /* increment the posted command count */ atomic64_inc(&ndev->stats.posted); } static int post_backlog_cmds(struct nitrox_cmdq *cmdq) { struct nitrox_device *ndev = cmdq->ndev; struct nitrox_softreq *sr, *tmp; int ret = 0; if (!atomic_read(&cmdq->backlog_count)) return 0; spin_lock_bh(&cmdq->backlog_qlock); list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) { /* submit until space available */ if (unlikely(cmdq_full(cmdq, ndev->qlen))) { ret = -ENOSPC; break; } /* delete from backlog list */ list_del(&sr->backlog); atomic_dec(&cmdq->backlog_count); /* sync with other cpus */ smp_mb__after_atomic(); /* post the command */ post_se_instr(sr, cmdq); } spin_unlock_bh(&cmdq->backlog_qlock); return ret; } static int nitrox_enqueue_request(struct nitrox_softreq *sr) { struct nitrox_cmdq *cmdq = sr->cmdq; struct nitrox_device *ndev = sr->ndev; /* try to post backlog requests */ post_backlog_cmds(cmdq); if (unlikely(cmdq_full(cmdq, ndev->qlen))) { if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { /* increment drop count */ atomic64_inc(&ndev->stats.dropped); return -ENOSPC; } /* add to backlog list */ backlog_list_add(sr, cmdq); return -EINPROGRESS; } post_se_instr(sr, cmdq); return -EINPROGRESS; } /** * nitrox_process_se_request - Send request to SE core * @ndev: NITROX device * @req: Crypto request * @callback: Completion callback * @cb_arg: Completion callback arguments * * Returns 0 on success, or a negative error code. 
*/ int nitrox_process_se_request(struct nitrox_device *ndev, struct se_crypto_request *req, completion_t callback, void *cb_arg) { struct nitrox_softreq *sr; dma_addr_t ctx_handle = 0; int qno, ret = 0; if (!nitrox_ready(ndev)) return -ENODEV; sr = kzalloc(sizeof(*sr), req->gfp); if (!sr) return -ENOMEM; sr->ndev = ndev; sr->flags = req->flags; sr->gfp = req->gfp; sr->callback = callback; sr->cb_arg = cb_arg; atomic_set(&sr->status, REQ_NOT_POSTED); sr->resp.orh = req->orh; sr->resp.completion = req->comp; ret = softreq_map_iobuf(sr, req); if (ret) { kfree(sr); return ret; } /* get the context handle */ if (req->ctx_handle) { struct ctx_hdr *hdr; u8 *ctx_ptr; ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle; hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr)); ctx_handle = hdr->ctx_dma; } /* select the queue */ qno = smp_processor_id() % ndev->nr_queues; sr->cmdq = &ndev->pkt_inq[qno]; /* * 64-Byte Instruction Format * * ---------------------- * | DPTR0 | 8 bytes * ---------------------- * | PKT_IN_INSTR_HDR | 8 bytes * ---------------------- * | PKT_IN_HDR | 16 bytes * ---------------------- * | SLC_INFO | 16 bytes * ---------------------- * | Front data | 16 bytes * ---------------------- */ /* fill the packet instruction */ /* word 0 */ sr->instr.dptr0 = cpu_to_be64(sr->in.sgcomp_dma); /* word 1 */ sr->instr.ih.value = 0; sr->instr.ih.s.g = 1; sr->instr.ih.s.gsz = sr->in.sgmap_cnt; sr->instr.ih.s.ssz = sr->out.sgmap_cnt; sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr); sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes; sr->instr.ih.bev = cpu_to_be64(sr->instr.ih.value); /* word 2 */ sr->instr.irh.value[0] = 0; sr->instr.irh.s.uddl = MIN_UDD_LEN; /* context length in 64-bit words */ sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8); /* offset from solicit base port 256 */ sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno; sr->instr.irh.s.ctxc = req->ctrl.s.ctxc; sr->instr.irh.s.arg = req->ctrl.s.arg; sr->instr.irh.s.opcode = req->opcode; sr->instr.irh.bev[0] = cpu_to_be64(sr->instr.irh.value[0]); /* word 3 */ sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle); /* word 4 */ sr->instr.slc.value[0] = 0; sr->instr.slc.s.ssz = sr->out.sgmap_cnt; sr->instr.slc.bev[0] = cpu_to_be64(sr->instr.slc.value[0]); /* word 5 */ sr->instr.slc.s.rptr = cpu_to_be64(sr->out.sgcomp_dma); /* * No conversion for front data, * It goes into payload * put GP Header in front data */ sr->instr.fdata[0] = *((u64 *)&req->gph); sr->instr.fdata[1] = 0; ret = nitrox_enqueue_request(sr); if (ret == -ENOSPC) goto send_fail; return ret; send_fail: softreq_destroy(sr); return ret; } static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout) { return time_after_eq(jiffies, (tstamp + timeout)); } void backlog_qflush_work(struct work_struct *work) { struct nitrox_cmdq *cmdq; cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush); post_backlog_cmds(cmdq); } static bool sr_completed(struct nitrox_softreq *sr) { u64 orh = READ_ONCE(*sr->resp.orh); unsigned long timeout = jiffies + msecs_to_jiffies(1); if ((orh != PENDING_SIG) && (orh & 0xff)) return true; while (READ_ONCE(*sr->resp.completion) == PENDING_SIG) { if (time_after(jiffies, timeout)) { pr_err("comp not done\n"); return false; } } return true; } /** * process_response_list - process completed requests * @cmdq: Command queue structure * * Returns the number of responses processed. 
*/ static void process_response_list(struct nitrox_cmdq *cmdq) { struct nitrox_device *ndev = cmdq->ndev; struct nitrox_softreq *sr; int req_completed = 0, err = 0, budget; completion_t callback; void *cb_arg; /* check all pending requests */ budget = atomic_read(&cmdq->pending_count); while (req_completed < budget) { sr = get_first_response_entry(cmdq); if (!sr) break; if (atomic_read(&sr->status) != REQ_POSTED) break; /* check orh and completion bytes updates */ if (!sr_completed(sr)) { /* request not completed, check for timeout */ if (!cmd_timeout(sr->tstamp, ndev->timeout)) break; dev_err_ratelimited(DEV(ndev), "Request timeout, orh 0x%016llx\n", READ_ONCE(*sr->resp.orh)); } atomic_dec(&cmdq->pending_count); atomic64_inc(&ndev->stats.completed); /* sync with other cpus */ smp_mb__after_atomic(); /* remove from response list */ response_list_del(sr, cmdq); /* ORH error code */ err = READ_ONCE(*sr->resp.orh) & 0xff; callback = sr->callback; cb_arg = sr->cb_arg; softreq_destroy(sr); if (callback) callback(cb_arg, err); req_completed++; } } /* * pkt_slc_resp_tasklet - post processing of SE responses */ void pkt_slc_resp_tasklet(unsigned long data) { struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data); struct nitrox_cmdq *cmdq = qvec->cmdq; union nps_pkt_slc_cnts slc_cnts; /* read completion count */ slc_cnts.value = readq(cmdq->compl_cnt_csr_addr); /* resend the interrupt if more work to do */ slc_cnts.s.resend = 1; process_response_list(cmdq); /* * clear the interrupt with resend bit enabled, * MSI-X interrupt generates if Completion count > Threshold */ writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr); if (atomic_read(&cmdq->backlog_count)) schedule_work(&cmdq->backlog_qflush); }
linux-master
drivers/crypto/cavium/nitrox/nitrox_reqmgr.c
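The request manager above posts commands into a circular command queue: incr_index() wraps the write index at the queue length and cmdq_full() bounds the number of in-flight commands, diverting the rest to a backlog list. The user-space sketch below mirrors only that index and occupancy arithmetic; a plain int stands in for the driver's atomic_t and there is no real backlog, so it is illustrative only.

#include <stdio.h>

/* Mirror of the driver's incr_index(): advance a ring index with wraparound. */
static int incr_index(int index, int count, int max)
{
	if (index + count >= max)
		index = index + count - max;
	else
		index += count;
	return index;
}

/*
 * Mirror of cmdq_full(): the queue is "full" once the pending count would
 * exceed qlen.  The driver does this with atomic_inc_return() and backs the
 * increment out again; a plain counter is enough for the demonstration.
 */
static int cmdq_full(int *pending, int qlen)
{
	if (*pending + 1 > qlen)
		return 1;
	(*pending)++;
	return 0;
}

int main(void)
{
	int qlen = 4, write_idx = 0, pending = 0, i;

	for (i = 0; i < 6; i++) {
		if (cmdq_full(&pending, qlen)) {
			printf("cmd %d: queue full, would go to backlog\n", i);
			continue;
		}
		printf("cmd %d: posted at slot %d\n", i, write_idx);
		write_idx = incr_index(write_idx, 1, qlen);
	}
	return 0;
}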