Columns:
- python_code (string, length 0 to 1.8M)
- repo_name (string, 7 distinct values)
- file_path (string, length 5 to 99)
// SPDX-License-Identifier: GPL-2.0 #include <linux/pci.h> #include <linux/delay.h> #include "nitrox_dev.h" #include "nitrox_hal.h" #include "nitrox_common.h" #include "nitrox_isr.h" #include "nitrox_mbx.h" /** * num_vfs_valid - validate VF count * @num_vfs: number of VF(s) */ static inline bool num_vfs_valid(int num_vfs) { bool valid = false; switch (num_vfs) { case 16: case 32: case 64: case 128: valid = true; break; } return valid; } static inline enum vf_mode num_vfs_to_mode(int num_vfs) { enum vf_mode mode = 0; switch (num_vfs) { case 0: mode = __NDEV_MODE_PF; break; case 16: mode = __NDEV_MODE_VF16; break; case 32: mode = __NDEV_MODE_VF32; break; case 64: mode = __NDEV_MODE_VF64; break; case 128: mode = __NDEV_MODE_VF128; break; } return mode; } static inline int vf_mode_to_nr_queues(enum vf_mode mode) { int nr_queues = 0; switch (mode) { case __NDEV_MODE_PF: nr_queues = MAX_PF_QUEUES; break; case __NDEV_MODE_VF16: nr_queues = 8; break; case __NDEV_MODE_VF32: nr_queues = 4; break; case __NDEV_MODE_VF64: nr_queues = 2; break; case __NDEV_MODE_VF128: nr_queues = 1; break; } return nr_queues; } static void nitrox_pf_cleanup(struct nitrox_device *ndev) { /* PF has no queues in SR-IOV mode */ atomic_set(&ndev->state, __NDEV_NOT_READY); /* unregister crypto algorithms */ nitrox_crypto_unregister(); /* cleanup PF resources */ nitrox_unregister_interrupts(ndev); nitrox_common_sw_cleanup(ndev); } /** * nitrox_pf_reinit - re-initialize PF resources once SR-IOV is disabled * @ndev: NITROX device */ static int nitrox_pf_reinit(struct nitrox_device *ndev) { int err; /* allocate resources for PF */ err = nitrox_common_sw_init(ndev); if (err) return err; err = nitrox_register_interrupts(ndev); if (err) { nitrox_common_sw_cleanup(ndev); return err; } /* configure the AQM queues */ nitrox_config_aqm_rings(ndev); /* configure the packet queues */ nitrox_config_pkt_input_rings(ndev); nitrox_config_pkt_solicit_ports(ndev); /* set device to ready state */ atomic_set(&ndev->state, __NDEV_READY); /* register crypto algorithms */ return nitrox_crypto_register(); } static void nitrox_sriov_cleanup(struct nitrox_device *ndev) { /* unregister interrupts for PF in SR-IOV */ nitrox_sriov_unregister_interrupts(ndev); nitrox_mbox_cleanup(ndev); } static int nitrox_sriov_init(struct nitrox_device *ndev) { int ret; /* register interrupts for PF in SR-IOV */ ret = nitrox_sriov_register_interupts(ndev); if (ret) return ret; ret = nitrox_mbox_init(ndev); if (ret) goto sriov_init_fail; return 0; sriov_init_fail: nitrox_sriov_cleanup(ndev); return ret; } static int nitrox_sriov_enable(struct pci_dev *pdev, int num_vfs) { struct nitrox_device *ndev = pci_get_drvdata(pdev); int err; if (!num_vfs_valid(num_vfs)) { dev_err(DEV(ndev), "Invalid num_vfs %d\n", num_vfs); return -EINVAL; } if (pci_num_vf(pdev) == num_vfs) return num_vfs; err = pci_enable_sriov(pdev, num_vfs); if (err) { dev_err(DEV(ndev), "failed to enable PCI sriov %d\n", err); return err; } dev_info(DEV(ndev), "Enabled VF(s) %d\n", num_vfs); ndev->mode = num_vfs_to_mode(num_vfs); ndev->iov.num_vfs = num_vfs; ndev->iov.max_vf_queues = vf_mode_to_nr_queues(ndev->mode); /* set bit in flags */ set_bit(__NDEV_SRIOV_BIT, &ndev->flags); /* cleanup PF resources */ nitrox_pf_cleanup(ndev); /* PF SR-IOV mode initialization */ err = nitrox_sriov_init(ndev); if (err) goto iov_fail; config_nps_core_vfcfg_mode(ndev, ndev->mode); return num_vfs; iov_fail: pci_disable_sriov(pdev); /* clear bit in flags */ clear_bit(__NDEV_SRIOV_BIT, &ndev->flags); ndev->iov.num_vfs = 0; 
ndev->mode = __NDEV_MODE_PF; /* reset back to working mode in PF */ nitrox_pf_reinit(ndev); return err; } static int nitrox_sriov_disable(struct pci_dev *pdev) { struct nitrox_device *ndev = pci_get_drvdata(pdev); if (!test_bit(__NDEV_SRIOV_BIT, &ndev->flags)) return 0; if (pci_vfs_assigned(pdev)) { dev_warn(DEV(ndev), "VFs are attached to VM. Can't disable SR-IOV\n"); return -EPERM; } pci_disable_sriov(pdev); /* clear bit in flags */ clear_bit(__NDEV_SRIOV_BIT, &ndev->flags); ndev->iov.num_vfs = 0; ndev->iov.max_vf_queues = 0; ndev->mode = __NDEV_MODE_PF; /* cleanup PF SR-IOV resources */ nitrox_sriov_cleanup(ndev); config_nps_core_vfcfg_mode(ndev, ndev->mode); return nitrox_pf_reinit(ndev); } int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs) { if (!num_vfs) return nitrox_sriov_disable(pdev); return nitrox_sriov_enable(pdev, num_vfs); }
linux-master
drivers/crypto/cavium/nitrox/nitrox_sriov.c
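For context, nitrox_sriov_configure() in the file above is the PCI core's sriov_configure hook: a write to the device's sriov_numvfs sysfs attribute lands here, with 0 routed to nitrox_sriov_disable() and a non-zero VF count to nitrox_sriov_enable(). The fragment below is only a sketch of how such a hook is typically wired into a struct pci_driver; the driver's real registration (probe/remove, device IDs) lives elsewhere, and every name with the example_ prefix is hypothetical.

/* Illustrative sketch only -- not the driver's actual pci_driver definition. */
#include <linux/module.h>
#include <linux/pci.h>

int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);

static const struct pci_device_id example_nitrox_ids[] = {
	{ PCI_DEVICE(PCI_ANY_ID, PCI_ANY_ID) },	/* placeholder IDs */
	{ 0, }
};

static struct pci_driver example_nitrox_driver = {
	.name		 = "example-nitrox",
	.id_table	 = example_nitrox_ids,
	/* invoked when userspace writes .../sriov_numvfs; 0 means disable */
	.sriov_configure = nitrox_sriov_configure,
};
module_pci_driver(example_nitrox_driver);

MODULE_LICENSE("GPL");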
// SPDX-License-Identifier: GPL-2.0 #include <linux/crypto.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/printk.h> #include <crypto/aes.h> #include <crypto/skcipher.h> #include <crypto/scatterwalk.h> #include <crypto/ctr.h> #include <crypto/internal/des.h> #include <crypto/xts.h> #include "nitrox_dev.h" #include "nitrox_common.h" #include "nitrox_req.h" struct nitrox_cipher { const char *name; enum flexi_cipher value; }; /* * supported cipher list */ static const struct nitrox_cipher flexi_cipher_table[] = { { "null", CIPHER_NULL }, { "cbc(des3_ede)", CIPHER_3DES_CBC }, { "ecb(des3_ede)", CIPHER_3DES_ECB }, { "cbc(aes)", CIPHER_AES_CBC }, { "ecb(aes)", CIPHER_AES_ECB }, { "cfb(aes)", CIPHER_AES_CFB }, { "rfc3686(ctr(aes))", CIPHER_AES_CTR }, { "xts(aes)", CIPHER_AES_XTS }, { "cts(cbc(aes))", CIPHER_AES_CBC_CTS }, { NULL, CIPHER_INVALID } }; static enum flexi_cipher flexi_cipher_type(const char *name) { const struct nitrox_cipher *cipher = flexi_cipher_table; while (cipher->name) { if (!strcmp(cipher->name, name)) break; cipher++; } return cipher->value; } static void free_src_sglist(struct skcipher_request *skreq) { struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); kfree(nkreq->src); } static void free_dst_sglist(struct skcipher_request *skreq) { struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); kfree(nkreq->dst); } static void nitrox_skcipher_callback(void *arg, int err) { struct skcipher_request *skreq = arg; free_src_sglist(skreq); free_dst_sglist(skreq); if (err) { pr_err_ratelimited("request failed status 0x%0x\n", err); err = -EINVAL; } skcipher_request_complete(skreq, err); } static void nitrox_cbc_cipher_callback(void *arg, int err) { struct skcipher_request *skreq = arg; struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq); int ivsize = crypto_skcipher_ivsize(cipher); unsigned int start = skreq->cryptlen - ivsize; if (err) { nitrox_skcipher_callback(arg, err); return; } if (nkreq->creq.ctrl.s.arg == ENCRYPT) { scatterwalk_map_and_copy(skreq->iv, skreq->dst, start, ivsize, 0); } else { if (skreq->src != skreq->dst) { scatterwalk_map_and_copy(skreq->iv, skreq->src, start, ivsize, 0); } else { memcpy(skreq->iv, nkreq->iv_out, ivsize); kfree(nkreq->iv_out); } } nitrox_skcipher_callback(arg, err); } static int nitrox_skcipher_init(struct crypto_skcipher *tfm) { struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); struct crypto_ctx_hdr *chdr; /* get the first device */ nctx->ndev = nitrox_get_first_device(); if (!nctx->ndev) return -ENODEV; /* allocate nitrox crypto context */ chdr = crypto_alloc_context(nctx->ndev); if (!chdr) { nitrox_put_device(nctx->ndev); return -ENOMEM; } nctx->callback = nitrox_skcipher_callback; nctx->chdr = chdr; nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + sizeof(struct ctx_hdr)); crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) + sizeof(struct nitrox_kcrypt_request)); return 0; } static int nitrox_cbc_init(struct crypto_skcipher *tfm) { int err; struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); err = nitrox_skcipher_init(tfm); if (err) return err; nctx->callback = nitrox_cbc_cipher_callback; return 0; } static void nitrox_skcipher_exit(struct crypto_skcipher *tfm) { struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm); /* free the nitrox crypto context */ if (nctx->u.ctx_handle) { struct flexi_crypto_context *fctx = nctx->u.fctx; memzero_explicit(&fctx->crypto, sizeof(struct 
crypto_keys)); memzero_explicit(&fctx->auth, sizeof(struct auth_keys)); crypto_free_context((void *)nctx->chdr); } nitrox_put_device(nctx->ndev); nctx->u.ctx_handle = 0; nctx->ndev = NULL; } static inline int nitrox_skcipher_setkey(struct crypto_skcipher *cipher, int aes_keylen, const u8 *key, unsigned int keylen) { struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher); struct nitrox_crypto_ctx *nctx = crypto_tfm_ctx(tfm); struct flexi_crypto_context *fctx; union fc_ctx_flags *flags; enum flexi_cipher cipher_type; const char *name; name = crypto_tfm_alg_name(tfm); cipher_type = flexi_cipher_type(name); if (unlikely(cipher_type == CIPHER_INVALID)) { pr_err("unsupported cipher: %s\n", name); return -EINVAL; } /* fill crypto context */ fctx = nctx->u.fctx; flags = &fctx->flags; flags->f = 0; flags->w0.cipher_type = cipher_type; flags->w0.aes_keylen = aes_keylen; flags->w0.iv_source = IV_FROM_DPTR; flags->f = cpu_to_be64(*(u64 *)&flags->w0); /* copy the key to context */ memcpy(fctx->crypto.u.key, key, keylen); return 0; } static int nitrox_aes_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { int aes_keylen; aes_keylen = flexi_aes_keylen(keylen); if (aes_keylen < 0) return -EINVAL; return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); } static int alloc_src_sglist(struct skcipher_request *skreq, int ivsize) { struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); int nents = sg_nents(skreq->src) + 1; int ret; /* Allocate buffer to hold IV and input scatterlist array */ ret = alloc_src_req_buf(nkreq, nents, ivsize); if (ret) return ret; nitrox_creq_copy_iv(nkreq->src, skreq->iv, ivsize); nitrox_creq_set_src_sg(nkreq, nents, ivsize, skreq->src, skreq->cryptlen); return 0; } static int alloc_dst_sglist(struct skcipher_request *skreq, int ivsize) { struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); int nents = sg_nents(skreq->dst) + 3; int ret; /* Allocate buffer to hold ORH, COMPLETION and output scatterlist * array */ ret = alloc_dst_req_buf(nkreq, nents); if (ret) return ret; nitrox_creq_set_orh(nkreq); nitrox_creq_set_comp(nkreq); nitrox_creq_set_dst_sg(nkreq, nents, ivsize, skreq->dst, skreq->cryptlen); return 0; } static int nitrox_skcipher_crypt(struct skcipher_request *skreq, bool enc) { struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq); struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher); struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); int ivsize = crypto_skcipher_ivsize(cipher); struct se_crypto_request *creq; int ret; creq = &nkreq->creq; creq->flags = skreq->base.flags; creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; /* fill the request */ creq->ctrl.value = 0; creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; creq->ctrl.s.arg = (enc ? 
ENCRYPT : DECRYPT); /* param0: length of the data to be encrypted */ creq->gph.param0 = cpu_to_be16(skreq->cryptlen); creq->gph.param1 = 0; /* param2: encryption data offset */ creq->gph.param2 = cpu_to_be16(ivsize); creq->gph.param3 = 0; creq->ctx_handle = nctx->u.ctx_handle; creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); ret = alloc_src_sglist(skreq, ivsize); if (ret) return ret; ret = alloc_dst_sglist(skreq, ivsize); if (ret) { free_src_sglist(skreq); return ret; } /* send the crypto request */ return nitrox_process_se_request(nctx->ndev, creq, nctx->callback, skreq); } static int nitrox_cbc_decrypt(struct skcipher_request *skreq) { struct nitrox_kcrypt_request *nkreq = skcipher_request_ctx(skreq); struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(skreq); int ivsize = crypto_skcipher_ivsize(cipher); gfp_t flags = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; unsigned int start = skreq->cryptlen - ivsize; if (skreq->src != skreq->dst) return nitrox_skcipher_crypt(skreq, false); nkreq->iv_out = kmalloc(ivsize, flags); if (!nkreq->iv_out) return -ENOMEM; scatterwalk_map_and_copy(nkreq->iv_out, skreq->src, start, ivsize, 0); return nitrox_skcipher_crypt(skreq, false); } static int nitrox_aes_encrypt(struct skcipher_request *skreq) { return nitrox_skcipher_crypt(skreq, true); } static int nitrox_aes_decrypt(struct skcipher_request *skreq) { return nitrox_skcipher_crypt(skreq, false); } static int nitrox_3des_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { return verify_skcipher_des3_key(cipher, key) ?: nitrox_skcipher_setkey(cipher, 0, key, keylen); } static int nitrox_3des_encrypt(struct skcipher_request *skreq) { return nitrox_skcipher_crypt(skreq, true); } static int nitrox_3des_decrypt(struct skcipher_request *skreq) { return nitrox_skcipher_crypt(skreq, false); } static int nitrox_aes_xts_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher); struct flexi_crypto_context *fctx; int aes_keylen, ret; ret = xts_verify_key(cipher, key, keylen); if (ret) return ret; keylen /= 2; aes_keylen = flexi_aes_keylen(keylen); if (aes_keylen < 0) return -EINVAL; fctx = nctx->u.fctx; /* copy KEY2 */ memcpy(fctx->auth.u.key2, (key + keylen), keylen); return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); } static int nitrox_aes_ctr_rfc3686_setkey(struct crypto_skcipher *cipher, const u8 *key, unsigned int keylen) { struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(cipher); struct flexi_crypto_context *fctx; int aes_keylen; if (keylen < CTR_RFC3686_NONCE_SIZE) return -EINVAL; fctx = nctx->u.fctx; memcpy(fctx->crypto.iv, key + (keylen - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE); keylen -= CTR_RFC3686_NONCE_SIZE; aes_keylen = flexi_aes_keylen(keylen); if (aes_keylen < 0) return -EINVAL; return nitrox_skcipher_setkey(cipher, aes_keylen, key, keylen); } static struct skcipher_alg nitrox_skciphers[] = { { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "n5_cbc(aes)", .cra_priority = PRIO, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = nitrox_aes_setkey, .encrypt = nitrox_aes_encrypt, .decrypt = nitrox_cbc_decrypt, .init = nitrox_cbc_init, .exit = nitrox_skcipher_exit, }, { .base = { 
.cra_name = "ecb(aes)", .cra_driver_name = "n5_ecb(aes)", .cra_priority = PRIO, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = nitrox_aes_setkey, .encrypt = nitrox_aes_encrypt, .decrypt = nitrox_aes_decrypt, .init = nitrox_skcipher_init, .exit = nitrox_skcipher_exit, }, { .base = { .cra_name = "cfb(aes)", .cra_driver_name = "n5_cfb(aes)", .cra_priority = PRIO, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = nitrox_aes_setkey, .encrypt = nitrox_aes_encrypt, .decrypt = nitrox_aes_decrypt, .init = nitrox_skcipher_init, .exit = nitrox_skcipher_exit, }, { .base = { .cra_name = "xts(aes)", .cra_driver_name = "n5_xts(aes)", .cra_priority = PRIO, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = nitrox_aes_xts_setkey, .encrypt = nitrox_aes_encrypt, .decrypt = nitrox_aes_decrypt, .init = nitrox_skcipher_init, .exit = nitrox_skcipher_exit, }, { .base = { .cra_name = "rfc3686(ctr(aes))", .cra_driver_name = "n5_rfc3686(ctr(aes))", .cra_priority = PRIO, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .ivsize = CTR_RFC3686_IV_SIZE, .init = nitrox_skcipher_init, .exit = nitrox_skcipher_exit, .setkey = nitrox_aes_ctr_rfc3686_setkey, .encrypt = nitrox_aes_encrypt, .decrypt = nitrox_aes_decrypt, }, { .base = { .cra_name = "cts(cbc(aes))", .cra_driver_name = "n5_cts(cbc(aes))", .cra_priority = PRIO, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .setkey = nitrox_aes_setkey, .encrypt = nitrox_aes_encrypt, .decrypt = nitrox_aes_decrypt, .init = nitrox_skcipher_init, .exit = nitrox_skcipher_exit, }, { .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "n5_cbc(des3_ede)", .cra_priority = PRIO, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = nitrox_3des_setkey, .encrypt = nitrox_3des_encrypt, .decrypt = nitrox_cbc_decrypt, .init = nitrox_cbc_init, .exit = nitrox_skcipher_exit, }, { .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "n5_ecb(des3_ede)", .cra_priority = PRIO, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = DES3_EDE_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 
0, .cra_module = THIS_MODULE, }, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, .setkey = nitrox_3des_setkey, .encrypt = nitrox_3des_encrypt, .decrypt = nitrox_3des_decrypt, .init = nitrox_skcipher_init, .exit = nitrox_skcipher_exit, } }; int nitrox_register_skciphers(void) { return crypto_register_skciphers(nitrox_skciphers, ARRAY_SIZE(nitrox_skciphers)); } void nitrox_unregister_skciphers(void) { crypto_unregister_skciphers(nitrox_skciphers, ARRAY_SIZE(nitrox_skciphers)); }
linux-master
drivers/crypto/cavium/nitrox/nitrox_skcipher.c
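The skciphers registered above (driver names prefixed n5_) are reached through the generic kernel crypto API rather than called directly. Below is a minimal, synchronous usage sketch for the cbc(aes) transform; the wrapper function is hypothetical and error handling is trimmed, but the crypto_skcipher_* calls and the DECLARE_CRYPTO_WAIT/crypto_wait_req completion pattern are the standard API.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Encrypt len bytes of buf in place with cbc(aes); illustrative helper. */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* wait synchronously for the async driver to complete the request */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}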
// SPDX-License-Identifier: GPL-2.0 #include <linux/pci.h> #include <linux/printk.h> #include <linux/slab.h> #include "nitrox_dev.h" #include "nitrox_csr.h" #include "nitrox_common.h" #include "nitrox_hal.h" #include "nitrox_isr.h" #include "nitrox_mbx.h" /* * One vector for each type of ring * - NPS packet ring, AQMQ ring and ZQMQ ring */ #define NR_RING_VECTORS 3 #define NR_NON_RING_VECTORS 1 /* base entry for packet ring/port */ #define PKT_RING_MSIX_BASE 0 #define NON_RING_MSIX_BASE 192 /** * nps_pkt_slc_isr - IRQ handler for NPS solicit port * @irq: irq number * @data: argument */ static irqreturn_t nps_pkt_slc_isr(int irq, void *data) { struct nitrox_q_vector *qvec = data; union nps_pkt_slc_cnts slc_cnts; struct nitrox_cmdq *cmdq = qvec->cmdq; slc_cnts.value = readq(cmdq->compl_cnt_csr_addr); /* New packet on SLC output port */ if (slc_cnts.s.slc_int) tasklet_hi_schedule(&qvec->resp_tasklet); return IRQ_HANDLED; } static void clear_nps_core_err_intr(struct nitrox_device *ndev) { u64 value; /* Write 1 to clear */ value = nitrox_read_csr(ndev, NPS_CORE_INT); nitrox_write_csr(ndev, NPS_CORE_INT, value); dev_err_ratelimited(DEV(ndev), "NSP_CORE_INT 0x%016llx\n", value); } static void clear_nps_pkt_err_intr(struct nitrox_device *ndev) { union nps_pkt_int pkt_int; unsigned long value, offset; int i; pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT); dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT 0x%016llx\n", pkt_int.value); if (pkt_int.s.slc_err) { offset = NPS_PKT_SLC_ERR_TYPE; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); dev_err_ratelimited(DEV(ndev), "NPS_PKT_SLC_ERR_TYPE 0x%016lx\n", value); offset = NPS_PKT_SLC_RERR_LO; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); /* enable the solicit ports */ for_each_set_bit(i, &value, BITS_PER_LONG) enable_pkt_solicit_port(ndev, i); dev_err_ratelimited(DEV(ndev), "NPS_PKT_SLC_RERR_LO 0x%016lx\n", value); offset = NPS_PKT_SLC_RERR_HI; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); dev_err_ratelimited(DEV(ndev), "NPS_PKT_SLC_RERR_HI 0x%016lx\n", value); } if (pkt_int.s.in_err) { offset = NPS_PKT_IN_ERR_TYPE; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); dev_err_ratelimited(DEV(ndev), "NPS_PKT_IN_ERR_TYPE 0x%016lx\n", value); offset = NPS_PKT_IN_RERR_LO; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); /* enable the input ring */ for_each_set_bit(i, &value, BITS_PER_LONG) enable_pkt_input_ring(ndev, i); dev_err_ratelimited(DEV(ndev), "NPS_PKT_IN_RERR_LO 0x%016lx\n", value); offset = NPS_PKT_IN_RERR_HI; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); dev_err_ratelimited(DEV(ndev), "NPS_PKT_IN_RERR_HI 0x%016lx\n", value); } } static void clear_pom_err_intr(struct nitrox_device *ndev) { u64 value; value = nitrox_read_csr(ndev, POM_INT); nitrox_write_csr(ndev, POM_INT, value); dev_err_ratelimited(DEV(ndev), "POM_INT 0x%016llx\n", value); } static void clear_pem_err_intr(struct nitrox_device *ndev) { u64 value; value = nitrox_read_csr(ndev, PEM0_INT); nitrox_write_csr(ndev, PEM0_INT, value); dev_err_ratelimited(DEV(ndev), "PEM(0)_INT 0x%016llx\n", value); } static void clear_lbc_err_intr(struct nitrox_device *ndev) { union lbc_int lbc_int; u64 value, offset; int i; lbc_int.value = nitrox_read_csr(ndev, LBC_INT); dev_err_ratelimited(DEV(ndev), "LBC_INT 0x%016llx\n", lbc_int.value); if (lbc_int.s.dma_rd_err) { for (i = 0; i < NR_CLUSTERS; i++) { offset = 
EFL_CORE_VF_ERR_INT0X(i); value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); offset = EFL_CORE_VF_ERR_INT1X(i); value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); } } if (lbc_int.s.cam_soft_err) { dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n"); invalidate_lbc(ndev); } if (lbc_int.s.pref_dat_len_mismatch_err) { offset = LBC_PLM_VF1_64_INT; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); offset = LBC_PLM_VF65_128_INT; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); } if (lbc_int.s.rd_dat_len_mismatch_err) { offset = LBC_ELM_VF1_64_INT; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); offset = LBC_ELM_VF65_128_INT; value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); } nitrox_write_csr(ndev, LBC_INT, lbc_int.value); } static void clear_efl_err_intr(struct nitrox_device *ndev) { int i; for (i = 0; i < NR_CLUSTERS; i++) { union efl_core_int core_int; u64 value, offset; offset = EFL_CORE_INTX(i); core_int.value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, core_int.value); dev_err_ratelimited(DEV(ndev), "ELF_CORE(%d)_INT 0x%016llx\n", i, core_int.value); if (core_int.s.se_err) { offset = EFL_CORE_SE_ERR_INTX(i); value = nitrox_read_csr(ndev, offset); nitrox_write_csr(ndev, offset, value); } } } static void clear_bmi_err_intr(struct nitrox_device *ndev) { u64 value; value = nitrox_read_csr(ndev, BMI_INT); nitrox_write_csr(ndev, BMI_INT, value); dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value); } static void nps_core_int_tasklet(unsigned long data) { struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data); struct nitrox_device *ndev = qvec->ndev; /* if pf mode do queue recovery */ if (ndev->mode == __NDEV_MODE_PF) { } else { /** * if VF(s) enabled communicate the error information * to VF(s) */ } } /* * nps_core_int_isr - interrupt handler for NITROX errors and * mailbox communication */ static irqreturn_t nps_core_int_isr(int irq, void *data) { struct nitrox_q_vector *qvec = data; struct nitrox_device *ndev = qvec->ndev; union nps_core_int_active core_int; core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE); if (core_int.s.nps_core) clear_nps_core_err_intr(ndev); if (core_int.s.nps_pkt) clear_nps_pkt_err_intr(ndev); if (core_int.s.pom) clear_pom_err_intr(ndev); if (core_int.s.pem) clear_pem_err_intr(ndev); if (core_int.s.lbc) clear_lbc_err_intr(ndev); if (core_int.s.efl) clear_efl_err_intr(ndev); if (core_int.s.bmi) clear_bmi_err_intr(ndev); /* Mailbox interrupt */ if (core_int.s.mbox) nitrox_pf2vf_mbox_handler(ndev); /* If more work callback the ISR, set resend */ core_int.s.resend = 1; nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value); return IRQ_HANDLED; } void nitrox_unregister_interrupts(struct nitrox_device *ndev) { struct pci_dev *pdev = ndev->pdev; int i; for (i = 0; i < ndev->num_vecs; i++) { struct nitrox_q_vector *qvec; int vec; qvec = ndev->qvec + i; if (!qvec->valid) continue; /* get the vector number */ vec = pci_irq_vector(pdev, i); irq_set_affinity_hint(vec, NULL); free_irq(vec, qvec); tasklet_disable(&qvec->resp_tasklet); tasklet_kill(&qvec->resp_tasklet); qvec->valid = false; } kfree(ndev->qvec); ndev->qvec = NULL; pci_free_irq_vectors(pdev); } int nitrox_register_interrupts(struct nitrox_device *ndev) { struct pci_dev *pdev = ndev->pdev; struct nitrox_q_vector *qvec; int nr_vecs, vec, cpu; int ret, i; /* * PF MSI-X vectors * * Entry 0: NPS PKT 
ring 0 * Entry 1: AQMQ ring 0 * Entry 2: ZQM ring 0 * Entry 3: NPS PKT ring 1 * Entry 4: AQMQ ring 1 * Entry 5: ZQM ring 1 * .... * Entry 192: NPS_CORE_INT_ACTIVE */ nr_vecs = pci_msix_vec_count(pdev); if (nr_vecs < 0) { dev_err(DEV(ndev), "Error in getting vec count %d\n", nr_vecs); return nr_vecs; } /* Enable MSI-X */ ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX); if (ret < 0) { dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs); return ret; } ndev->num_vecs = nr_vecs; ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL); if (!ndev->qvec) { pci_free_irq_vectors(pdev); return -ENOMEM; } /* request irqs for packet rings/ports */ for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) { qvec = &ndev->qvec[i]; qvec->ring = i / NR_RING_VECTORS; if (qvec->ring >= ndev->nr_queues) break; qvec->cmdq = &ndev->pkt_inq[qvec->ring]; snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring); /* get the vector number */ vec = pci_irq_vector(pdev, i); ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec); if (ret) { dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n", qvec->ring); goto irq_fail; } cpu = qvec->ring % num_online_cpus(); irq_set_affinity_hint(vec, get_cpu_mask(cpu)); tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet, (unsigned long)qvec); qvec->valid = true; } /* request irqs for non ring vectors */ i = NON_RING_MSIX_BASE; qvec = &ndev->qvec[i]; qvec->ndev = ndev; snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i); /* get the vector number */ vec = pci_irq_vector(pdev, i); ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec); if (ret) { dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i); goto irq_fail; } cpu = num_online_cpus(); irq_set_affinity_hint(vec, get_cpu_mask(cpu)); tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet, (unsigned long)qvec); qvec->valid = true; return 0; irq_fail: nitrox_unregister_interrupts(ndev); return ret; } void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev) { struct pci_dev *pdev = ndev->pdev; int i; for (i = 0; i < ndev->num_vecs; i++) { struct nitrox_q_vector *qvec; int vec; qvec = ndev->qvec + i; if (!qvec->valid) continue; vec = ndev->iov.msix.vector; irq_set_affinity_hint(vec, NULL); free_irq(vec, qvec); tasklet_disable(&qvec->resp_tasklet); tasklet_kill(&qvec->resp_tasklet); qvec->valid = false; } kfree(ndev->qvec); ndev->qvec = NULL; pci_disable_msix(pdev); } int nitrox_sriov_register_interupts(struct nitrox_device *ndev) { struct pci_dev *pdev = ndev->pdev; struct nitrox_q_vector *qvec; int vec, cpu; int ret; /** * only non ring vectors i.e Entry 192 is available * for PF in SR-IOV mode. 
*/ ndev->iov.msix.entry = NON_RING_MSIX_BASE; ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS); if (ret) { dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n", NON_RING_MSIX_BASE); return ret; } qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL); if (!qvec) { pci_disable_msix(pdev); return -ENOMEM; } qvec->ndev = ndev; ndev->qvec = qvec; ndev->num_vecs = NR_NON_RING_VECTORS; snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", NON_RING_MSIX_BASE); vec = ndev->iov.msix.vector; ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec); if (ret) { dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", NON_RING_MSIX_BASE); goto iov_irq_fail; } cpu = num_online_cpus(); irq_set_affinity_hint(vec, get_cpu_mask(cpu)); tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet, (unsigned long)qvec); qvec->valid = true; return 0; iov_irq_fail: nitrox_sriov_unregister_interrupts(ndev); return ret; }
linux-master
drivers/crypto/cavium/nitrox/nitrox_isr.c
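A note on the vector layout used above: the PF expects three MSI-X entries per ring (NPS packet, AQMQ, ZQMQ) starting at entry 0, plus a single non-ring entry at 192 for NPS_CORE_INT_ACTIVE, and this file only requests the first vector of each ring triple. A trivial sketch of that index arithmetic, for reference (the helper name is illustrative):

#define NR_RING_VECTORS		3	/* NPS packet, AQMQ, ZQMQ per ring */
#define PKT_RING_MSIX_BASE	0
#define NON_RING_MSIX_BASE	192	/* NPS_CORE_INT_ACTIVE */

/* MSI-X entry of the NPS packet vector for a given ring: 0, 3, 6, ... */
static inline int example_pkt_ring_msix_entry(int ring)
{
	return PKT_RING_MSIX_BASE + ring * NR_RING_VECTORS;
}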
// SPDX-License-Identifier: GPL-2.0 #include <linux/kernel.h> #include <linux/printk.h> #include <linux/crypto.h> #include <linux/rtnetlink.h> #include <crypto/aead.h> #include <crypto/authenc.h> #include <crypto/des.h> #include <crypto/internal/aead.h> #include <crypto/scatterwalk.h> #include <crypto/gcm.h> #include "nitrox_dev.h" #include "nitrox_common.h" #include "nitrox_req.h" #define GCM_AES_SALT_SIZE 4 union gph_p3 { struct { #ifdef __BIG_ENDIAN_BITFIELD u16 iv_offset : 8; u16 auth_offset : 8; #else u16 auth_offset : 8; u16 iv_offset : 8; #endif }; u16 param; }; static int nitrox_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { int aes_keylen; struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); struct flexi_crypto_context *fctx; union fc_ctx_flags flags; aes_keylen = flexi_aes_keylen(keylen); if (aes_keylen < 0) return -EINVAL; /* fill crypto context */ fctx = nctx->u.fctx; flags.fu = be64_to_cpu(fctx->flags.f); flags.w0.aes_keylen = aes_keylen; fctx->flags.f = cpu_to_be64(flags.fu); /* copy enc key to context */ memset(&fctx->crypto, 0, sizeof(fctx->crypto)); memcpy(fctx->crypto.u.key, key, keylen); return 0; } static int nitrox_aead_setauthsize(struct crypto_aead *aead, unsigned int authsize) { struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); struct flexi_crypto_context *fctx = nctx->u.fctx; union fc_ctx_flags flags; flags.fu = be64_to_cpu(fctx->flags.f); flags.w0.mac_len = authsize; fctx->flags.f = cpu_to_be64(flags.fu); aead->authsize = authsize; return 0; } static int nitrox_aes_gcm_setauthsize(struct crypto_aead *aead, unsigned int authsize) { switch (authsize) { case 4: case 8: case 12: case 13: case 14: case 15: case 16: break; default: return -EINVAL; } return nitrox_aead_setauthsize(aead, authsize); } static int alloc_src_sglist(struct nitrox_kcrypt_request *nkreq, struct scatterlist *src, char *iv, int ivsize, int buflen) { int nents = sg_nents_for_len(src, buflen); int ret; if (nents < 0) return nents; /* IV entry */ nents += 1; /* Allocate buffer to hold IV and input scatterlist array */ ret = alloc_src_req_buf(nkreq, nents, ivsize); if (ret) return ret; nitrox_creq_copy_iv(nkreq->src, iv, ivsize); nitrox_creq_set_src_sg(nkreq, nents, ivsize, src, buflen); return 0; } static int alloc_dst_sglist(struct nitrox_kcrypt_request *nkreq, struct scatterlist *dst, int ivsize, int buflen) { int nents = sg_nents_for_len(dst, buflen); int ret; if (nents < 0) return nents; /* IV, ORH, COMPLETION entries */ nents += 3; /* Allocate buffer to hold ORH, COMPLETION and output scatterlist * array */ ret = alloc_dst_req_buf(nkreq, nents); if (ret) return ret; nitrox_creq_set_orh(nkreq); nitrox_creq_set_comp(nkreq); nitrox_creq_set_dst_sg(nkreq, nents, ivsize, dst, buflen); return 0; } static void free_src_sglist(struct nitrox_kcrypt_request *nkreq) { kfree(nkreq->src); } static void free_dst_sglist(struct nitrox_kcrypt_request *nkreq) { kfree(nkreq->dst); } static int nitrox_set_creq(struct nitrox_aead_rctx *rctx) { struct se_crypto_request *creq = &rctx->nkreq.creq; union gph_p3 param3; int ret; creq->flags = rctx->flags; creq->gfp = (rctx->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; creq->ctrl.value = 0; creq->opcode = FLEXI_CRYPTO_ENCRYPT_HMAC; creq->ctrl.s.arg = rctx->ctrl_arg; creq->gph.param0 = cpu_to_be16(rctx->cryptlen); creq->gph.param1 = cpu_to_be16(rctx->cryptlen + rctx->assoclen); creq->gph.param2 = cpu_to_be16(rctx->ivsize + rctx->assoclen); param3.iv_offset = 0; param3.auth_offset = rctx->ivsize; creq->gph.param3 = cpu_to_be16(param3.param); creq->ctx_handle = rctx->ctx_handle; creq->ctrl.s.ctxl = sizeof(struct flexi_crypto_context); ret = alloc_src_sglist(&rctx->nkreq, rctx->src, rctx->iv, rctx->ivsize, rctx->srclen); if (ret) return ret; ret = alloc_dst_sglist(&rctx->nkreq, rctx->dst, rctx->ivsize, rctx->dstlen); if (ret) { free_src_sglist(&rctx->nkreq); return ret; } return 0; } static void nitrox_aead_callback(void *arg, int err) { struct aead_request *areq = arg; struct nitrox_aead_rctx *rctx = aead_request_ctx(areq); free_src_sglist(&rctx->nkreq); free_dst_sglist(&rctx->nkreq); if (err) { pr_err_ratelimited("request failed status 0x%0x\n", err); err = -EINVAL; } aead_request_complete(areq, err); } static inline bool nitrox_aes_gcm_assoclen_supported(unsigned int assoclen) { if (assoclen <= 512) return true; return false; } static int nitrox_aes_gcm_enc(struct aead_request *areq) { struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); struct nitrox_aead_rctx *rctx = aead_request_ctx(areq); struct se_crypto_request *creq = &rctx->nkreq.creq; struct flexi_crypto_context *fctx = nctx->u.fctx; int ret; if (!nitrox_aes_gcm_assoclen_supported(areq->assoclen)) return -EINVAL; memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE); rctx->cryptlen = areq->cryptlen; rctx->assoclen = areq->assoclen; rctx->srclen = areq->assoclen + areq->cryptlen; rctx->dstlen = rctx->srclen + aead->authsize; rctx->iv = &areq->iv[GCM_AES_SALT_SIZE]; rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE; rctx->flags = areq->base.flags; rctx->ctx_handle = nctx->u.ctx_handle; rctx->src = areq->src; rctx->dst = areq->dst; rctx->ctrl_arg = ENCRYPT; ret = nitrox_set_creq(rctx); if (ret) return ret; /* send the crypto request */ return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback, areq); } static int nitrox_aes_gcm_dec(struct aead_request *areq) { struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); struct nitrox_aead_rctx *rctx = aead_request_ctx(areq); struct se_crypto_request *creq = &rctx->nkreq.creq; struct flexi_crypto_context *fctx = nctx->u.fctx; int ret; if (!nitrox_aes_gcm_assoclen_supported(areq->assoclen)) return -EINVAL; memcpy(fctx->crypto.iv, areq->iv, GCM_AES_SALT_SIZE); rctx->cryptlen = areq->cryptlen - aead->authsize; rctx->assoclen = areq->assoclen; rctx->srclen = areq->cryptlen + areq->assoclen; rctx->dstlen = rctx->srclen - aead->authsize; rctx->iv = &areq->iv[GCM_AES_SALT_SIZE]; rctx->ivsize = GCM_AES_IV_SIZE - GCM_AES_SALT_SIZE; rctx->flags = areq->base.flags; rctx->ctx_handle = nctx->u.ctx_handle; rctx->src = areq->src; rctx->dst = areq->dst; rctx->ctrl_arg = DECRYPT; ret = nitrox_set_creq(rctx); if (ret) return ret; /* send the crypto request */ return nitrox_process_se_request(nctx->ndev, creq, nitrox_aead_callback, areq); } static int nitrox_aead_init(struct crypto_aead *aead) { struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); struct crypto_ctx_hdr *chdr; /* get the first device */ nctx->ndev = nitrox_get_first_device(); if (!nctx->ndev) return -ENODEV; /* allocate nitrox crypto context */ chdr = 
crypto_alloc_context(nctx->ndev); if (!chdr) { nitrox_put_device(nctx->ndev); return -ENOMEM; } nctx->chdr = chdr; nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr + sizeof(struct ctx_hdr)); nctx->u.fctx->flags.f = 0; return 0; } static int nitrox_gcm_common_init(struct crypto_aead *aead) { int ret; struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); union fc_ctx_flags *flags; ret = nitrox_aead_init(aead); if (ret) return ret; flags = &nctx->u.fctx->flags; flags->w0.cipher_type = CIPHER_AES_GCM; flags->w0.hash_type = AUTH_NULL; flags->w0.iv_source = IV_FROM_DPTR; /* ask microcode to calculate ipad/opad */ flags->w0.auth_input_type = 1; flags->f = cpu_to_be64(flags->fu); return 0; } static int nitrox_aes_gcm_init(struct crypto_aead *aead) { int ret; ret = nitrox_gcm_common_init(aead); if (ret) return ret; crypto_aead_set_reqsize(aead, sizeof(struct aead_request) + sizeof(struct nitrox_aead_rctx)); return 0; } static void nitrox_aead_exit(struct crypto_aead *aead) { struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); /* free the nitrox crypto context */ if (nctx->u.ctx_handle) { struct flexi_crypto_context *fctx = nctx->u.fctx; memzero_explicit(&fctx->crypto, sizeof(struct crypto_keys)); memzero_explicit(&fctx->auth, sizeof(struct auth_keys)); crypto_free_context((void *)nctx->chdr); } nitrox_put_device(nctx->ndev); nctx->u.ctx_handle = 0; nctx->ndev = NULL; } static int nitrox_rfc4106_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); struct flexi_crypto_context *fctx = nctx->u.fctx; int ret; if (keylen < GCM_AES_SALT_SIZE) return -EINVAL; keylen -= GCM_AES_SALT_SIZE; ret = nitrox_aes_gcm_setkey(aead, key, keylen); if (ret) return ret; memcpy(fctx->crypto.iv, key + keylen, GCM_AES_SALT_SIZE); return 0; } static int nitrox_rfc4106_setauthsize(struct crypto_aead *aead, unsigned int authsize) { switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } return nitrox_aead_setauthsize(aead, authsize); } static int nitrox_rfc4106_set_aead_rctx_sglist(struct aead_request *areq) { struct nitrox_rfc4106_rctx *rctx = aead_request_ctx_dma(areq); struct nitrox_aead_rctx *aead_rctx = &rctx->base; unsigned int assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE; struct scatterlist *sg; if (areq->assoclen != 16 && areq->assoclen != 20) return -EINVAL; scatterwalk_map_and_copy(rctx->assoc, areq->src, 0, assoclen, 0); sg_init_table(rctx->src, 3); sg_set_buf(rctx->src, rctx->assoc, assoclen); sg = scatterwalk_ffwd(rctx->src + 1, areq->src, areq->assoclen); if (sg != rctx->src + 1) sg_chain(rctx->src, 2, sg); if (areq->src != areq->dst) { sg_init_table(rctx->dst, 3); sg_set_buf(rctx->dst, rctx->assoc, assoclen); sg = scatterwalk_ffwd(rctx->dst + 1, areq->dst, areq->assoclen); if (sg != rctx->dst + 1) sg_chain(rctx->dst, 2, sg); } aead_rctx->src = rctx->src; aead_rctx->dst = (areq->src == areq->dst) ? 
rctx->src : rctx->dst; return 0; } static void nitrox_rfc4106_callback(void *arg, int err) { struct aead_request *areq = arg; struct nitrox_rfc4106_rctx *rctx = aead_request_ctx_dma(areq); struct nitrox_kcrypt_request *nkreq = &rctx->base.nkreq; free_src_sglist(nkreq); free_dst_sglist(nkreq); if (err) { pr_err_ratelimited("request failed status 0x%0x\n", err); err = -EINVAL; } aead_request_complete(areq, err); } static int nitrox_rfc4106_enc(struct aead_request *areq) { struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct nitrox_crypto_ctx *nctx = crypto_aead_ctx(aead); struct nitrox_rfc4106_rctx *rctx = aead_request_ctx_dma(areq); struct nitrox_aead_rctx *aead_rctx = &rctx->base; struct se_crypto_request *creq = &aead_rctx->nkreq.creq; int ret; aead_rctx->cryptlen = areq->cryptlen; aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE; aead_rctx->srclen = aead_rctx->assoclen + aead_rctx->cryptlen; aead_rctx->dstlen = aead_rctx->srclen + aead->authsize; aead_rctx->iv = areq->iv; aead_rctx->ivsize = GCM_RFC4106_IV_SIZE; aead_rctx->flags = areq->base.flags; aead_rctx->ctx_handle = nctx->u.ctx_handle; aead_rctx->ctrl_arg = ENCRYPT; ret = nitrox_rfc4106_set_aead_rctx_sglist(areq); if (ret) return ret; ret = nitrox_set_creq(aead_rctx); if (ret) return ret; /* send the crypto request */ return nitrox_process_se_request(nctx->ndev, creq, nitrox_rfc4106_callback, areq); } static int nitrox_rfc4106_dec(struct aead_request *areq) { struct crypto_aead *aead = crypto_aead_reqtfm(areq); struct nitrox_crypto_ctx *nctx = crypto_aead_ctx_dma(aead); struct nitrox_rfc4106_rctx *rctx = aead_request_ctx(areq); struct nitrox_aead_rctx *aead_rctx = &rctx->base; struct se_crypto_request *creq = &aead_rctx->nkreq.creq; int ret; aead_rctx->cryptlen = areq->cryptlen - aead->authsize; aead_rctx->assoclen = areq->assoclen - GCM_RFC4106_IV_SIZE; aead_rctx->srclen = areq->cryptlen - GCM_RFC4106_IV_SIZE + areq->assoclen; aead_rctx->dstlen = aead_rctx->srclen - aead->authsize; aead_rctx->iv = areq->iv; aead_rctx->ivsize = GCM_RFC4106_IV_SIZE; aead_rctx->flags = areq->base.flags; aead_rctx->ctx_handle = nctx->u.ctx_handle; aead_rctx->ctrl_arg = DECRYPT; ret = nitrox_rfc4106_set_aead_rctx_sglist(areq); if (ret) return ret; ret = nitrox_set_creq(aead_rctx); if (ret) return ret; /* send the crypto request */ return nitrox_process_se_request(nctx->ndev, creq, nitrox_rfc4106_callback, areq); } static int nitrox_rfc4106_init(struct crypto_aead *aead) { int ret; ret = nitrox_gcm_common_init(aead); if (ret) return ret; crypto_aead_set_reqsize_dma(aead, sizeof(struct aead_request) + sizeof(struct nitrox_rfc4106_rctx)); return 0; } static struct aead_alg nitrox_aeads[] = { { .base = { .cra_name = "gcm(aes)", .cra_driver_name = "n5_aes_gcm", .cra_priority = PRIO, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .setkey = nitrox_aes_gcm_setkey, .setauthsize = nitrox_aes_gcm_setauthsize, .encrypt = nitrox_aes_gcm_enc, .decrypt = nitrox_aes_gcm_dec, .init = nitrox_aes_gcm_init, .exit = nitrox_aead_exit, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, { .base = { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "n5_rfc4106", .cra_priority = PRIO, .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY, .cra_blocksize = 1, .cra_ctxsize = sizeof(struct nitrox_crypto_ctx), .cra_alignmask = 0, .cra_module = THIS_MODULE, }, .setkey = nitrox_rfc4106_setkey, .setauthsize = 
nitrox_rfc4106_setauthsize, .encrypt = nitrox_rfc4106_enc, .decrypt = nitrox_rfc4106_dec, .init = nitrox_rfc4106_init, .exit = nitrox_aead_exit, .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, } }; int nitrox_register_aeads(void) { return crypto_register_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads)); } void nitrox_unregister_aeads(void) { crypto_unregister_aeads(nitrox_aeads, ARRAY_SIZE(nitrox_aeads)); }
linux-master
drivers/crypto/cavium/nitrox/nitrox_aead.c
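One detail worth spelling out from nitrox_rfc4106_setkey() above: an rfc4106(gcm(aes)) key blob is the raw AES key followed by a 4-byte nonce/salt, and the salt is copied into the context IV while only the leading bytes are programmed as the AES key. A stand-alone sketch of that split (the helper name is hypothetical):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

#define GCM_AES_SALT_SIZE 4

/* Split an rfc4106 key blob into the AES key length and the 4-byte salt. */
static int example_split_rfc4106_key(const u8 *key, unsigned int keylen,
				     u8 salt[GCM_AES_SALT_SIZE],
				     unsigned int *aes_keylen)
{
	if (keylen < GCM_AES_SALT_SIZE)
		return -EINVAL;

	*aes_keylen = keylen - GCM_AES_SALT_SIZE;
	memcpy(salt, key + *aes_keylen, GCM_AES_SALT_SIZE);
	return 0;
}

So 20-, 28- and 36-byte rfc4106 keys correspond to AES-128, AES-192 and AES-256 respectively.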
// SPDX-License-Identifier: GPL-2.0 #include <linux/seq_file.h> #include <linux/debugfs.h> #include "nitrox_csr.h" #include "nitrox_debugfs.h" #include "nitrox_dev.h" static int firmware_show(struct seq_file *s, void *v) { struct nitrox_device *ndev = s->private; seq_printf(s, "Version: %s\n", ndev->hw.fw_name[0]); seq_printf(s, "Version: %s\n", ndev->hw.fw_name[1]); return 0; } DEFINE_SHOW_ATTRIBUTE(firmware); static int device_show(struct seq_file *s, void *v) { struct nitrox_device *ndev = s->private; seq_printf(s, "NITROX [%d]\n", ndev->idx); seq_printf(s, " Part Name: %s\n", ndev->hw.partname); seq_printf(s, " Frequency: %d MHz\n", ndev->hw.freq); seq_printf(s, " Device ID: 0x%0x\n", ndev->hw.device_id); seq_printf(s, " Revision ID: 0x%0x\n", ndev->hw.revision_id); seq_printf(s, " Cores: [AE=%u SE=%u ZIP=%u]\n", ndev->hw.ae_cores, ndev->hw.se_cores, ndev->hw.zip_cores); return 0; } DEFINE_SHOW_ATTRIBUTE(device); static int stats_show(struct seq_file *s, void *v) { struct nitrox_device *ndev = s->private; seq_printf(s, "NITROX [%d] Request Statistics\n", ndev->idx); seq_printf(s, " Posted: %llu\n", (u64)atomic64_read(&ndev->stats.posted)); seq_printf(s, " Completed: %llu\n", (u64)atomic64_read(&ndev->stats.completed)); seq_printf(s, " Dropped: %llu\n", (u64)atomic64_read(&ndev->stats.dropped)); return 0; } DEFINE_SHOW_ATTRIBUTE(stats); void nitrox_debugfs_exit(struct nitrox_device *ndev) { debugfs_remove_recursive(ndev->debugfs_dir); ndev->debugfs_dir = NULL; } void nitrox_debugfs_init(struct nitrox_device *ndev) { struct dentry *dir; dir = debugfs_create_dir(KBUILD_MODNAME, NULL); ndev->debugfs_dir = dir; debugfs_create_file("firmware", 0400, dir, ndev, &firmware_fops); debugfs_create_file("device", 0400, dir, ndev, &device_fops); debugfs_create_file("stats", 0400, dir, ndev, &stats_fops); }
linux-master
drivers/crypto/cavium/nitrox/nitrox_debugfs.c
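The firmware_fops, device_fops and stats_fops handed to debugfs_create_file() come from the DEFINE_SHOW_ATTRIBUTE() invocations above. For reference, that helper expands to roughly the following single-open seq_file boilerplate (shown here for firmware, simplified):

static int firmware_open(struct inode *inode, struct file *file)
{
	return single_open(file, firmware_show, inode->i_private);
}

static const struct file_operations firmware_fops = {
	.owner		= THIS_MODULE,
	.open		= firmware_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};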
// SPDX-License-Identifier: GPL-2.0 #include <linux/cpumask.h> #include <linux/dma-mapping.h> #include <linux/dmapool.h> #include <linux/delay.h> #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci_regs.h> #include <linux/vmalloc.h> #include <linux/pci.h> #include "nitrox_dev.h" #include "nitrox_common.h" #include "nitrox_req.h" #include "nitrox_csr.h" #define CRYPTO_CTX_SIZE 256 /* packet inuput ring alignments */ #define PKTIN_Q_ALIGN_BYTES 16 /* AQM Queue input alignments */ #define AQM_Q_ALIGN_BYTES 32 static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) { struct nitrox_device *ndev = cmdq->ndev; cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize, &cmdq->unalign_dma, GFP_KERNEL); if (!cmdq->unalign_base) return -ENOMEM; cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes); cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma); cmdq->write_idx = 0; spin_lock_init(&cmdq->cmd_qlock); spin_lock_init(&cmdq->resp_qlock); spin_lock_init(&cmdq->backlog_qlock); INIT_LIST_HEAD(&cmdq->response_head); INIT_LIST_HEAD(&cmdq->backlog_head); INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work); atomic_set(&cmdq->pending_count, 0); atomic_set(&cmdq->backlog_count, 0); return 0; } static void nitrox_cmdq_reset(struct nitrox_cmdq *cmdq) { cmdq->write_idx = 0; atomic_set(&cmdq->pending_count, 0); atomic_set(&cmdq->backlog_count, 0); } static void nitrox_cmdq_cleanup(struct nitrox_cmdq *cmdq) { struct nitrox_device *ndev; if (!cmdq) return; if (!cmdq->unalign_base) return; ndev = cmdq->ndev; cancel_work_sync(&cmdq->backlog_qflush); dma_free_coherent(DEV(ndev), cmdq->qsize, cmdq->unalign_base, cmdq->unalign_dma); nitrox_cmdq_reset(cmdq); cmdq->dbell_csr_addr = NULL; cmdq->compl_cnt_csr_addr = NULL; cmdq->unalign_base = NULL; cmdq->base = NULL; cmdq->unalign_dma = 0; cmdq->dma = 0; cmdq->qsize = 0; cmdq->instr_size = 0; } static void nitrox_free_aqm_queues(struct nitrox_device *ndev) { int i; for (i = 0; i < ndev->nr_queues; i++) { nitrox_cmdq_cleanup(ndev->aqmq[i]); kfree_sensitive(ndev->aqmq[i]); ndev->aqmq[i] = NULL; } } static int nitrox_alloc_aqm_queues(struct nitrox_device *ndev) { int i, err; for (i = 0; i < ndev->nr_queues; i++) { struct nitrox_cmdq *cmdq; u64 offset; cmdq = kzalloc_node(sizeof(*cmdq), GFP_KERNEL, ndev->node); if (!cmdq) { err = -ENOMEM; goto aqmq_fail; } cmdq->ndev = ndev; cmdq->qno = i; cmdq->instr_size = sizeof(struct aqmq_command_s); /* AQM Queue Doorbell Counter Register Address */ offset = AQMQ_DRBLX(i); cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset); /* AQM Queue Commands Completed Count Register Address */ offset = AQMQ_CMD_CNTX(i); cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset); err = nitrox_cmdq_init(cmdq, AQM_Q_ALIGN_BYTES); if (err) { kfree_sensitive(cmdq); goto aqmq_fail; } ndev->aqmq[i] = cmdq; } return 0; aqmq_fail: nitrox_free_aqm_queues(ndev); return err; } static void nitrox_free_pktin_queues(struct nitrox_device *ndev) { int i; for (i = 0; i < ndev->nr_queues; i++) { struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i]; nitrox_cmdq_cleanup(cmdq); } kfree(ndev->pkt_inq); ndev->pkt_inq = NULL; } static int nitrox_alloc_pktin_queues(struct nitrox_device *ndev) { int i, err; ndev->pkt_inq = kcalloc_node(ndev->nr_queues, sizeof(struct nitrox_cmdq), GFP_KERNEL, ndev->node); if (!ndev->pkt_inq) return -ENOMEM; for (i = 0; i < ndev->nr_queues; i++) { struct nitrox_cmdq *cmdq; u64 offset; cmdq = &ndev->pkt_inq[i]; 
cmdq->ndev = ndev; cmdq->qno = i; cmdq->instr_size = sizeof(struct nps_pkt_instr); /* packet input ring doorbell address */ offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i); cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset); /* packet solicit port completion count address */ offset = NPS_PKT_SLC_CNTSX(i); cmdq->compl_cnt_csr_addr = NITROX_CSR_ADDR(ndev, offset); err = nitrox_cmdq_init(cmdq, PKTIN_Q_ALIGN_BYTES); if (err) goto pktq_fail; } return 0; pktq_fail: nitrox_free_pktin_queues(ndev); return err; } static int create_crypto_dma_pool(struct nitrox_device *ndev) { size_t size; /* Crypto context pool, 16 byte aligned */ size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr); ndev->ctx_pool = dma_pool_create("nitrox-context", DEV(ndev), size, 16, 0); if (!ndev->ctx_pool) return -ENOMEM; return 0; } static void destroy_crypto_dma_pool(struct nitrox_device *ndev) { if (!ndev->ctx_pool) return; dma_pool_destroy(ndev->ctx_pool); ndev->ctx_pool = NULL; } /* * crypto_alloc_context - Allocate crypto context from pool * @ndev: NITROX Device */ void *crypto_alloc_context(struct nitrox_device *ndev) { struct ctx_hdr *ctx; struct crypto_ctx_hdr *chdr; void *vaddr; dma_addr_t dma; chdr = kmalloc(sizeof(*chdr), GFP_KERNEL); if (!chdr) return NULL; vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma); if (!vaddr) { kfree(chdr); return NULL; } /* fill meta data */ ctx = vaddr; ctx->pool = ndev->ctx_pool; ctx->dma = dma; ctx->ctx_dma = dma + sizeof(struct ctx_hdr); chdr->pool = ndev->ctx_pool; chdr->dma = dma; chdr->vaddr = vaddr; return chdr; } /** * crypto_free_context - Free crypto context to pool * @ctx: context to free */ void crypto_free_context(void *ctx) { struct crypto_ctx_hdr *ctxp; if (!ctx) return; ctxp = ctx; dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma); kfree(ctxp); } /** * nitrox_common_sw_init - allocate software resources. * @ndev: NITROX device * * Allocates crypto context pools and command queues etc. * * Return: 0 on success, or a negative error code on error. */ int nitrox_common_sw_init(struct nitrox_device *ndev) { int err = 0; /* per device crypto context pool */ err = create_crypto_dma_pool(ndev); if (err) return err; err = nitrox_alloc_pktin_queues(ndev); if (err) destroy_crypto_dma_pool(ndev); err = nitrox_alloc_aqm_queues(ndev); if (err) { nitrox_free_pktin_queues(ndev); destroy_crypto_dma_pool(ndev); } return err; } /** * nitrox_common_sw_cleanup - free software resources. * @ndev: NITROX device */ void nitrox_common_sw_cleanup(struct nitrox_device *ndev) { nitrox_free_aqm_queues(ndev); nitrox_free_pktin_queues(ndev); destroy_crypto_dma_pool(ndev); }
linux-master
drivers/crypto/cavium/nitrox/nitrox_lib.c
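nitrox_cmdq_init() above uses the common over-allocate-then-align pattern for DMA rings: allocate qsize plus the alignment, align the bus address, and shift the CPU pointer by the same offset so both views stay in sync. An isolated sketch of just that pattern, with illustrative names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>

struct example_ring {
	void		*unalign_base;	/* CPU address as allocated */
	dma_addr_t	 unalign_dma;	/* bus address as allocated */
	void		*base;		/* aligned CPU address */
	dma_addr_t	 dma;		/* aligned bus address */
	size_t		 qsize;
};

static int example_ring_alloc(struct device *dev, struct example_ring *r,
			      size_t bytes, int align)
{
	r->qsize = bytes + align;
	r->unalign_base = dma_alloc_coherent(dev, r->qsize,
					     &r->unalign_dma, GFP_KERNEL);
	if (!r->unalign_base)
		return -ENOMEM;

	r->dma  = PTR_ALIGN(r->unalign_dma, align);
	/* apply the same offset to the CPU pointer as to the bus address */
	r->base = r->unalign_base + (r->dma - r->unalign_dma);
	return 0;
}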
/***********************license start************************************ * Copyright (c) 2003-2017 Cavium, Inc. * All rights reserved. * * License: one of 'Cavium License' or 'GNU General Public License Version 2' * * This file is provided under the terms of the Cavium License (see below) * or under the terms of GNU General Public License, Version 2, as * published by the Free Software Foundation. When using or redistributing * this file, you may do so under either license. * * Cavium License: Redistribution and use in source and binary forms, with * or without modification, are permitted provided that the following * conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * * Neither the name of Cavium Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * This Software, including technical data, may be subject to U.S. export * control laws, including the U.S. Export Administration Act and its * associated regulations, and may be subject to export or import * regulations in other countries. * * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES * WITH YOU. ***********************license end**************************************/ #include <linux/delay.h> #include <linux/sched.h> #include "common.h" #include "zip_inflate.h" static int prepare_inflate_zcmd(struct zip_operation *zip_ops, struct zip_state *s, union zip_inst_s *zip_cmd) { union zip_zres_s *result_ptr = &s->result; memset(zip_cmd, 0, sizeof(s->zip_cmd)); memset(result_ptr, 0, sizeof(s->result)); /* IWORD#0 */ /* Decompression History Gather list - no gather list */ zip_cmd->s.hg = 0; /* For decompression, CE must be 0x0. */ zip_cmd->s.ce = 0; /* For decompression, SS must be 0x0. */ zip_cmd->s.ss = 0; /* For decompression, SF should always be set. */ zip_cmd->s.sf = 1; /* Begin File */ if (zip_ops->begin_file == 0) zip_cmd->s.bf = 0; else zip_cmd->s.bf = 1; zip_cmd->s.ef = 1; /* 0: for Deflate decompression, 3: for LZS decompression */ zip_cmd->s.cc = zip_ops->ccode; /* IWORD #1*/ /* adler checksum */ zip_cmd->s.adlercrc32 = zip_ops->csum; /* * HISTORYLENGTH must be 0x0 for any ZIP decompress operation. * History data is added to a decompression operation via IWORD3. 
*/ zip_cmd->s.historylength = 0; zip_cmd->s.ds = 0; /* IWORD # 8 and 9 - Output pointer */ zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output); zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len; /* Maximum number of output-stream bytes that can be written */ zip_cmd->s.totaloutputlength = zip_ops->output_len; zip_dbg("Data Direct Input case "); /* IWORD # 6 and 7 - input pointer */ zip_cmd->s.dg = 0; zip_cmd->s.inp_ptr_addr.s.addr = __pa((u8 *)zip_ops->input); zip_cmd->s.inp_ptr_ctl.s.length = zip_ops->input_len; /* IWORD # 10 and 11 - Result pointer */ zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr); /* Clearing completion code */ result_ptr->s.compcode = 0; /* Returning 0 for time being.*/ return 0; } /** * zip_inflate - API to offload inflate operation to hardware * @zip_ops: Pointer to zip operation structure * @s: Pointer to the structure representing zip state * @zip_dev: Pointer to zip device structure * * This function prepares the zip inflate command and submits it to the zip * engine for processing. * * Return: 0 if successful or error code */ int zip_inflate(struct zip_operation *zip_ops, struct zip_state *s, struct zip_device *zip_dev) { union zip_inst_s *zip_cmd = &s->zip_cmd; union zip_zres_s *result_ptr = &s->result; u32 queue; /* Prepare inflate zip command */ prepare_inflate_zcmd(zip_ops, s, zip_cmd); atomic64_add(zip_ops->input_len, &zip_dev->stats.decomp_in_bytes); /* Load inflate command to zip queue and ring the doorbell */ queue = zip_load_instr(zip_cmd, zip_dev); /* Decompression requests submitted stats update */ atomic64_inc(&zip_dev->stats.decomp_req_submit); /* Wait for completion or error */ zip_poll_result(result_ptr); /* Decompression requests completed stats update */ atomic64_inc(&zip_dev->stats.decomp_req_complete); zip_ops->compcode = result_ptr->s.compcode; switch (zip_ops->compcode) { case ZIP_CMD_NOTDONE: zip_dbg("Zip Instruction not yet completed\n"); return ZIP_ERROR; case ZIP_CMD_SUCCESS: zip_dbg("Zip Instruction completed successfully\n"); break; case ZIP_CMD_DYNAMIC_STOP: zip_dbg(" Dynamic stop Initiated\n"); break; default: zip_dbg("Instruction failed. Code = %d\n", zip_ops->compcode); atomic64_inc(&zip_dev->stats.decomp_bad_reqs); zip_update_cmd_bufs(zip_dev, queue); return ZIP_ERROR; } zip_update_cmd_bufs(zip_dev, queue); if ((zip_ops->ccode == 3) && (zip_ops->flush == 4) && (zip_ops->compcode != ZIP_CMD_DYNAMIC_STOP)) result_ptr->s.ef = 1; zip_ops->csum = result_ptr->s.adler32; atomic64_add(result_ptr->s.totalbyteswritten, &zip_dev->stats.decomp_out_bytes); if (zip_ops->output_len < result_ptr->s.totalbyteswritten) { zip_err("output_len (%d) < total bytes written (%d)\n", zip_ops->output_len, result_ptr->s.totalbyteswritten); zip_ops->output_len = 0; } else { zip_ops->output_len = result_ptr->s.totalbyteswritten; } zip_ops->bytes_read = result_ptr->s.totalbytesread; zip_ops->bits_processed = result_ptr->s.totalbitsprocessed; zip_ops->end_file = result_ptr->s.ef; if (zip_ops->end_file) { switch (zip_ops->format) { case RAW_FORMAT: zip_dbg("RAW Format: %d ", zip_ops->format); /* Get checksum from engine */ zip_ops->csum = result_ptr->s.adler32; break; case ZLIB_FORMAT: zip_dbg("ZLIB Format: %d ", zip_ops->format); zip_ops->csum = result_ptr->s.adler32; break; case GZIP_FORMAT: zip_dbg("GZIP Format: %d ", zip_ops->format); zip_ops->csum = result_ptr->s.crc32; break; case LZS_FORMAT: zip_dbg("LZS Format: %d ", zip_ops->format); break; default: zip_err("Format error:%d\n", zip_ops->format); } } return 0; }
linux-master
drivers/crypto/cavium/zip/zip_inflate.c
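As a reference for the result handling in zip_inflate() above, here is a minimal user-space sketch of the two decisions the driver makes after the engine completes: which checksum field to carry forward for each stream format, and how the reported byte count is clamped when it exceeds the caller's buffer. All model_* names and values are illustrative stand-ins, not the driver's definitions.

#include <stdint.h>
#include <stdio.h>

enum model_format { MODEL_RAW, MODEL_ZLIB, MODEL_GZIP, MODEL_LZS };

struct model_result {
	uint32_t adler32;            /* engine-reported Adler-32   */
	uint32_t crc32;              /* engine-reported CRC-32     */
	uint32_t totalbyteswritten;  /* bytes the engine produced  */
};

/* Pick the checksum for the format and clamp the usable output length. */
static uint32_t model_finish(enum model_format fmt,
			     const struct model_result *res,
			     uint32_t output_len, uint32_t *csum)
{
	/* RAW and ZLIB streams carry an Adler-32, GZIP carries a CRC-32. */
	if (fmt == MODEL_GZIP)
		*csum = res->crc32;
	else if (fmt != MODEL_LZS)
		*csum = res->adler32;

	/* Engine wrote more than the caller's buffer holds: report nothing. */
	if (output_len < res->totalbyteswritten)
		return 0;
	return res->totalbyteswritten;
}

int main(void)
{
	struct model_result res = { .adler32 = 0x11, .crc32 = 0x22,
				    .totalbyteswritten = 512 };
	uint32_t csum = 1;

	printf("gzip: usable %u, csum 0x%x\n",
	       (unsigned)model_finish(MODEL_GZIP, &res, 4096, &csum),
	       (unsigned)csum);
	printf("zlib, short buffer: usable %u\n",
	       (unsigned)model_finish(MODEL_ZLIB, &res, 256, &csum));
	return 0;
}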
/***********************license start************************************ * Copyright (c) 2003-2017 Cavium, Inc. * All rights reserved. * * License: one of 'Cavium License' or 'GNU General Public License Version 2' * * This file is provided under the terms of the Cavium License (see below) * or under the terms of GNU General Public License, Version 2, as * published by the Free Software Foundation. When using or redistributing * this file, you may do so under either license. * * Cavium License: Redistribution and use in source and binary forms, with * or without modification, are permitted provided that the following * conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * * Neither the name of Cavium Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * This Software, including technical data, may be subject to U.S. export * control laws, including the U.S. Export Administration Act and its * associated regulations, and may be subject to export or import * regulations in other countries. * * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES * WITH YOU. 
***********************license end**************************************/ #include <linux/delay.h> #include <linux/sched.h> #include "common.h" #include "zip_deflate.h" /* Prepares the deflate zip command */ static int prepare_zip_command(struct zip_operation *zip_ops, struct zip_state *s, union zip_inst_s *zip_cmd) { union zip_zres_s *result_ptr = &s->result; memset(zip_cmd, 0, sizeof(s->zip_cmd)); memset(result_ptr, 0, sizeof(s->result)); /* IWORD #0 */ /* History gather */ zip_cmd->s.hg = 0; /* compression enable = 1 for deflate */ zip_cmd->s.ce = 1; /* sf (sync flush) */ zip_cmd->s.sf = 1; /* ef (end of file) */ if (zip_ops->flush == ZIP_FLUSH_FINISH) { zip_cmd->s.ef = 1; zip_cmd->s.sf = 0; } zip_cmd->s.cc = zip_ops->ccode; /* ss (compression speed/storage) */ zip_cmd->s.ss = zip_ops->speed; /* IWORD #1 */ /* adler checksum */ zip_cmd->s.adlercrc32 = zip_ops->csum; zip_cmd->s.historylength = zip_ops->history_len; zip_cmd->s.dg = 0; /* IWORD # 6 and 7 - compression input/history pointer */ zip_cmd->s.inp_ptr_addr.s.addr = __pa(zip_ops->input); zip_cmd->s.inp_ptr_ctl.s.length = (zip_ops->input_len + zip_ops->history_len); zip_cmd->s.ds = 0; /* IWORD # 8 and 9 - Output pointer */ zip_cmd->s.out_ptr_addr.s.addr = __pa(zip_ops->output); zip_cmd->s.out_ptr_ctl.s.length = zip_ops->output_len; /* maximum number of output-stream bytes that can be written */ zip_cmd->s.totaloutputlength = zip_ops->output_len; /* IWORD # 10 and 11 - Result pointer */ zip_cmd->s.res_ptr_addr.s.addr = __pa(result_ptr); /* Clearing completion code */ result_ptr->s.compcode = 0; return 0; } /** * zip_deflate - API to offload deflate operation to hardware * @zip_ops: Pointer to zip operation structure * @s: Pointer to the structure representing zip state * @zip_dev: Pointer to zip device structure * * This function prepares the zip deflate command and submits it to the zip * engine for processing. * * Return: 0 if successful or error code */ int zip_deflate(struct zip_operation *zip_ops, struct zip_state *s, struct zip_device *zip_dev) { union zip_inst_s *zip_cmd = &s->zip_cmd; union zip_zres_s *result_ptr = &s->result; u32 queue; /* Prepares zip command based on the input parameters */ prepare_zip_command(zip_ops, s, zip_cmd); atomic64_add(zip_ops->input_len, &zip_dev->stats.comp_in_bytes); /* Loads zip command into command queues and rings door bell */ queue = zip_load_instr(zip_cmd, zip_dev); /* Stats update for compression requests submitted */ atomic64_inc(&zip_dev->stats.comp_req_submit); /* Wait for completion or error */ zip_poll_result(result_ptr); /* Stats update for compression requests completed */ atomic64_inc(&zip_dev->stats.comp_req_complete); zip_ops->compcode = result_ptr->s.compcode; switch (zip_ops->compcode) { case ZIP_CMD_NOTDONE: zip_dbg("Zip instruction not yet completed"); return ZIP_ERROR; case ZIP_CMD_SUCCESS: zip_dbg("Zip instruction completed successfully"); zip_update_cmd_bufs(zip_dev, queue); break; case ZIP_CMD_DTRUNC: zip_dbg("Output Truncate error"); /* Returning ZIP_ERROR to avoid copy to user */ return ZIP_ERROR; default: zip_err("Zip instruction failed. 
Code:%d", zip_ops->compcode); return ZIP_ERROR; } /* Update the CRC depending on the format */ switch (zip_ops->format) { case RAW_FORMAT: zip_dbg("RAW Format: %d ", zip_ops->format); /* Get checksum from engine, need to feed it again */ zip_ops->csum = result_ptr->s.adler32; break; case ZLIB_FORMAT: zip_dbg("ZLIB Format: %d ", zip_ops->format); zip_ops->csum = result_ptr->s.adler32; break; case GZIP_FORMAT: zip_dbg("GZIP Format: %d ", zip_ops->format); zip_ops->csum = result_ptr->s.crc32; break; case LZS_FORMAT: zip_dbg("LZS Format: %d ", zip_ops->format); break; default: zip_err("Unknown Format:%d\n", zip_ops->format); } atomic64_add(result_ptr->s.totalbyteswritten, &zip_dev->stats.comp_out_bytes); /* Update output_len */ if (zip_ops->output_len < result_ptr->s.totalbyteswritten) { /* Dynamic stop && strm->output_len < zipconstants[onfsize] */ zip_err("output_len (%d) < total bytes written(%d)\n", zip_ops->output_len, result_ptr->s.totalbyteswritten); zip_ops->output_len = 0; } else { zip_ops->output_len = result_ptr->s.totalbyteswritten; } return 0; }
linux-master
drivers/crypto/cavium/zip/zip_deflate.c
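prepare_zip_command() above picks the IWORD#0 flags from the caller's flush mode: a finishing request sets EF (end of file) and clears SF (sync flush), while any other request keeps SF set so the stream can continue. A small user-space sketch of that selection follows; MODEL_FLUSH_FINISH and the bitfield layout are assumptions chosen for illustration, not the driver's constants.

#include <stdio.h>

#define MODEL_FLUSH_FINISH 4   /* stand-in for the driver's ZIP_FLUSH_FINISH */

struct model_iword0 {
	unsigned int ce : 1;  /* compression enable */
	unsigned int sf : 1;  /* sync flush         */
	unsigned int ef : 1;  /* end of file        */
};

/* Choose deflate flags: finish ends the stream, anything else keeps it open. */
static struct model_iword0 model_deflate_flags(int flush)
{
	struct model_iword0 w = { .ce = 1, .sf = 1, .ef = 0 };

	if (flush == MODEL_FLUSH_FINISH) {
		w.ef = 1;
		w.sf = 0;
	}
	return w;
}

int main(void)
{
	struct model_iword0 w = model_deflate_flags(MODEL_FLUSH_FINISH);

	printf("finish: ce=%u sf=%u ef=%u\n", w.ce, w.sf, w.ef);
	w = model_deflate_flags(0);
	printf("stream: ce=%u sf=%u ef=%u\n", w.ce, w.sf, w.ef);
	return 0;
}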
/***********************license start************************************ * Copyright (c) 2003-2017 Cavium, Inc. * All rights reserved. * * License: one of 'Cavium License' or 'GNU General Public License Version 2' * * This file is provided under the terms of the Cavium License (see below) * or under the terms of GNU General Public License, Version 2, as * published by the Free Software Foundation. When using or redistributing * this file, you may do so under either license. * * Cavium License: Redistribution and use in source and binary forms, with * or without modification, are permitted provided that the following * conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * * Neither the name of Cavium Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * This Software, including technical data, may be subject to U.S. export * control laws, including the U.S. Export Administration Act and its * associated regulations, and may be subject to export or import * regulations in other countries. * * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES * WITH YOU. 
***********************license end**************************************/ #include <linux/types.h> #include <linux/vmalloc.h> #include "common.h" /** * zip_cmd_qbuf_alloc - Allocates a cmd buffer for ZIP Instruction Queue * @zip: Pointer to zip device structure * @q: Queue number to allocate bufffer to * Return: 0 if successful, -ENOMEM otherwise */ int zip_cmd_qbuf_alloc(struct zip_device *zip, int q) { zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA), get_order(ZIP_CMD_QBUF_SIZE)); if (!zip->iq[q].sw_head) return -ENOMEM; memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE); zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head); return 0; } /** * zip_cmd_qbuf_free - Frees the cmd Queue buffer * @zip: Pointer to zip device structure * @q: Queue number to free buffer of */ void zip_cmd_qbuf_free(struct zip_device *zip, int q) { zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail); free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE)); } /** * zip_data_buf_alloc - Allocates memory for a data bufffer * @size: Size of the buffer to allocate * Returns: Pointer to the buffer allocated */ u8 *zip_data_buf_alloc(u64 size) { u8 *ptr; ptr = (u8 *)__get_free_pages((GFP_KERNEL | GFP_DMA), get_order(size)); if (!ptr) return NULL; memset(ptr, 0, size); zip_dbg("Data buffer allocation success\n"); return ptr; } /** * zip_data_buf_free - Frees the memory of a data buffer * @ptr: Pointer to the buffer * @size: Buffer size */ void zip_data_buf_free(u8 *ptr, u64 size) { zip_dbg("Freeing data buffer 0x%lx\n", ptr); free_pages((u64)ptr, get_order(size)); }
linux-master
drivers/crypto/cavium/zip/zip_mem.c
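The allocators above size their buffers with get_order(), i.e. the smallest power-of-two number of pages that covers the requested size. The sketch below models that rounding in user space, assuming a 4 KiB page; model_get_order() is an illustrative reimplementation, not the kernel helper.

#include <stddef.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096UL   /* assumed page size */

/* Smallest order such that (PAGE_SIZE << order) covers the request. */
static unsigned int model_get_order(size_t size)
{
	unsigned int order = 0;

	while ((MODEL_PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	size_t sizes[] = { 1, 4096, 4097, 65536, 100000 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %6zu -> order %u (%lu bytes backing)\n",
		       sizes[i], model_get_order(sizes[i]),
		       MODEL_PAGE_SIZE << model_get_order(sizes[i]));
	return 0;
}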
/***********************license start************************************ * Copyright (c) 2003-2017 Cavium, Inc. * All rights reserved. * * License: one of 'Cavium License' or 'GNU General Public License Version 2' * * This file is provided under the terms of the Cavium License (see below) * or under the terms of GNU General Public License, Version 2, as * published by the Free Software Foundation. When using or redistributing * this file, you may do so under either license. * * Cavium License: Redistribution and use in source and binary forms, with * or without modification, are permitted provided that the following * conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * * Neither the name of Cavium Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * This Software, including technical data, may be subject to U.S. export * control laws, including the U.S. Export Administration Act and its * associated regulations, and may be subject to export or import * regulations in other countries. * * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES * WITH YOU. 
***********************license end**************************************/ #include "zip_crypto.h" static void zip_static_init_zip_ops(struct zip_operation *zip_ops, int lzs_flag) { zip_ops->flush = ZIP_FLUSH_FINISH; /* equivalent to level 6 of opensource zlib */ zip_ops->speed = 1; if (!lzs_flag) { zip_ops->ccode = 0; /* Auto Huffman */ zip_ops->lzs_flag = 0; zip_ops->format = ZLIB_FORMAT; } else { zip_ops->ccode = 3; /* LZS Encoding */ zip_ops->lzs_flag = 1; zip_ops->format = LZS_FORMAT; } zip_ops->begin_file = 1; zip_ops->history_len = 0; zip_ops->end_file = 1; zip_ops->compcode = 0; zip_ops->csum = 1; /* Adler checksum desired */ } static int zip_ctx_init(struct zip_kernel_ctx *zip_ctx, int lzs_flag) { struct zip_operation *comp_ctx = &zip_ctx->zip_comp; struct zip_operation *decomp_ctx = &zip_ctx->zip_decomp; zip_static_init_zip_ops(comp_ctx, lzs_flag); zip_static_init_zip_ops(decomp_ctx, lzs_flag); comp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE); if (!comp_ctx->input) return -ENOMEM; comp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE); if (!comp_ctx->output) goto err_comp_input; decomp_ctx->input = zip_data_buf_alloc(MAX_INPUT_BUFFER_SIZE); if (!decomp_ctx->input) goto err_comp_output; decomp_ctx->output = zip_data_buf_alloc(MAX_OUTPUT_BUFFER_SIZE); if (!decomp_ctx->output) goto err_decomp_input; return 0; err_decomp_input: zip_data_buf_free(decomp_ctx->input, MAX_INPUT_BUFFER_SIZE); err_comp_output: zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE); err_comp_input: zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE); return -ENOMEM; } static void zip_ctx_exit(struct zip_kernel_ctx *zip_ctx) { struct zip_operation *comp_ctx = &zip_ctx->zip_comp; struct zip_operation *dec_ctx = &zip_ctx->zip_decomp; zip_data_buf_free(comp_ctx->input, MAX_INPUT_BUFFER_SIZE); zip_data_buf_free(comp_ctx->output, MAX_OUTPUT_BUFFER_SIZE); zip_data_buf_free(dec_ctx->input, MAX_INPUT_BUFFER_SIZE); zip_data_buf_free(dec_ctx->output, MAX_OUTPUT_BUFFER_SIZE); } static int zip_compress(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, struct zip_kernel_ctx *zip_ctx) { struct zip_operation *zip_ops = NULL; struct zip_state *zip_state; struct zip_device *zip = NULL; int ret; if (!zip_ctx || !src || !dst || !dlen) return -ENOMEM; zip = zip_get_device(zip_get_node_id()); if (!zip) return -ENODEV; zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); if (!zip_state) return -ENOMEM; zip_ops = &zip_ctx->zip_comp; zip_ops->input_len = slen; zip_ops->output_len = *dlen; memcpy(zip_ops->input, src, slen); ret = zip_deflate(zip_ops, zip_state, zip); if (!ret) { *dlen = zip_ops->output_len; memcpy(dst, zip_ops->output, *dlen); } kfree(zip_state); return ret; } static int zip_decompress(const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, struct zip_kernel_ctx *zip_ctx) { struct zip_operation *zip_ops = NULL; struct zip_state *zip_state; struct zip_device *zip = NULL; int ret; if (!zip_ctx || !src || !dst || !dlen) return -ENOMEM; zip = zip_get_device(zip_get_node_id()); if (!zip) return -ENODEV; zip_state = kzalloc(sizeof(*zip_state), GFP_ATOMIC); if (!zip_state) return -ENOMEM; zip_ops = &zip_ctx->zip_decomp; memcpy(zip_ops->input, src, slen); /* Work around for a bug in zlib which needs an extra bytes sometimes */ if (zip_ops->ccode != 3) /* Not LZS Encoding */ zip_ops->input[slen++] = 0; zip_ops->input_len = slen; zip_ops->output_len = *dlen; ret = zip_inflate(zip_ops, zip_state, zip); if (!ret) { *dlen = zip_ops->output_len; memcpy(dst, zip_ops->output, *dlen); 
} kfree(zip_state); return ret; } /* Legacy Compress framework start */ int zip_alloc_comp_ctx_deflate(struct crypto_tfm *tfm) { struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); return zip_ctx_init(zip_ctx, 0); } int zip_alloc_comp_ctx_lzs(struct crypto_tfm *tfm) { struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); return zip_ctx_init(zip_ctx, 1); } void zip_free_comp_ctx(struct crypto_tfm *tfm) { struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); zip_ctx_exit(zip_ctx); } int zip_comp_compress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); return zip_compress(src, slen, dst, dlen, zip_ctx); } int zip_comp_decompress(struct crypto_tfm *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen) { struct zip_kernel_ctx *zip_ctx = crypto_tfm_ctx(tfm); return zip_decompress(src, slen, dst, dlen, zip_ctx); } /* Legacy compress framework end */ /* SCOMP framework start */ void *zip_alloc_scomp_ctx_deflate(struct crypto_scomp *tfm) { int ret; struct zip_kernel_ctx *zip_ctx; zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL); if (!zip_ctx) return ERR_PTR(-ENOMEM); ret = zip_ctx_init(zip_ctx, 0); if (ret) { kfree_sensitive(zip_ctx); return ERR_PTR(ret); } return zip_ctx; } void *zip_alloc_scomp_ctx_lzs(struct crypto_scomp *tfm) { int ret; struct zip_kernel_ctx *zip_ctx; zip_ctx = kzalloc(sizeof(*zip_ctx), GFP_KERNEL); if (!zip_ctx) return ERR_PTR(-ENOMEM); ret = zip_ctx_init(zip_ctx, 1); if (ret) { kfree_sensitive(zip_ctx); return ERR_PTR(ret); } return zip_ctx; } void zip_free_scomp_ctx(struct crypto_scomp *tfm, void *ctx) { struct zip_kernel_ctx *zip_ctx = ctx; zip_ctx_exit(zip_ctx); kfree_sensitive(zip_ctx); } int zip_scomp_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { struct zip_kernel_ctx *zip_ctx = ctx; return zip_compress(src, slen, dst, dlen, zip_ctx); } int zip_scomp_decompress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) { struct zip_kernel_ctx *zip_ctx = ctx; return zip_decompress(src, slen, dst, dlen, zip_ctx); } /* SCOMP framework end */
linux-master
drivers/crypto/cavium/zip/zip_crypto.c
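zip_ctx_init() above acquires four fixed-size bounce buffers and, on any failure, unwinds only the buffers already allocated by falling through goto labels in reverse order. The following self-contained sketch reproduces that unwind pattern with malloc(); the model_ctx layout and buffer sizes are assumptions for illustration.

#include <stdlib.h>

#define MODEL_IN_SIZE  (64 * 1024)   /* assumed input buffer size  */
#define MODEL_OUT_SIZE (64 * 1024)   /* assumed output buffer size */

struct model_ctx {
	unsigned char *comp_in, *comp_out;
	unsigned char *decomp_in, *decomp_out;
};

/* Allocate all four buffers, freeing earlier ones if a later one fails. */
static int model_ctx_init(struct model_ctx *ctx)
{
	ctx->comp_in = malloc(MODEL_IN_SIZE);
	if (!ctx->comp_in)
		return -1;

	ctx->comp_out = malloc(MODEL_OUT_SIZE);
	if (!ctx->comp_out)
		goto err_comp_in;

	ctx->decomp_in = malloc(MODEL_IN_SIZE);
	if (!ctx->decomp_in)
		goto err_comp_out;

	ctx->decomp_out = malloc(MODEL_OUT_SIZE);
	if (!ctx->decomp_out)
		goto err_decomp_in;

	return 0;

err_decomp_in:
	free(ctx->decomp_in);
err_comp_out:
	free(ctx->comp_out);
err_comp_in:
	free(ctx->comp_in);
	return -1;
}

static void model_ctx_exit(struct model_ctx *ctx)
{
	free(ctx->comp_in);
	free(ctx->comp_out);
	free(ctx->decomp_in);
	free(ctx->decomp_out);
}

int main(void)
{
	struct model_ctx ctx;

	if (model_ctx_init(&ctx))
		return 1;
	model_ctx_exit(&ctx);
	return 0;
}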
/***********************license start************************************ * Copyright (c) 2003-2017 Cavium, Inc. * All rights reserved. * * License: one of 'Cavium License' or 'GNU General Public License Version 2' * * This file is provided under the terms of the Cavium License (see below) * or under the terms of GNU General Public License, Version 2, as * published by the Free Software Foundation. When using or redistributing * this file, you may do so under either license. * * Cavium License: Redistribution and use in source and binary forms, with * or without modification, are permitted provided that the following * conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * * Neither the name of Cavium Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * This Software, including technical data, may be subject to U.S. export * control laws, including the U.S. Export Administration Act and its * associated regulations, and may be subject to export or import * regulations in other countries. * * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES * WITH YOU. 
***********************license end**************************************/ #include "common.h" #include "zip_crypto.h" #define DRV_NAME "ThunderX-ZIP" static struct zip_device *zip_dev[MAX_ZIP_DEVICES]; static const struct pci_device_id zip_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDERX_ZIP) }, { 0, } }; static void zip_debugfs_init(void); static void zip_debugfs_exit(void); static int zip_register_compression_device(void); static void zip_unregister_compression_device(void); void zip_reg_write(u64 val, u64 __iomem *addr) { writeq(val, addr); } u64 zip_reg_read(u64 __iomem *addr) { return readq(addr); } /* * Allocates new ZIP device structure * Returns zip_device pointer or NULL if cannot allocate memory for zip_device */ static struct zip_device *zip_alloc_device(struct pci_dev *pdev) { struct zip_device *zip = NULL; int idx; for (idx = 0; idx < MAX_ZIP_DEVICES; idx++) { if (!zip_dev[idx]) break; } /* To ensure that the index is within the limit */ if (idx < MAX_ZIP_DEVICES) zip = devm_kzalloc(&pdev->dev, sizeof(*zip), GFP_KERNEL); if (!zip) return NULL; zip_dev[idx] = zip; zip->index = idx; return zip; } /** * zip_get_device - Get ZIP device based on node id of cpu * * @node: Node id of the current cpu * Return: Pointer to Zip device structure */ struct zip_device *zip_get_device(int node) { if ((node < MAX_ZIP_DEVICES) && (node >= 0)) return zip_dev[node]; zip_err("ZIP device not found for node id %d\n", node); return NULL; } /** * zip_get_node_id - Get the node id of the current cpu * * Return: Node id of the current cpu */ int zip_get_node_id(void) { return cpu_to_node(raw_smp_processor_id()); } /* Initializes the ZIP h/w sub-system */ static int zip_init_hw(struct zip_device *zip) { union zip_cmd_ctl cmd_ctl; union zip_constants constants; union zip_que_ena que_ena; union zip_quex_map que_map; union zip_que_pri que_pri; union zip_quex_sbuf_addr que_sbuf_addr; union zip_quex_sbuf_ctl que_sbuf_ctl; int q = 0; /* Enable the ZIP Engine(Core) Clock */ cmd_ctl.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CMD_CTL); cmd_ctl.s.forceclk = 1; zip_reg_write(cmd_ctl.u_reg64 & 0xFF, (zip->reg_base + ZIP_CMD_CTL)); zip_msg("ZIP_CMD_CTL : 0x%016llx", zip_reg_read(zip->reg_base + ZIP_CMD_CTL)); constants.u_reg64 = zip_reg_read(zip->reg_base + ZIP_CONSTANTS); zip->depth = constants.s.depth; zip->onfsize = constants.s.onfsize; zip->ctxsize = constants.s.ctxsize; zip_msg("depth: 0x%016llx , onfsize : 0x%016llx , ctxsize : 0x%016llx", zip->depth, zip->onfsize, zip->ctxsize); /* * Program ZIP_QUE(0..7)_SBUF_ADDR and ZIP_QUE(0..7)_SBUF_CTL to * have the correct buffer pointer and size configured for each * instruction queue. 
*/ for (q = 0; q < ZIP_NUM_QUEUES; q++) { que_sbuf_ctl.u_reg64 = 0ull; que_sbuf_ctl.s.size = (ZIP_CMD_QBUF_SIZE / sizeof(u64)); que_sbuf_ctl.s.inst_be = 0; que_sbuf_ctl.s.stream_id = 0; zip_reg_write(que_sbuf_ctl.u_reg64, (zip->reg_base + ZIP_QUEX_SBUF_CTL(q))); zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q, zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q))); } for (q = 0; q < ZIP_NUM_QUEUES; q++) { memset(&zip->iq[q], 0x0, sizeof(struct zip_iq)); spin_lock_init(&zip->iq[q].lock); if (zip_cmd_qbuf_alloc(zip, q)) { while (q != 0) { q--; zip_cmd_qbuf_free(zip, q); } return -ENOMEM; } /* Initialize tail ptr to head */ zip->iq[q].sw_tail = zip->iq[q].sw_head; zip->iq[q].hw_tail = zip->iq[q].sw_head; /* Write the physical addr to register */ que_sbuf_addr.u_reg64 = 0ull; que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >> ZIP_128B_ALIGN); zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q, (u64)que_sbuf_addr.s.ptr); zip_reg_write(que_sbuf_addr.u_reg64, (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q))); zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q, zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q))); zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx", zip->iq[q].sw_head, zip->iq[q].sw_tail, zip->iq[q].hw_tail); zip_dbg("sw_head phy addr : 0x%lx", que_sbuf_addr.s.ptr); } /* * Queue-to-ZIP core mapping * If a queue is not mapped to a particular core, it is equivalent to * the ZIP core being disabled. */ que_ena.u_reg64 = 0x0ull; /* Enabling queues based on ZIP_NUM_QUEUES */ for (q = 0; q < ZIP_NUM_QUEUES; q++) que_ena.s.ena |= (0x1 << q); zip_reg_write(que_ena.u_reg64, (zip->reg_base + ZIP_QUE_ENA)); zip_msg("QUE_ENA : 0x%016llx", zip_reg_read(zip->reg_base + ZIP_QUE_ENA)); for (q = 0; q < ZIP_NUM_QUEUES; q++) { que_map.u_reg64 = 0ull; /* Mapping each queue to two ZIP cores */ que_map.s.zce = 0x3; zip_reg_write(que_map.u_reg64, (zip->reg_base + ZIP_QUEX_MAP(q))); zip_msg("QUE_MAP(%d) : 0x%016llx", q, zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q))); } que_pri.u_reg64 = 0ull; for (q = 0; q < ZIP_NUM_QUEUES; q++) que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */ zip_reg_write(que_pri.u_reg64, (zip->reg_base + ZIP_QUE_PRI)); zip_msg("QUE_PRI %016llx", zip_reg_read(zip->reg_base + ZIP_QUE_PRI)); return 0; } static void zip_reset(struct zip_device *zip) { union zip_cmd_ctl cmd_ctl; cmd_ctl.u_reg64 = 0x0ull; cmd_ctl.s.reset = 1; /* Forces ZIP cores to do reset */ zip_reg_write(cmd_ctl.u_reg64, (zip->reg_base + ZIP_CMD_CTL)); } static int zip_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct device *dev = &pdev->dev; struct zip_device *zip = NULL; int err; zip = zip_alloc_device(pdev); if (!zip) return -ENOMEM; dev_info(dev, "Found ZIP device %d %x:%x on Node %d\n", zip->index, pdev->vendor, pdev->device, dev_to_node(dev)); pci_set_drvdata(pdev, zip); zip->pdev = pdev; err = pci_enable_device(pdev); if (err) { dev_err(dev, "Failed to enable PCI device"); goto err_free_device; } err = pci_request_regions(pdev, DRV_NAME); if (err) { dev_err(dev, "PCI request regions failed 0x%x", err); goto err_disable_device; } err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "Unable to get usable 48-bit DMA configuration\n"); goto err_release_regions; } /* MAP configuration registers */ zip->reg_base = pci_ioremap_bar(pdev, PCI_CFG_ZIP_PF_BAR0); if (!zip->reg_base) { dev_err(dev, "ZIP: Cannot map BAR0 CSR memory space, aborting"); err = -ENOMEM; goto err_release_regions; } /* Initialize ZIP Hardware */ err = zip_init_hw(zip); if (err) goto err_release_regions; /* Register with the 
Kernel Crypto Interface */ err = zip_register_compression_device(); if (err < 0) { zip_err("ZIP: Kernel Crypto Registration failed\n"); goto err_register; } /* comp-decomp statistics are handled with debugfs interface */ zip_debugfs_init(); return 0; err_register: zip_reset(zip); err_release_regions: if (zip->reg_base) iounmap(zip->reg_base); pci_release_regions(pdev); err_disable_device: pci_disable_device(pdev); err_free_device: pci_set_drvdata(pdev, NULL); /* Remove zip_dev from zip_device list, free the zip_device memory */ zip_dev[zip->index] = NULL; devm_kfree(dev, zip); return err; } static void zip_remove(struct pci_dev *pdev) { struct zip_device *zip = pci_get_drvdata(pdev); int q = 0; if (!zip) return; zip_debugfs_exit(); zip_unregister_compression_device(); if (zip->reg_base) { zip_reset(zip); iounmap(zip->reg_base); } pci_release_regions(pdev); pci_disable_device(pdev); /* * Free Command Queue buffers. This free should be called for all * the enabled Queues. */ for (q = 0; q < ZIP_NUM_QUEUES; q++) zip_cmd_qbuf_free(zip, q); pci_set_drvdata(pdev, NULL); /* remove zip device from zip device list */ zip_dev[zip->index] = NULL; } /* PCI Sub-System Interface */ static struct pci_driver zip_driver = { .name = DRV_NAME, .id_table = zip_id_table, .probe = zip_probe, .remove = zip_remove, }; /* Kernel Crypto Subsystem Interface */ static struct crypto_alg zip_comp_deflate = { .cra_name = "deflate", .cra_driver_name = "deflate-cavium", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct zip_kernel_ctx), .cra_priority = 300, .cra_module = THIS_MODULE, .cra_init = zip_alloc_comp_ctx_deflate, .cra_exit = zip_free_comp_ctx, .cra_u = { .compress = { .coa_compress = zip_comp_compress, .coa_decompress = zip_comp_decompress } } }; static struct crypto_alg zip_comp_lzs = { .cra_name = "lzs", .cra_driver_name = "lzs-cavium", .cra_flags = CRYPTO_ALG_TYPE_COMPRESS, .cra_ctxsize = sizeof(struct zip_kernel_ctx), .cra_priority = 300, .cra_module = THIS_MODULE, .cra_init = zip_alloc_comp_ctx_lzs, .cra_exit = zip_free_comp_ctx, .cra_u = { .compress = { .coa_compress = zip_comp_compress, .coa_decompress = zip_comp_decompress } } }; static struct scomp_alg zip_scomp_deflate = { .alloc_ctx = zip_alloc_scomp_ctx_deflate, .free_ctx = zip_free_scomp_ctx, .compress = zip_scomp_compress, .decompress = zip_scomp_decompress, .base = { .cra_name = "deflate", .cra_driver_name = "deflate-scomp-cavium", .cra_module = THIS_MODULE, .cra_priority = 300, } }; static struct scomp_alg zip_scomp_lzs = { .alloc_ctx = zip_alloc_scomp_ctx_lzs, .free_ctx = zip_free_scomp_ctx, .compress = zip_scomp_compress, .decompress = zip_scomp_decompress, .base = { .cra_name = "lzs", .cra_driver_name = "lzs-scomp-cavium", .cra_module = THIS_MODULE, .cra_priority = 300, } }; static int zip_register_compression_device(void) { int ret; ret = crypto_register_alg(&zip_comp_deflate); if (ret < 0) { zip_err("Deflate algorithm registration failed\n"); return ret; } ret = crypto_register_alg(&zip_comp_lzs); if (ret < 0) { zip_err("LZS algorithm registration failed\n"); goto err_unregister_alg_deflate; } ret = crypto_register_scomp(&zip_scomp_deflate); if (ret < 0) { zip_err("Deflate scomp algorithm registration failed\n"); goto err_unregister_alg_lzs; } ret = crypto_register_scomp(&zip_scomp_lzs); if (ret < 0) { zip_err("LZS scomp algorithm registration failed\n"); goto err_unregister_scomp_deflate; } return ret; err_unregister_scomp_deflate: crypto_unregister_scomp(&zip_scomp_deflate); err_unregister_alg_lzs: 
crypto_unregister_alg(&zip_comp_lzs); err_unregister_alg_deflate: crypto_unregister_alg(&zip_comp_deflate); return ret; } static void zip_unregister_compression_device(void) { crypto_unregister_alg(&zip_comp_deflate); crypto_unregister_alg(&zip_comp_lzs); crypto_unregister_scomp(&zip_scomp_deflate); crypto_unregister_scomp(&zip_scomp_lzs); } /* * debugfs functions */ #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> /* Displays ZIP device statistics */ static int zip_stats_show(struct seq_file *s, void *unused) { u64 val = 0ull; u64 avg_chunk = 0ull, avg_cr = 0ull; u32 q = 0; int index = 0; struct zip_device *zip; struct zip_stats *st; for (index = 0; index < MAX_ZIP_DEVICES; index++) { u64 pending = 0; if (zip_dev[index]) { zip = zip_dev[index]; st = &zip->stats; /* Get all the pending requests */ for (q = 0; q < ZIP_NUM_QUEUES; q++) { val = zip_reg_read((zip->reg_base + ZIP_DBG_QUEX_STA(q))); pending += val >> 32 & 0xffffff; } val = atomic64_read(&st->comp_req_complete); avg_chunk = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0; val = atomic64_read(&st->comp_out_bytes); avg_cr = (val) ? atomic64_read(&st->comp_in_bytes) / val : 0; seq_printf(s, " ZIP Device %d Stats\n" "-----------------------------------\n" "Comp Req Submitted : \t%lld\n" "Comp Req Completed : \t%lld\n" "Compress In Bytes : \t%lld\n" "Compressed Out Bytes : \t%lld\n" "Average Chunk size : \t%llu\n" "Average Compression ratio : \t%llu\n" "Decomp Req Submitted : \t%lld\n" "Decomp Req Completed : \t%lld\n" "Decompress In Bytes : \t%lld\n" "Decompressed Out Bytes : \t%lld\n" "Decompress Bad requests : \t%lld\n" "Pending Req : \t%lld\n" "---------------------------------\n", index, (u64)atomic64_read(&st->comp_req_submit), (u64)atomic64_read(&st->comp_req_complete), (u64)atomic64_read(&st->comp_in_bytes), (u64)atomic64_read(&st->comp_out_bytes), avg_chunk, avg_cr, (u64)atomic64_read(&st->decomp_req_submit), (u64)atomic64_read(&st->decomp_req_complete), (u64)atomic64_read(&st->decomp_in_bytes), (u64)atomic64_read(&st->decomp_out_bytes), (u64)atomic64_read(&st->decomp_bad_reqs), pending); } } return 0; } /* Clears stats data */ static int zip_clear_show(struct seq_file *s, void *unused) { int index = 0; for (index = 0; index < MAX_ZIP_DEVICES; index++) { if (zip_dev[index]) { memset(&zip_dev[index]->stats, 0, sizeof(struct zip_stats)); seq_printf(s, "Cleared stats for zip %d\n", index); } } return 0; } static struct zip_registers zipregs[64] = { {"ZIP_CMD_CTL ", 0x0000ull}, {"ZIP_THROTTLE ", 0x0010ull}, {"ZIP_CONSTANTS ", 0x00A0ull}, {"ZIP_QUE0_MAP ", 0x1400ull}, {"ZIP_QUE1_MAP ", 0x1408ull}, {"ZIP_QUE_ENA ", 0x0500ull}, {"ZIP_QUE_PRI ", 0x0508ull}, {"ZIP_QUE0_DONE ", 0x2000ull}, {"ZIP_QUE1_DONE ", 0x2008ull}, {"ZIP_QUE0_DOORBELL ", 0x4000ull}, {"ZIP_QUE1_DOORBELL ", 0x4008ull}, {"ZIP_QUE0_SBUF_ADDR ", 0x1000ull}, {"ZIP_QUE1_SBUF_ADDR ", 0x1008ull}, {"ZIP_QUE0_SBUF_CTL ", 0x1200ull}, {"ZIP_QUE1_SBUF_CTL ", 0x1208ull}, { NULL, 0} }; /* Prints registers' contents */ static int zip_regs_show(struct seq_file *s, void *unused) { u64 val = 0; int i = 0, index = 0; for (index = 0; index < MAX_ZIP_DEVICES; index++) { if (zip_dev[index]) { seq_printf(s, "--------------------------------\n" " ZIP Device %d Registers\n" "--------------------------------\n", index); i = 0; while (zipregs[i].reg_name) { val = zip_reg_read((zip_dev[index]->reg_base + zipregs[i].reg_offset)); seq_printf(s, "%s: 0x%016llx\n", zipregs[i].reg_name, val); i++; } } } return 0; } DEFINE_SHOW_ATTRIBUTE(zip_stats); DEFINE_SHOW_ATTRIBUTE(zip_clear); 
DEFINE_SHOW_ATTRIBUTE(zip_regs); /* Root directory for thunderx_zip debugfs entry */ static struct dentry *zip_debugfs_root; static void zip_debugfs_init(void) { if (!debugfs_initialized()) return; zip_debugfs_root = debugfs_create_dir("thunderx_zip", NULL); /* Creating files for entries inside thunderx_zip directory */ debugfs_create_file("zip_stats", 0444, zip_debugfs_root, NULL, &zip_stats_fops); debugfs_create_file("zip_clear", 0444, zip_debugfs_root, NULL, &zip_clear_fops); debugfs_create_file("zip_regs", 0444, zip_debugfs_root, NULL, &zip_regs_fops); } static void zip_debugfs_exit(void) { debugfs_remove_recursive(zip_debugfs_root); } #else static void __init zip_debugfs_init(void) { } static void __exit zip_debugfs_exit(void) { } #endif /* debugfs - end */ module_pci_driver(zip_driver); MODULE_AUTHOR("Cavium Inc"); MODULE_DESCRIPTION("Cavium Inc ThunderX ZIP Driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(pci, zip_id_table);
linux-master
drivers/crypto/cavium/zip/zip_main.c
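zip_stats_show() above derives an average chunk size and an average compression ratio from the raw counters, guarding against division by zero, and reads the pending-request count out of bits 32..55 of each queue's status register. A short user-space sketch of that arithmetic; the bit positions follow the code above, the sample numbers are made up.

#include <stdint.h>
#include <stdio.h>

/* Integer average with a divide-by-zero guard, as in zip_stats_show(). */
static uint64_t model_avg(uint64_t numerator, uint64_t denominator)
{
	return denominator ? numerator / denominator : 0;
}

/* Pending requests live in a 24-bit field starting at bit 32. */
static uint64_t model_pending(uint64_t que_sta)
{
	return (que_sta >> 32) & 0xffffff;
}

int main(void)
{
	uint64_t comp_in = 1 << 20, comp_out = 300000, completed = 64;

	printf("avg chunk size : %llu bytes\n",
	       (unsigned long long)model_avg(comp_in, completed));
	printf("avg comp ratio : %llu\n",
	       (unsigned long long)model_avg(comp_in, comp_out));
	printf("pending        : %llu\n",
	       (unsigned long long)model_pending(0x1234ull << 32));
	return 0;
}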
/***********************license start************************************ * Copyright (c) 2003-2017 Cavium, Inc. * All rights reserved. * * License: one of 'Cavium License' or 'GNU General Public License Version 2' * * This file is provided under the terms of the Cavium License (see below) * or under the terms of GNU General Public License, Version 2, as * published by the Free Software Foundation. When using or redistributing * this file, you may do so under either license. * * Cavium License: Redistribution and use in source and binary forms, with * or without modification, are permitted provided that the following * conditions are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * * Neither the name of Cavium Inc. nor the names of its contributors may be * used to endorse or promote products derived from this software without * specific prior written permission. * * This Software, including technical data, may be subject to U.S. export * control laws, including the U.S. Export Administration Act and its * associated regulations, and may be subject to export or import * regulations in other countries. * * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) * WARRANTIES OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A * PARTICULAR PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET * ENJOYMENT, QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE * ENTIRE RISK ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES * WITH YOU. ***********************license end**************************************/ #include "common.h" #include "zip_deflate.h" /** * zip_cmd_queue_consumed - Calculates the space consumed in the command queue. * * @zip_dev: Pointer to zip device structure * @queue: Queue number * * Return: Bytes consumed in the command queue buffer. */ static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue) { return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) * sizeof(u64 *)); } /** * zip_load_instr - Submits the instruction into the ZIP command queue * @instr: Pointer to the instruction to be submitted * @zip_dev: Pointer to ZIP device structure to which the instruction is to * be submitted * * This function copies the ZIP instruction to the command queue and rings the * doorbell to notify the engine of the instruction submission. The command * queue is maintained in a circular fashion. When there is space for exactly * one instruction in the queue, next chunk pointer of the queue is made to * point to the head of the queue, thus maintaining a circular queue. 
* * Return: Queue number to which the instruction was submitted */ u32 zip_load_instr(union zip_inst_s *instr, struct zip_device *zip_dev) { union zip_quex_doorbell dbell; u32 queue = 0; u32 consumed = 0; u64 *ncb_ptr = NULL; union zip_nptr_s ncp; /* * Distribute the instructions between the enabled queues based on * the CPU id. */ if (raw_smp_processor_id() % 2 == 0) queue = 0; else queue = 1; zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue); /* Take cmd buffer lock */ spin_lock(&zip_dev->iq[queue].lock); /* * Command Queue implementation * 1. If there is place for new instructions, push the cmd at sw_head. * 2. If there is place for exactly one instruction, push the new cmd * at the sw_head. Make sw_head point to the sw_tail to make it * circular. Write sw_head's physical address to the "Next-Chunk * Buffer Ptr" to make it cmd_hw_tail. * 3. Ring the door bell. */ zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head); zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail); consumed = zip_cmd_queue_consumed(zip_dev, queue); /* Check if there is space to push just one cmd */ if ((consumed + 128) == (ZIP_CMD_QBUF_SIZE - 8)) { zip_dbg("Cmd queue space available for single command"); /* Space for one cmd, pust it and make it circular queue */ memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr, sizeof(union zip_inst_s)); zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */ /* Now, point the "Next-Chunk Buffer Ptr" to sw_head */ ncb_ptr = zip_dev->iq[queue].sw_head; zip_dbg("ncb addr :0x%lx sw_head addr :0x%lx", ncb_ptr, zip_dev->iq[queue].sw_head - 16); /* Using Circular command queue */ zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail; /* Mark this buffer for free */ zip_dev->iq[queue].free_flag = 1; /* Write new chunk buffer address at "Next-Chunk Buffer Ptr" */ ncp.u_reg64 = 0ull; ncp.s.addr = __pa(zip_dev->iq[queue].sw_head); *ncb_ptr = ncp.u_reg64; zip_dbg("*ncb_ptr :0x%lx sw_head[phys] :0x%lx", *ncb_ptr, __pa(zip_dev->iq[queue].sw_head)); zip_dev->iq[queue].pend_cnt++; } else { zip_dbg("Enough space is available for commands"); /* Push this cmd to cmd queue buffer */ memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr, sizeof(union zip_inst_s)); zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */ zip_dev->iq[queue].pend_cnt++; } zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx", zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail, zip_dev->iq[queue].hw_tail); zip_dbg(" Pushed the new cmd : pend_cnt : %d", zip_dev->iq[queue].pend_cnt); /* Ring the doorbell */ dbell.u_reg64 = 0ull; dbell.s.dbell_cnt = 1; zip_reg_write(dbell.u_reg64, (zip_dev->reg_base + ZIP_QUEX_DOORBELL(queue))); /* Unlock cmd buffer lock */ spin_unlock(&zip_dev->iq[queue].lock); return queue; } /** * zip_update_cmd_bufs - Updates the queue statistics after posting the * instruction * @zip_dev: Pointer to zip device structure * @queue: Queue number */ void zip_update_cmd_bufs(struct zip_device *zip_dev, u32 queue) { /* Take cmd buffer lock */ spin_lock(&zip_dev->iq[queue].lock); /* Check if the previous buffer can be freed */ if (zip_dev->iq[queue].free_flag == 1) { zip_dbg("Free flag. Free cmd buffer, adjust sw head and tail"); /* Reset the free flag */ zip_dev->iq[queue].free_flag = 0; /* Point the hw_tail to start of the new chunk buffer */ zip_dev->iq[queue].hw_tail = zip_dev->iq[queue].sw_head; } else { zip_dbg("Free flag not set. 
increment hw tail"); zip_dev->iq[queue].hw_tail += 16; /* 16 64_bit words = 128B */ } zip_dev->iq[queue].done_cnt++; zip_dev->iq[queue].pend_cnt--; zip_dbg("sw_head :0x%lx sw_tail :0x%lx hw_tail :0x%lx", zip_dev->iq[queue].sw_head, zip_dev->iq[queue].sw_tail, zip_dev->iq[queue].hw_tail); zip_dbg(" Got CC : pend_cnt : %d\n", zip_dev->iq[queue].pend_cnt); spin_unlock(&zip_dev->iq[queue].lock); }
linux-master
drivers/crypto/cavium/zip/zip_device.c
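zip_load_instr() above keeps each instruction queue circular: when exactly one 128-byte slot remains before the 8 bytes reserved at the end of the buffer, it stores the command in that slot, writes a next-chunk pointer that sends the hardware back to the start, and resets its software head. The sketch below models that wrap decision with plain byte offsets; MODEL_QBUF_SIZE is an assumed buffer size (the real ZIP_CMD_QBUF_SIZE is defined elsewhere in the driver), everything else mirrors the arithmetic above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_QBUF_SIZE   1032u  /* 8 slots of 128 bytes + 8-byte next-chunk ptr */
#define MODEL_INSTR_BYTES 128u   /* one instruction (16 x 64-bit words)          */

struct model_iq {
	uint32_t head;   /* byte offset of the next free slot        */
	uint32_t tail;   /* byte offset of the start of the chunk    */
};

/* Push one instruction; returns true when the queue wrapped around. */
static bool model_push(struct model_iq *iq)
{
	uint32_t consumed = iq->head - iq->tail;

	if (consumed + MODEL_INSTR_BYTES == MODEL_QBUF_SIZE - 8) {
		/*
		 * Exactly one slot left: the command goes into it, the
		 * trailing 8 bytes take the next-chunk pointer, and the
		 * software head returns to the tail.
		 */
		iq->head = iq->tail;
		return true;
	}

	iq->head += MODEL_INSTR_BYTES;   /* plenty of room: just advance */
	return false;
}

int main(void)
{
	struct model_iq iq = { .head = 0, .tail = 0 };
	unsigned int pushes = 0;

	while (!model_push(&iq))
		pushes++;
	printf("wrapped after %u pushes (head back at %u)\n",
	       pushes + 1, iq.head);
	return 0;
}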
// SPDX-License-Identifier: GPL-2.0+ /* * caam - Freescale FSL CAAM support for crypto API * * Copyright 2008-2011 Freescale Semiconductor, Inc. * Copyright 2016-2019, 2023 NXP * * Based on talitos crypto API driver. * * relationship of job descriptors to shared descriptors (SteveC Dec 10 2008): * * --------------- --------------- * | JobDesc #1 |-------------------->| ShareDesc | * | *(packet 1) | | (PDB) | * --------------- |------------->| (hashKey) | * . | | (cipherKey) | * . | |-------->| (operation) | * --------------- | | --------------- * | JobDesc #2 |------| | * | *(packet 2) | | * --------------- | * . | * . | * --------------- | * | JobDesc #3 |------------ * | *(packet 3) | * --------------- * * The SharedDesc never changes for a connection unless rekeyed, but * each packet will likely be in a different place. So all we need * to know to process the packet is where the input is, where the * output goes, and what context we want to process with. Context is * in the SharedDesc, packet references in the JobDesc. * * So, a job desc looks like: * * --------------------- * | Header | * | ShareDesc Pointer | * | SEQ_OUT_PTR | * | (output buffer) | * | (output length) | * | SEQ_IN_PTR | * | (input buffer) | * | (input length) | * --------------------- */ #include "compat.h" #include "regs.h" #include "intern.h" #include "desc_constr.h" #include "jr.h" #include "error.h" #include "sg_sw_sec4.h" #include "key_gen.h" #include "caamalg_desc.h" #include <asm/unaligned.h> #include <crypto/internal/aead.h> #include <crypto/internal/engine.h> #include <crypto/internal/skcipher.h> #include <crypto/xts.h> #include <linux/dma-mapping.h> #include <linux/device.h> #include <linux/err.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> /* * crypto alg */ #define CAAM_CRA_PRIORITY 3000 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ CTR_RFC3686_NONCE_SIZE + \ SHA512_DIGEST_SIZE * 2) #define AEAD_DESC_JOB_IO_LEN (DESC_JOB_IO_LEN + CAAM_CMD_SZ * 2) #define GCM_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ CAAM_CMD_SZ * 4) #define AUTHENC_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + \ CAAM_CMD_SZ * 5) #define CHACHAPOLY_DESC_JOB_IO_LEN (AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6) #define DESC_MAX_USED_BYTES (CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN_MIN) #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) struct caam_alg_entry { int class1_alg_type; int class2_alg_type; bool rfc3686; bool geniv; bool nodkp; }; struct caam_aead_alg { struct aead_engine_alg aead; struct caam_alg_entry caam; bool registered; }; struct caam_skcipher_alg { struct skcipher_engine_alg skcipher; struct caam_alg_entry caam; bool registered; }; /* * per-session context */ struct caam_ctx { u32 sh_desc_enc[DESC_MAX_USED_LEN]; u32 sh_desc_dec[DESC_MAX_USED_LEN]; u8 key[CAAM_MAX_KEY_SIZE]; dma_addr_t sh_desc_enc_dma; dma_addr_t sh_desc_dec_dma; dma_addr_t key_dma; enum dma_data_direction dir; struct device *jrdev; struct alginfo adata; struct alginfo cdata; unsigned int authsize; bool xts_key_fallback; struct crypto_skcipher *fallback; }; struct caam_skcipher_req_ctx { struct skcipher_edesc *edesc; struct skcipher_request fallback_req; }; struct caam_aead_req_ctx { struct aead_edesc *edesc; }; static int aead_null_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 *desc; int 
rem_bytes = CAAM_DESC_BYTES_MAX - AEAD_DESC_JOB_IO_LEN - ctx->adata.keylen_pad; /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_AEAD_NULL_ENC_LEN) { ctx->adata.key_inline = true; ctx->adata.key_virt = ctx->key; } else { ctx->adata.key_inline = false; ctx->adata.key_dma = ctx->key_dma; } /* aead_encrypt shared descriptor */ desc = ctx->sh_desc_enc; cnstr_shdsc_aead_null_encap(desc, &ctx->adata, ctx->authsize, ctrlpriv->era); dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc), ctx->dir); /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_AEAD_NULL_DEC_LEN) { ctx->adata.key_inline = true; ctx->adata.key_virt = ctx->key; } else { ctx->adata.key_inline = false; ctx->adata.key_dma = ctx->key_dma; } /* aead_decrypt shared descriptor */ desc = ctx->sh_desc_dec; cnstr_shdsc_aead_null_decap(desc, &ctx->adata, ctx->authsize, ctrlpriv->era); dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc), ctx->dir); return 0; } static int aead_set_sh_desc(struct crypto_aead *aead) { struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), struct caam_aead_alg, aead.base); unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 ctx1_iv_off = 0; u32 *desc, *nonce = NULL; u32 inl_mask; unsigned int data_len[2]; const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CTR_MOD128); const bool is_rfc3686 = alg->caam.rfc3686; if (!ctx->authsize) return 0; /* NULL encryption / decryption */ if (!ctx->cdata.keylen) return aead_null_set_sh_desc(aead); /* * AES-CTR needs to load IV in CONTEXT1 reg * at an offset of 128bits (16bytes) * CONTEXT1[255:128] = IV */ if (ctr_mode) ctx1_iv_off = 16; /* * RFC3686 specific: * CONTEXT1[255:128] = {NONCE, IV, COUNTER} */ if (is_rfc3686) { ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); } /* * In case |user key| > |derived key|, using DKP<imm,imm> * would result in invalid opcodes (last bytes of user key) in * the resulting descriptor. Use DKP<ptr,imm> instead => both * virtual and dma key addresses are needed. */ ctx->adata.key_virt = ctx->key; ctx->adata.key_dma = ctx->key_dma; ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; data_len[0] = ctx->adata.keylen_pad; data_len[1] = ctx->cdata.keylen; if (alg->caam.geniv) goto skip_enc; /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ if (desc_inline_query(DESC_AEAD_ENC_LEN + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0) return -EINVAL; ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); /* aead_encrypt shared descriptor */ desc = ctx->sh_desc_enc; cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, false, ctrlpriv->era); dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc), ctx->dir); skip_enc: /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ if (desc_inline_query(DESC_AEAD_DEC_LEN + (is_rfc3686 ? 
DESC_AEAD_CTR_RFC3686_LEN : 0), AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0) return -EINVAL; ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); /* aead_decrypt shared descriptor */ desc = ctx->sh_desc_dec; cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, alg->caam.geniv, is_rfc3686, nonce, ctx1_iv_off, false, ctrlpriv->era); dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc), ctx->dir); if (!alg->caam.geniv) goto skip_givenc; /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ if (desc_inline_query(DESC_AEAD_GIVENC_LEN + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), AUTHENC_DESC_JOB_IO_LEN, data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0) return -EINVAL; ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); /* aead_givencrypt shared descriptor */ desc = ctx->sh_desc_enc; cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, false, ctrlpriv->era); dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc), ctx->dir); skip_givenc: return 0; } static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); ctx->authsize = authsize; aead_set_sh_desc(authenc); return 0; } static int gcm_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_aead_ivsize(aead); u32 *desc; int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - ctx->cdata.keylen; if (!ctx->cdata.keylen || !ctx->authsize) return 0; /* * AES GCM encrypt shared descriptor * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_GCM_ENC_LEN) { ctx->cdata.key_inline = true; ctx->cdata.key_virt = ctx->key; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } desc = ctx->sh_desc_enc; cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false); dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc), ctx->dir); /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_GCM_DEC_LEN) { ctx->cdata.key_inline = true; ctx->cdata.key_virt = ctx->key; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } desc = ctx->sh_desc_dec; cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false); dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc), ctx->dir); return 0; } static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_gcm_check_authsize(authsize); if (err) return err; ctx->authsize = authsize; gcm_set_sh_desc(authenc); return 0; } static int rfc4106_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_aead_ivsize(aead); u32 *desc; int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - ctx->cdata.keylen; if (!ctx->cdata.keylen || !ctx->authsize) return 0; /* * RFC4106 encrypt shared descriptor * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_RFC4106_ENC_LEN) { ctx->cdata.key_inline = true; ctx->cdata.key_virt = ctx->key; } else { 
ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } desc = ctx->sh_desc_enc; cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false); dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc), ctx->dir); /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_RFC4106_DEC_LEN) { ctx->cdata.key_inline = true; ctx->cdata.key_virt = ctx->key; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } desc = ctx->sh_desc_dec; cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false); dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc), ctx->dir); return 0; } static int rfc4106_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_rfc4106_check_authsize(authsize); if (err) return err; ctx->authsize = authsize; rfc4106_set_sh_desc(authenc); return 0; } static int rfc4543_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_aead_ivsize(aead); u32 *desc; int rem_bytes = CAAM_DESC_BYTES_MAX - GCM_DESC_JOB_IO_LEN - ctx->cdata.keylen; if (!ctx->cdata.keylen || !ctx->authsize) return 0; /* * RFC4543 encrypt shared descriptor * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_RFC4543_ENC_LEN) { ctx->cdata.key_inline = true; ctx->cdata.key_virt = ctx->key; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } desc = ctx->sh_desc_enc; cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, false); dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc), ctx->dir); /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_RFC4543_DEC_LEN) { ctx->cdata.key_inline = true; ctx->cdata.key_virt = ctx->key; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } desc = ctx->sh_desc_dec; cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, false); dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc), ctx->dir); return 0; } static int rfc4543_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); if (authsize != 16) return -EINVAL; ctx->authsize = authsize; rfc4543_set_sh_desc(authenc); return 0; } static int chachapoly_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_aead_ivsize(aead); u32 *desc; if (!ctx->cdata.keylen || !ctx->authsize) return 0; desc = ctx->sh_desc_enc; cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, true, false); dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc), ctx->dir); desc = ctx->sh_desc_dec; cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, false, false); dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc), ctx->dir); return 0; } static int chachapoly_setauthsize(struct crypto_aead *aead, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); if (authsize != POLY1305_DIGEST_SIZE) return -EINVAL; ctx->authsize = authsize; return chachapoly_set_sh_desc(aead); } static int chachapoly_setkey(struct crypto_aead *aead, 
const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; if (keylen != CHACHA_KEY_SIZE + saltlen) return -EINVAL; ctx->cdata.key_virt = key; ctx->cdata.keylen = keylen - saltlen; return chachapoly_set_sh_desc(aead); } static int aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); struct crypto_authenc_keys keys; int ret = 0; if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto badkey; dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", keys.authkeylen + keys.enckeylen, keys.enckeylen, keys.authkeylen); print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); /* * If DKP is supported, use it in the shared descriptor to generate * the split key. */ if (ctrlpriv->era >= 6) { ctx->adata.keylen = keys.authkeylen; ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & OP_ALG_ALGSEL_MASK); if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) goto badkey; memcpy(ctx->key, keys.authkey, keys.authkeylen); memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + keys.enckeylen, ctx->dir); goto skip_split_key; } ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, keys.authkey, keys.authkeylen, CAAM_MAX_KEY_SIZE - keys.enckeylen); if (ret) { goto badkey; } /* postpend encryption key to auth split key */ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + keys.enckeylen, ctx->dir); print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ctx->adata.keylen_pad + keys.enckeylen, 1); skip_split_key: ctx->cdata.keylen = keys.enckeylen; memzero_explicit(&keys, sizeof(keys)); return aead_set_sh_desc(aead); badkey: memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_authenc_keys keys; int err; err = crypto_authenc_extractkeys(&keys, key, keylen); if (unlikely(err)) return err; err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: aead_setkey(aead, key, keylen); memzero_explicit(&keys, sizeof(keys)); return err; } static int gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int err; err = aes_check_keylen(keylen); if (err) return err; print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); ctx->cdata.keylen = keylen; return gcm_set_sh_desc(aead); } static int rfc4106_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int err; err = aes_check_keylen(keylen - 4); if (err) return err; print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); /* * The last four bytes of the key material are used as the salt value * in the nonce. Update the AES key length. 
*/ ctx->cdata.keylen = keylen - 4; dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, ctx->dir); return rfc4106_set_sh_desc(aead); } static int rfc4543_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int err; err = aes_check_keylen(keylen - 4); if (err) return err; print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); /* * The last four bytes of the key material are used as the salt value * in the nonce. Update the AES key length. */ ctx->cdata.keylen = keylen - 4; dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->cdata.keylen, ctx->dir); return rfc4543_set_sh_desc(aead); } static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen, const u32 ctx1_iv_off) { struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_skcipher_alg *alg = container_of(crypto_skcipher_alg(skcipher), typeof(*alg), skcipher.base); struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_skcipher_ivsize(skcipher); u32 *desc; const bool is_rfc3686 = alg->caam.rfc3686; print_hex_dump_debug("key in @"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; /* skcipher_encrypt shared descriptor */ desc = ctx->sh_desc_enc; cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, ctx1_iv_off); dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc), ctx->dir); /* skcipher_decrypt shared descriptor */ desc = ctx->sh_desc_dec; cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, ctx1_iv_off); dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc), ctx->dir); return 0; } static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { int err; err = aes_check_keylen(keylen); if (err) return err; return skcipher_setkey(skcipher, key, keylen, 0); } static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { u32 ctx1_iv_off; int err; /* * RFC3686 specific: * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} * | *key = {KEY, NONCE} */ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; keylen -= CTR_RFC3686_NONCE_SIZE; err = aes_check_keylen(keylen); if (err) return err; return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { u32 ctx1_iv_off; int err; /* * AES-CTR needs to load IV in CONTEXT1 reg * at an offset of 128bits (16bytes) * CONTEXT1[255:128] = IV */ ctx1_iv_off = 16; err = aes_check_keylen(keylen); if (err) return err; return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } static int des_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { return verify_skcipher_des_key(skcipher, key) ?: skcipher_setkey(skcipher, key, keylen, 0); } static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { return verify_skcipher_des3_key(skcipher, key) ?: skcipher_setkey(skcipher, key, keylen, 0); } static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 
*desc; int err; err = xts_verify_key(skcipher, key, keylen); if (err) { dev_dbg(jrdev, "key size mismatch\n"); return err; } if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256) ctx->xts_key_fallback = true; if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) { err = crypto_skcipher_setkey(ctx->fallback, key, keylen); if (err) return err; } ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; /* xts_skcipher_encrypt shared descriptor */ desc = ctx->sh_desc_enc; cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata); dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma, desc_bytes(desc), ctx->dir); /* xts_skcipher_decrypt shared descriptor */ desc = ctx->sh_desc_dec; cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata); dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma, desc_bytes(desc), ctx->dir); return 0; } /* * aead_edesc - s/w-extended aead descriptor * @src_nents: number of segments in input s/w scatterlist * @dst_nents: number of segments in output s/w scatterlist * @mapped_src_nents: number of segments in input h/w link table * @mapped_dst_nents: number of segments in output h/w link table * @sec4_sg_bytes: length of dma mapped sec4_sg space * @bklog: stored to determine if the request needs backlog * @sec4_sg_dma: bus physical mapped address of h/w link table * @sec4_sg: pointer to h/w link table * @hw_desc: the h/w job descriptor followed by any referenced link tables */ struct aead_edesc { int src_nents; int dst_nents; int mapped_src_nents; int mapped_dst_nents; int sec4_sg_bytes; bool bklog; dma_addr_t sec4_sg_dma; struct sec4_sg_entry *sec4_sg; u32 hw_desc[]; }; /* * skcipher_edesc - s/w-extended skcipher descriptor * @src_nents: number of segments in input s/w scatterlist * @dst_nents: number of segments in output s/w scatterlist * @mapped_src_nents: number of segments in input h/w link table * @mapped_dst_nents: number of segments in output h/w link table * @iv_dma: dma address of iv for checking continuity and link table * @sec4_sg_bytes: length of dma mapped sec4_sg space * @bklog: stored to determine if the request needs backlog * @sec4_sg_dma: bus physical mapped address of h/w link table * @sec4_sg: pointer to h/w link table * @hw_desc: the h/w job descriptor followed by any referenced link tables * and IV */ struct skcipher_edesc { int src_nents; int dst_nents; int mapped_src_nents; int mapped_dst_nents; dma_addr_t iv_dma; int sec4_sg_bytes; bool bklog; dma_addr_t sec4_sg_dma; struct sec4_sg_entry *sec4_sg; u32 hw_desc[]; }; static void caam_unmap(struct device *dev, struct scatterlist *src, struct scatterlist *dst, int src_nents, int dst_nents, dma_addr_t iv_dma, int ivsize, dma_addr_t sec4_sg_dma, int sec4_sg_bytes) { if (dst != src) { if (src_nents) dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); if (dst_nents) dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); } else { dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); } if (iv_dma) dma_unmap_single(dev, iv_dma, ivsize, DMA_BIDIRECTIONAL); if (sec4_sg_bytes) dma_unmap_single(dev, sec4_sg_dma, sec4_sg_bytes, DMA_TO_DEVICE); } static void aead_unmap(struct device *dev, struct aead_edesc *edesc, struct aead_request *req) { caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, 0, 0, edesc->sec4_sg_dma, edesc->sec4_sg_bytes); } static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); int ivsize = crypto_skcipher_ivsize(skcipher); 
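	/*
	 * Release the DMA mappings taken in skcipher_edesc_alloc(): the
	 * src/dst scatterlists, the bounce buffer holding the IV and the
	 * sec4 S/G table are all undone through caam_unmap().
	 */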
caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, edesc->iv_dma, ivsize, edesc->sec4_sg_dma, edesc->sec4_sg_bytes); } static void aead_crypt_done(struct device *jrdev, u32 *desc, u32 err, void *context) { struct aead_request *req = context; struct caam_aead_req_ctx *rctx = aead_request_ctx(req); struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); struct aead_edesc *edesc; int ecode = 0; bool has_bklog; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = rctx->edesc; has_bklog = edesc->bklog; if (err) ecode = caam_jr_strstatus(jrdev, err); aead_unmap(jrdev, edesc, req); kfree(edesc); /* * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. */ if (!has_bklog) aead_request_complete(req, ecode); else crypto_finalize_aead_request(jrp->engine, req, ecode); } static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc) { return PTR_ALIGN((u8 *)edesc->sec4_sg + edesc->sec4_sg_bytes, dma_get_cache_alignment()); } static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err, void *context) { struct skcipher_request *req = context; struct skcipher_edesc *edesc; struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); int ivsize = crypto_skcipher_ivsize(skcipher); int ecode = 0; bool has_bklog; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = rctx->edesc; has_bklog = edesc->bklog; if (err) ecode = caam_jr_strstatus(jrdev, err); skcipher_unmap(jrdev, edesc, req); /* * The crypto API expects us to set the IV (req->iv) to the last * ciphertext block (CBC mode) or last counter (CTR mode). * This is used e.g. by the CTS mode. */ if (ivsize && !ecode) { memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize); print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); } caam_dump_sg("dst @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->dst, edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); kfree(edesc); /* * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. */ if (!has_bklog) skcipher_request_complete(req, ecode); else crypto_finalize_skcipher_request(jrp->engine, req, ecode); } /* * Fill in aead job descriptor */ static void init_aead_job(struct aead_request *req, struct aead_edesc *edesc, bool all_contig, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); int authsize = ctx->authsize; u32 *desc = edesc->hw_desc; u32 out_options, in_options; dma_addr_t dst_dma, src_dma; int len, sec4_sg_index = 0; dma_addr_t ptr; u32 *sh_desc; sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; ptr = encrypt ? ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); if (all_contig) { src_dma = edesc->mapped_src_nents ? 
sg_dma_address(req->src) : 0; in_options = 0; } else { src_dma = edesc->sec4_sg_dma; sec4_sg_index += edesc->mapped_src_nents; in_options = LDST_SGF; } append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen, in_options); dst_dma = src_dma; out_options = in_options; if (unlikely(req->src != req->dst)) { if (!edesc->mapped_dst_nents) { dst_dma = 0; out_options = 0; } else if (edesc->mapped_dst_nents == 1) { dst_dma = sg_dma_address(req->dst); out_options = 0; } else { dst_dma = edesc->sec4_sg_dma + sec4_sg_index * sizeof(struct sec4_sg_entry); out_options = LDST_SGF; } } if (encrypt) append_seq_out_ptr(desc, dst_dma, req->assoclen + req->cryptlen + authsize, out_options); else append_seq_out_ptr(desc, dst_dma, req->assoclen + req->cryptlen - authsize, out_options); } static void init_gcm_job(struct aead_request *req, struct aead_edesc *edesc, bool all_contig, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); u32 *desc = edesc->hw_desc; bool generic_gcm = (ivsize == GCM_AES_IV_SIZE); unsigned int last; init_aead_job(req, edesc, all_contig, encrypt); append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); /* BUG This should not be specific to generic GCM. */ last = 0; if (encrypt && generic_gcm && !(req->assoclen + req->cryptlen)) last = FIFOLD_TYPE_LAST1; /* Read GCM IV */ append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | GCM_AES_IV_SIZE | last); /* Append Salt */ if (!generic_gcm) append_data(desc, ctx->key + ctx->cdata.keylen, 4); /* Append IV */ append_data(desc, req->iv, ivsize); /* End of blank commands */ } static void init_chachapoly_job(struct aead_request *req, struct aead_edesc *edesc, bool all_contig, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int assoclen = req->assoclen; u32 *desc = edesc->hw_desc; u32 ctx_iv_off = 4; init_aead_job(req, edesc, all_contig, encrypt); if (ivsize != CHACHAPOLY_IV_SIZE) { /* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */ ctx_iv_off += 4; /* * The associated data comes already with the IV but we need * to skip it when we authenticate or encrypt... */ assoclen -= ivsize; } append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen); /* * For IPsec load the IV further in the same register. 
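	 * (ctx_iv_off is bumped from 4 to 8 above, so the 8-byte IV lands
	 * right after the 4-byte nonce in CONTEXT1).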
* For RFC7539 simply load the 12 bytes nonce in a single operation */ append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | ctx_iv_off << LDST_OFFSET_SHIFT); } static void init_authenc_job(struct aead_request *req, struct aead_edesc *edesc, bool all_contig, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), struct caam_aead_alg, aead.base); unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CTR_MOD128); const bool is_rfc3686 = alg->caam.rfc3686; u32 *desc = edesc->hw_desc; u32 ivoffset = 0; /* * AES-CTR needs to load IV in CONTEXT1 reg * at an offset of 128bits (16bytes) * CONTEXT1[255:128] = IV */ if (ctr_mode) ivoffset = 16; /* * RFC3686 specific: * CONTEXT1[255:128] = {NONCE, IV, COUNTER} */ if (is_rfc3686) ivoffset = 16 + CTR_RFC3686_NONCE_SIZE; init_aead_job(req, edesc, all_contig, encrypt); /* * {REG3, DPOVRD} = assoclen, depending on whether MATH command supports * having DPOVRD as destination. */ if (ctrlpriv->era < 3) append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); else append_math_add_imm_u32(desc, DPOVRD, ZERO, IMM, req->assoclen); if (ivsize && ((is_rfc3686 && encrypt) || !alg->caam.geniv)) append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | (ivoffset << LDST_OFFSET_SHIFT)); } /* * Fill in skcipher job descriptor */ static void init_skcipher_job(struct skcipher_request *req, struct skcipher_edesc *edesc, const bool encrypt) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *jrdev = ctx->jrdev; int ivsize = crypto_skcipher_ivsize(skcipher); u32 *desc = edesc->hw_desc; u32 *sh_desc; u32 in_options = 0, out_options = 0; dma_addr_t src_dma, dst_dma, ptr; int len, sec4_sg_index = 0; print_hex_dump_debug("presciv@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, ivsize, 1); dev_dbg(jrdev, "asked=%d, cryptlen%d\n", (int)edesc->src_nents > 1 ? 100 : req->cryptlen, req->cryptlen); caam_dump_sg("src @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->src, edesc->src_nents > 1 ? 100 : req->cryptlen, 1); sh_desc = encrypt ? ctx->sh_desc_enc : ctx->sh_desc_dec; ptr = encrypt ? 
ctx->sh_desc_enc_dma : ctx->sh_desc_dec_dma; len = desc_len(sh_desc); init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); if (ivsize || edesc->mapped_src_nents > 1) { src_dma = edesc->sec4_sg_dma; sec4_sg_index = edesc->mapped_src_nents + !!ivsize; in_options = LDST_SGF; } else { src_dma = sg_dma_address(req->src); } append_seq_in_ptr(desc, src_dma, req->cryptlen + ivsize, in_options); if (likely(req->src == req->dst)) { dst_dma = src_dma + !!ivsize * sizeof(struct sec4_sg_entry); out_options = in_options; } else if (!ivsize && edesc->mapped_dst_nents == 1) { dst_dma = sg_dma_address(req->dst); } else { dst_dma = edesc->sec4_sg_dma + sec4_sg_index * sizeof(struct sec4_sg_entry); out_options = LDST_SGF; } append_seq_out_ptr(desc, dst_dma, req->cryptlen + ivsize, out_options); } /* * allocate and map the aead extended descriptor */ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, int desc_bytes, bool *all_contig_ptr, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; struct caam_aead_req_ctx *rctx = aead_request_ctx(req); gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; int src_len, dst_len = 0; struct aead_edesc *edesc; int sec4_sg_index, sec4_sg_len, sec4_sg_bytes; unsigned int authsize = ctx->authsize; if (unlikely(req->dst != req->src)) { src_len = req->assoclen + req->cryptlen; dst_len = src_len + (encrypt ? authsize : (-authsize)); src_nents = sg_nents_for_len(req->src, src_len); if (unlikely(src_nents < 0)) { dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", src_len); return ERR_PTR(src_nents); } dst_nents = sg_nents_for_len(req->dst, dst_len); if (unlikely(dst_nents < 0)) { dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", dst_len); return ERR_PTR(dst_nents); } } else { src_len = req->assoclen + req->cryptlen + (encrypt ? authsize : 0); src_nents = sg_nents_for_len(req->src, src_len); if (unlikely(src_nents < 0)) { dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", src_len); return ERR_PTR(src_nents); } } if (likely(req->src == req->dst)) { mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, DMA_BIDIRECTIONAL); if (unlikely(!mapped_src_nents)) { dev_err(jrdev, "unable to map source\n"); return ERR_PTR(-ENOMEM); } } else { /* Cover also the case of null (zero length) input data */ if (src_nents) { mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); if (unlikely(!mapped_src_nents)) { dev_err(jrdev, "unable to map source\n"); return ERR_PTR(-ENOMEM); } } else { mapped_src_nents = 0; } /* Cover also the case of null (zero length) output data */ if (dst_nents) { mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, DMA_FROM_DEVICE); if (unlikely(!mapped_dst_nents)) { dev_err(jrdev, "unable to map destination\n"); dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return ERR_PTR(-ENOMEM); } } else { mapped_dst_nents = 0; } } /* * HW reads 4 S/G entries at a time; make sure the reads don't go beyond * the end of the table by allocating more S/G entries. */ sec4_sg_len = mapped_src_nents > 1 ? 
mapped_src_nents : 0; if (mapped_dst_nents > 1) sec4_sg_len += pad_sg_nents(mapped_dst_nents); else sec4_sg_len = pad_sg_nents(sec4_sg_len); sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes, flags); if (!edesc) { caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 0, 0, 0); return ERR_PTR(-ENOMEM); } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->mapped_src_nents = mapped_src_nents; edesc->mapped_dst_nents = mapped_dst_nents; edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) + desc_bytes; rctx->edesc = edesc; *all_contig_ptr = !(mapped_src_nents > 1); sec4_sg_index = 0; if (mapped_src_nents > 1) { sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + sec4_sg_index, 0); sec4_sg_index += mapped_src_nents; } if (mapped_dst_nents > 1) { sg_to_sec4_sg_last(req->dst, dst_len, edesc->sec4_sg + sec4_sg_index, 0); } if (!sec4_sg_bytes) return edesc; edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); aead_unmap(jrdev, edesc, req); kfree(edesc); return ERR_PTR(-ENOMEM); } edesc->sec4_sg_bytes = sec4_sg_bytes; return edesc; } static int aead_enqueue_req(struct device *jrdev, struct aead_request *req) { struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); struct caam_aead_req_ctx *rctx = aead_request_ctx(req); struct aead_edesc *edesc = rctx->edesc; u32 *desc = edesc->hw_desc; int ret; /* * Only the backlog request are sent to crypto-engine since the others * can be handled by CAAM, if free, especially since JR has up to 1024 * entries (more than the 10 entries from crypto-engine). 
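	 * Non-backlog requests are enqueued directly on the job ring and are
	 * completed from aead_crypt_done(); backlog ones are finalized via
	 * crypto_finalize_aead_request() once the engine has run them.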
*/ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) ret = crypto_transfer_aead_request_to_engine(jrpriv->engine, req); else ret = caam_jr_enqueue(jrdev, desc, aead_crypt_done, req); if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { aead_unmap(jrdev, edesc, req); kfree(rctx->edesc); } return ret; } static inline int chachapoly_crypt(struct aead_request *req, bool encrypt) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; bool all_contig; u32 *desc; edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig, encrypt); if (IS_ERR(edesc)) return PTR_ERR(edesc); desc = edesc->hw_desc; init_chachapoly_job(req, edesc, all_contig, encrypt); print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return aead_enqueue_req(jrdev, req); } static int chachapoly_encrypt(struct aead_request *req) { return chachapoly_crypt(req, true); } static int chachapoly_decrypt(struct aead_request *req) { return chachapoly_crypt(req, false); } static inline int aead_crypt(struct aead_request *req, bool encrypt) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; bool all_contig; /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, AUTHENC_DESC_JOB_IO_LEN, &all_contig, encrypt); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor */ init_authenc_job(req, edesc, all_contig, encrypt); print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, desc_bytes(edesc->hw_desc), 1); return aead_enqueue_req(jrdev, req); } static int aead_encrypt(struct aead_request *req) { return aead_crypt(req, true); } static int aead_decrypt(struct aead_request *req) { return aead_crypt(req, false); } static int aead_do_one_req(struct crypto_engine *engine, void *areq) { struct aead_request *req = aead_request_cast(areq); struct caam_ctx *ctx = crypto_aead_ctx_dma(crypto_aead_reqtfm(req)); struct caam_aead_req_ctx *rctx = aead_request_ctx(req); u32 *desc = rctx->edesc->hw_desc; int ret; rctx->edesc->bklog = true; ret = caam_jr_enqueue(ctx->jrdev, desc, aead_crypt_done, req); if (ret == -ENOSPC && engine->retry_support) return ret; if (ret != -EINPROGRESS) { aead_unmap(ctx->jrdev, rctx->edesc, req); kfree(rctx->edesc); } else { ret = 0; } return ret; } static inline int gcm_crypt(struct aead_request *req, bool encrypt) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; bool all_contig; /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, GCM_DESC_JOB_IO_LEN, &all_contig, encrypt); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor */ init_gcm_job(req, edesc, all_contig, encrypt); print_hex_dump_debug("aead jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, desc_bytes(edesc->hw_desc), 1); return aead_enqueue_req(jrdev, req); } static int gcm_encrypt(struct aead_request *req) { return gcm_crypt(req, true); } static int gcm_decrypt(struct aead_request *req) { return gcm_crypt(req, false); } static int ipsec_gcm_encrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ? 
: gcm_encrypt(req); } static int ipsec_gcm_decrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ? : gcm_decrypt(req); } /* * allocate and map the skcipher extended descriptor for skcipher */ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, int desc_bytes) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); struct device *jrdev = ctx->jrdev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct skcipher_edesc *edesc; dma_addr_t iv_dma = 0; u8 *iv; int ivsize = crypto_skcipher_ivsize(skcipher); int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes; unsigned int aligned_size; src_nents = sg_nents_for_len(req->src, req->cryptlen); if (unlikely(src_nents < 0)) { dev_err(jrdev, "Insufficient bytes (%d) in src S/G\n", req->cryptlen); return ERR_PTR(src_nents); } if (req->dst != req->src) { dst_nents = sg_nents_for_len(req->dst, req->cryptlen); if (unlikely(dst_nents < 0)) { dev_err(jrdev, "Insufficient bytes (%d) in dst S/G\n", req->cryptlen); return ERR_PTR(dst_nents); } } if (likely(req->src == req->dst)) { mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, DMA_BIDIRECTIONAL); if (unlikely(!mapped_src_nents)) { dev_err(jrdev, "unable to map source\n"); return ERR_PTR(-ENOMEM); } } else { mapped_src_nents = dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); if (unlikely(!mapped_src_nents)) { dev_err(jrdev, "unable to map source\n"); return ERR_PTR(-ENOMEM); } mapped_dst_nents = dma_map_sg(jrdev, req->dst, dst_nents, DMA_FROM_DEVICE); if (unlikely(!mapped_dst_nents)) { dev_err(jrdev, "unable to map destination\n"); dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return ERR_PTR(-ENOMEM); } } if (!ivsize && mapped_src_nents == 1) sec4_sg_ents = 0; // no need for an input hw s/g table else sec4_sg_ents = mapped_src_nents + !!ivsize; dst_sg_idx = sec4_sg_ents; /* * Input, output HW S/G tables: [IV, src][dst, IV] * IV entries point to the same buffer * If src == dst, S/G entries are reused (S/G tables overlap) * * HW reads 4 S/G entries at a time; make sure the reads don't go beyond * the end of the table by allocating more S/G entries. Logic: * if (output S/G) * pad output S/G, if needed * else if (input S/G) ... 
* pad input S/G, if needed */ if (ivsize || mapped_dst_nents > 1) { if (req->src == req->dst) sec4_sg_ents = !!ivsize + pad_sg_nents(sec4_sg_ents); else sec4_sg_ents += pad_sg_nents(mapped_dst_nents + !!ivsize); } else { sec4_sg_ents = pad_sg_nents(sec4_sg_ents); } sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry); /* * allocate space for base edesc and hw desc commands, link tables, IV */ aligned_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes; aligned_size = ALIGN(aligned_size, dma_get_cache_alignment()); aligned_size += ~(ARCH_KMALLOC_MINALIGN - 1) & (dma_get_cache_alignment() - 1); aligned_size += ALIGN(ivsize, dma_get_cache_alignment()); edesc = kzalloc(aligned_size, flags); if (!edesc) { dev_err(jrdev, "could not allocate extended descriptor\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 0, 0, 0); return ERR_PTR(-ENOMEM); } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->mapped_src_nents = mapped_src_nents; edesc->mapped_dst_nents = mapped_dst_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc + desc_bytes); rctx->edesc = edesc; /* Make sure IV is located in a DMAable area */ if (ivsize) { iv = skcipher_edesc_iv(edesc); memcpy(iv, req->iv, ivsize); iv_dma = dma_map_single(jrdev, iv, ivsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(jrdev, iv_dma)) { dev_err(jrdev, "unable to map IV\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0, 0, 0, 0); kfree(edesc); return ERR_PTR(-ENOMEM); } dma_to_sec4_sg_one(edesc->sec4_sg, iv_dma, ivsize, 0); } if (dst_sg_idx) sg_to_sec4_sg(req->src, req->cryptlen, edesc->sec4_sg + !!ivsize, 0); if (req->src != req->dst && (ivsize || mapped_dst_nents > 1)) sg_to_sec4_sg(req->dst, req->cryptlen, edesc->sec4_sg + dst_sg_idx, 0); if (ivsize) dma_to_sec4_sg_one(edesc->sec4_sg + dst_sg_idx + mapped_dst_nents, iv_dma, ivsize, 0); if (ivsize || mapped_dst_nents > 1) sg_to_sec4_set_last(edesc->sec4_sg + dst_sg_idx + mapped_dst_nents - 1 + !!ivsize); if (sec4_sg_bytes) { edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, iv_dma, ivsize, 0, 0); kfree(edesc); return ERR_PTR(-ENOMEM); } } edesc->iv_dma = iv_dma; print_hex_dump_debug("skcipher sec4_sg@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, sec4_sg_bytes, 1); return edesc; } static int skcipher_do_one_req(struct crypto_engine *engine, void *areq) { struct skcipher_request *req = skcipher_request_cast(areq); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(crypto_skcipher_reqtfm(req)); struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); u32 *desc = rctx->edesc->hw_desc; int ret; rctx->edesc->bklog = true; ret = caam_jr_enqueue(ctx->jrdev, desc, skcipher_crypt_done, req); if (ret == -ENOSPC && engine->retry_support) return ret; if (ret != -EINPROGRESS) { skcipher_unmap(ctx->jrdev, rctx->edesc, req); kfree(rctx->edesc); } else { ret = 0; } return ret; } static inline bool xts_skcipher_ivsize(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); unsigned int ivsize = crypto_skcipher_ivsize(skcipher); return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); } static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) { struct skcipher_edesc *edesc; struct crypto_skcipher *skcipher = 
crypto_skcipher_reqtfm(req); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *jrdev = ctx->jrdev; struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 *desc; int ret = 0; /* * XTS is expected to return an error even for input length = 0 * Note that the case input length < block size will be caught during * HW offloading and return an error. */ if (!req->cryptlen && !ctx->fallback) return 0; if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) || ctx->xts_key_fallback)) { struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, req->base.complete, req->base.data); skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst, req->cryptlen, req->iv); return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) : crypto_skcipher_decrypt(&rctx->fallback_req); } /* allocate extended descriptor */ edesc = skcipher_edesc_alloc(req, DESC_JOB_IO_LEN * CAAM_CMD_SZ); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor*/ init_skcipher_job(req, edesc, encrypt); print_hex_dump_debug("skcipher jobdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc, desc_bytes(edesc->hw_desc), 1); desc = edesc->hw_desc; /* * Only the backlog request are sent to crypto-engine since the others * can be handled by CAAM, if free, especially since JR has up to 1024 * entries (more than the 10 entries from crypto-engine). */ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) ret = crypto_transfer_skcipher_request_to_engine(jrpriv->engine, req); else ret = caam_jr_enqueue(jrdev, desc, skcipher_crypt_done, req); if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { skcipher_unmap(jrdev, edesc, req); kfree(edesc); } return ret; } static int skcipher_encrypt(struct skcipher_request *req) { return skcipher_crypt(req, true); } static int skcipher_decrypt(struct skcipher_request *req) { return skcipher_crypt(req, false); } static struct caam_skcipher_alg driver_algs[] = { { .skcipher.base = { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aes_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .skcipher.op = { .do_one_request = skcipher_do_one_req, }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, }, { .skcipher.base = { .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-3des-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, }, .skcipher.op = { .do_one_request = skcipher_do_one_req, }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, }, { .skcipher.base = { .base = { .cra_name = "cbc(des)", .cra_driver_name = "cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = des_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, }, .skcipher.op = { .do_one_request = skcipher_do_one_req, }, .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, }, { .skcipher.base = { .base = { .cra_name = "ctr(aes)", .cra_driver_name = 
"ctr-aes-caam", .cra_blocksize = 1, }, .setkey = ctr_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .chunksize = AES_BLOCK_SIZE, }, .skcipher.op = { .do_one_request = skcipher_do_one_req, }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, }, { .skcipher.base = { .base = { .cra_name = "rfc3686(ctr(aes))", .cra_driver_name = "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = rfc3686_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .ivsize = CTR_RFC3686_IV_SIZE, .chunksize = AES_BLOCK_SIZE, }, .skcipher.op = { .do_one_request = skcipher_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .rfc3686 = true, }, }, { .skcipher.base = { .base = { .cra_name = "xts(aes)", .cra_driver_name = "xts-aes-caam", .cra_flags = CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = xts_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .skcipher.op = { .do_one_request = skcipher_do_one_req, }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, }, { .skcipher.base = { .base = { .cra_name = "ecb(des)", .cra_driver_name = "ecb-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = des_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, }, .skcipher.op = { .do_one_request = skcipher_do_one_req, }, .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_ECB, }, { .skcipher.base = { .base = { .cra_name = "ecb(aes)", .cra_driver_name = "ecb-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aes_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, }, .skcipher.op = { .do_one_request = skcipher_do_one_req, }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_ECB, }, { .skcipher.base = { .base = { .cra_name = "ecb(des3_ede)", .cra_driver_name = "ecb-des3-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, }, .skcipher.op = { .do_one_request = skcipher_do_one_req, }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_ECB, }, }; static struct caam_aead_alg driver_aeads[] = { { .aead.base = { .base = { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "rfc4106-gcm-aes-caam", .cra_blocksize = 1, }, .setkey = rfc4106_setkey, .setauthsize = rfc4106_setauthsize, .encrypt = ipsec_gcm_encrypt, .decrypt = ipsec_gcm_decrypt, .ivsize = GCM_RFC4106_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, .nodkp = true, }, }, { .aead.base = { .base = { .cra_name = "rfc4543(gcm(aes))", .cra_driver_name = "rfc4543-gcm-aes-caam", .cra_blocksize = 1, }, .setkey = rfc4543_setkey, .setauthsize = rfc4543_setauthsize, .encrypt = ipsec_gcm_encrypt, .decrypt = ipsec_gcm_decrypt, .ivsize = GCM_RFC4543_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | 
OP_ALG_AAI_GCM, .nodkp = true, }, }, /* Galois Counter Mode */ { .aead.base = { .base = { .cra_name = "gcm(aes)", .cra_driver_name = "gcm-aes-caam", .cra_blocksize = 1, }, .setkey = gcm_setkey, .setauthsize = gcm_setauthsize, .encrypt = gcm_encrypt, .decrypt = gcm_decrypt, .ivsize = GCM_AES_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, .nodkp = true, }, }, /* single-pass ipsec_esp descriptor */ { .aead.base = { .base = { .cra_name = "authenc(hmac(md5)," "ecb(cipher_null))", .cra_driver_name = "authenc-hmac-md5-" "ecb-cipher_null-caam", .cra_blocksize = NULL_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = NULL_IV_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha1)," "ecb(cipher_null))", .cra_driver_name = "authenc-hmac-sha1-" "ecb-cipher_null-caam", .cra_blocksize = NULL_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = NULL_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha224)," "ecb(cipher_null))", .cra_driver_name = "authenc-hmac-sha224-" "ecb-cipher_null-caam", .cra_blocksize = NULL_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = NULL_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha256)," "ecb(cipher_null))", .cra_driver_name = "authenc-hmac-sha256-" "ecb-cipher_null-caam", .cra_blocksize = NULL_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = NULL_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha384)," "ecb(cipher_null))", .cra_driver_name = "authenc-hmac-sha384-" "ecb-cipher_null-caam", .cra_blocksize = NULL_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = NULL_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha512)," "ecb(cipher_null))", .cra_driver_name = "authenc-hmac-sha512-" "ecb-cipher_null-caam", .cra_blocksize = NULL_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = NULL_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(md5),cbc(aes))", .cra_driver_name = "authenc-hmac-md5-" "cbc-aes-caam", 
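				/*
				 * AES-CBC (class1) paired with HMAC-MD5 (class2); the
				 * authenc key blob is split by aead_setkey() below.
				 */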
.cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-hmac-md5-" "cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "authenc-hmac-sha1-" "cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha1-cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha224),cbc(aes))", .cra_driver_name = "authenc-hmac-sha224-" "cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha224-cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "authenc-hmac-sha256-" "cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, 
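			/* class1 = cipher (AES-CBC), class2 = authentication (HMAC-SHA256) */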
.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha256-cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha384),cbc(aes))", .cra_driver_name = "authenc-hmac-sha384-" "cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha384)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha384-cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "authenc-hmac-sha512-" "cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha512-cbc-aes-caam", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(md5),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-md5-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-hmac-md5-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, 
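		/*
		 * 3DES entries use des3_aead_setkey() so the key is checked by
		 * verify_aead_des3_key() before the common aead_setkey() path.
		 */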
.setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha1)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha1-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha1-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha224)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha224-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha224-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha256)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha256-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha256-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = 
DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha384)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha384-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha384)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha384-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha512)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha512-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha512-" "cbc-des3_ede-caam", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(md5),cbc(des))", .cra_driver_name = "authenc-hmac-md5-" "cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-hmac-md5-" "cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type 
= OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha1),cbc(des))", .cra_driver_name = "authenc-hmac-sha1-" "cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha1-cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha224),cbc(des))", .cra_driver_name = "authenc-hmac-sha224-" "cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha224-cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha256),cbc(des))", .cra_driver_name = "authenc-hmac-sha256-" "cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha256-cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha384),cbc(des))", .cra_driver_name = "authenc-hmac-sha384-" "cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = 
aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha384)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha384-cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha512),cbc(des))", .cra_driver_name = "authenc-hmac-sha512-" "cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead.base = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha512-cbc-des-caam", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(md5)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-md5-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead.base = { .base = { .cra_name = "seqiv(authenc(" "hmac(md5),rfc3686(ctr(aes))))", .cra_driver_name = "seqiv-authenc-hmac-md5-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha1)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha1-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type 
= OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead.base = { .base = { .cra_name = "seqiv(authenc(" "hmac(sha1),rfc3686(ctr(aes))))", .cra_driver_name = "seqiv-authenc-hmac-sha1-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha224)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha224-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead.base = { .base = { .cra_name = "seqiv(authenc(" "hmac(sha224),rfc3686(ctr(aes))))", .cra_driver_name = "seqiv-authenc-hmac-sha224-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha256)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha256-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead.base = { .base = { .cra_name = "seqiv(authenc(hmac(sha256)," "rfc3686(ctr(aes))))", .cra_driver_name = "seqiv-authenc-hmac-sha256-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha384)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha384-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead.base = { .base = { 
.cra_name = "seqiv(authenc(hmac(sha384)," "rfc3686(ctr(aes))))", .cra_driver_name = "seqiv-authenc-hmac-sha384-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "authenc(hmac(sha512)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha512-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead.base = { .base = { .cra_name = "seqiv(authenc(hmac(sha512)," "rfc3686(ctr(aes))))", .cra_driver_name = "seqiv-authenc-hmac-sha512-" "rfc3686-ctr-aes-caam", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, { .aead.base = { .base = { .cra_name = "rfc7539(chacha20,poly1305)", .cra_driver_name = "rfc7539-chacha20-poly1305-" "caam", .cra_blocksize = 1, }, .setkey = chachapoly_setkey, .setauthsize = chachapoly_setauthsize, .encrypt = chachapoly_encrypt, .decrypt = chachapoly_decrypt, .ivsize = CHACHAPOLY_IV_SIZE, .maxauthsize = POLY1305_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | OP_ALG_AAI_AEAD, .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | OP_ALG_AAI_AEAD, .nodkp = true, }, }, { .aead.base = { .base = { .cra_name = "rfc7539esp(chacha20,poly1305)", .cra_driver_name = "rfc7539esp-chacha20-" "poly1305-caam", .cra_blocksize = 1, }, .setkey = chachapoly_setkey, .setauthsize = chachapoly_setauthsize, .encrypt = chachapoly_encrypt, .decrypt = chachapoly_decrypt, .ivsize = 8, .maxauthsize = POLY1305_DIGEST_SIZE, }, .aead.op = { .do_one_request = aead_do_one_req, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | OP_ALG_AAI_AEAD, .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | OP_ALG_AAI_AEAD, .nodkp = true, }, }, }; static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, bool uses_dkp) { dma_addr_t dma_addr; struct caam_drv_private *priv; const size_t sh_desc_enc_offset = offsetof(struct caam_ctx, sh_desc_enc); ctx->jrdev = caam_jr_alloc(); if (IS_ERR(ctx->jrdev)) { pr_err("Job Ring Device allocation for transform failed\n"); return PTR_ERR(ctx->jrdev); } priv = dev_get_drvdata(ctx->jrdev->parent); if (priv->era >= 6 && uses_dkp) ctx->dir = DMA_BIDIRECTIONAL; else ctx->dir = DMA_TO_DEVICE; dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_enc, offsetof(struct caam_ctx, sh_desc_enc_dma) - sh_desc_enc_offset, ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(ctx->jrdev, dma_addr)) { dev_err(ctx->jrdev, "unable 
to map key, shared descriptors\n"); caam_jr_free(ctx->jrdev); return -ENOMEM; } ctx->sh_desc_enc_dma = dma_addr; ctx->sh_desc_dec_dma = dma_addr + offsetof(struct caam_ctx, sh_desc_dec) - sh_desc_enc_offset; ctx->key_dma = dma_addr + offsetof(struct caam_ctx, key) - sh_desc_enc_offset; /* copy descriptor header template value */ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; return 0; } static int caam_cra_init(struct crypto_skcipher *tfm) { struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct caam_skcipher_alg *caam_alg = container_of(alg, typeof(*caam_alg), skcipher.base); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; int ret = 0; if (alg_aai == OP_ALG_AAI_XTS) { const char *tfm_name = crypto_tfm_alg_name(&tfm->base); struct crypto_skcipher *fallback; fallback = crypto_alloc_skcipher(tfm_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback)) { pr_err("Failed to allocate %s fallback: %ld\n", tfm_name, PTR_ERR(fallback)); return PTR_ERR(fallback); } ctx->fallback = fallback; crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) + crypto_skcipher_reqsize(fallback)); } else { crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx)); } ret = caam_init_common(ctx, &caam_alg->caam, false); if (ret && ctx->fallback) crypto_free_skcipher(ctx->fallback); return ret; } static int caam_aead_init(struct crypto_aead *tfm) { struct aead_alg *alg = crypto_aead_alg(tfm); struct caam_aead_alg *caam_alg = container_of(alg, struct caam_aead_alg, aead.base); struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm); crypto_aead_set_reqsize(tfm, sizeof(struct caam_aead_req_ctx)); return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); } static void caam_exit_common(struct caam_ctx *ctx) { dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_enc_dma, offsetof(struct caam_ctx, sh_desc_enc_dma) - offsetof(struct caam_ctx, sh_desc_enc), ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); caam_jr_free(ctx->jrdev); } static void caam_cra_exit(struct crypto_skcipher *tfm) { struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); if (ctx->fallback) crypto_free_skcipher(ctx->fallback); caam_exit_common(ctx); } static void caam_aead_exit(struct crypto_aead *tfm) { caam_exit_common(crypto_aead_ctx_dma(tfm)); } void caam_algapi_exit(void) { int i; for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { struct caam_aead_alg *t_alg = driver_aeads + i; if (t_alg->registered) crypto_engine_unregister_aead(&t_alg->aead); } for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { struct caam_skcipher_alg *t_alg = driver_algs + i; if (t_alg->registered) crypto_engine_unregister_skcipher(&t_alg->skcipher); } } static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) { struct skcipher_alg *alg = &t_alg->skcipher.base; alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY); alg->init = caam_cra_init; alg->exit = caam_cra_exit; } static void caam_aead_alg_init(struct caam_aead_alg *t_alg) { struct aead_alg *alg = &t_alg->aead.base; alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | 
CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_aead_init; alg->exit = caam_aead_exit; } int caam_algapi_init(struct device *ctrldev) { struct caam_drv_private *priv = dev_get_drvdata(ctrldev); int i = 0, err = 0; u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false, gcm_support; /* * Register crypto algorithms the device supports. * First, detect presence and attributes of DES, AES, and MD blocks. */ if (priv->era < 10) { struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon; u32 cha_vid, cha_inst, aes_rn; cha_vid = rd_reg32(&perfmon->cha_id_ls); aes_vid = cha_vid & CHA_ID_LS_AES_MASK; md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; cha_inst = rd_reg32(&perfmon->cha_num_ls); des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; aes_inst = cha_inst & CHA_ID_LS_AES_MASK; md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; ccha_inst = 0; ptha_inst = 0; aes_rn = rd_reg32(&perfmon->cha_rev_ls) & CHA_ID_LS_AES_MASK; gcm_support = !(aes_vid == CHA_VER_VID_AES_LP && aes_rn < 8); } else { struct version_regs __iomem *vreg = &priv->jr[0]->vreg; u32 aesa, mdha; aesa = rd_reg32(&vreg->aesa); mdha = rd_reg32(&vreg->mdha); aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; des_inst = rd_reg32(&vreg->desa) & CHA_VER_NUM_MASK; aes_inst = aesa & CHA_VER_NUM_MASK; md_inst = mdha & CHA_VER_NUM_MASK; ccha_inst = rd_reg32(&vreg->ccha) & CHA_VER_NUM_MASK; ptha_inst = rd_reg32(&vreg->ptha) & CHA_VER_NUM_MASK; gcm_support = aesa & CHA_VER_MISC_AES_GCM; } /* If MD is present, limit digest size based on LP256 */ if (md_inst && md_vid == CHA_VER_VID_MD_LP256) md_limit = SHA256_DIGEST_SIZE; for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { struct caam_skcipher_alg *t_alg = driver_algs + i; u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; /* Skip DES algorithms if not supported by device */ if (!des_inst && ((alg_sel == OP_ALG_ALGSEL_3DES) || (alg_sel == OP_ALG_ALGSEL_DES))) continue; /* Skip AES algorithms if not supported by device */ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) continue; /* * Check support for AES modes not available * on LP devices. 
*/ if (aes_vid == CHA_VER_VID_AES_LP && (t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK) == OP_ALG_AAI_XTS) continue; caam_skcipher_alg_init(t_alg); err = crypto_engine_register_skcipher(&t_alg->skcipher); if (err) { pr_warn("%s alg registration failed\n", t_alg->skcipher.base.base.cra_driver_name); continue; } t_alg->registered = true; registered = true; } for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { struct caam_aead_alg *t_alg = driver_aeads + i; u32 c1_alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; u32 c2_alg_sel = t_alg->caam.class2_alg_type & OP_ALG_ALGSEL_MASK; u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; /* Skip DES algorithms if not supported by device */ if (!des_inst && ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || (c1_alg_sel == OP_ALG_ALGSEL_DES))) continue; /* Skip AES algorithms if not supported by device */ if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) continue; /* Skip CHACHA20 algorithms if not supported by device */ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst) continue; /* Skip POLY1305 algorithms if not supported by device */ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst) continue; /* Skip GCM algorithms if not supported by device */ if (c1_alg_sel == OP_ALG_ALGSEL_AES && alg_aai == OP_ALG_AAI_GCM && !gcm_support) continue; /* * Skip algorithms requiring message digests * if MD or MD size is not supported by device. */ if (is_mdha(c2_alg_sel) && (!md_inst || t_alg->aead.base.maxauthsize > md_limit)) continue; caam_aead_alg_init(t_alg); err = crypto_engine_register_aead(&t_alg->aead); if (err) { pr_warn("%s alg registration failed\n", t_alg->aead.base.base.cra_driver_name); continue; } t_alg->registered = true; registered = true; } if (registered) pr_info("caam algorithms registered in /proc/crypto\n"); return err; }
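The AEAD templates above are registered under generic algorithm names such as "rfc7539(chacha20,poly1305)" and "authenc(hmac(sha256),cbc(des3_ede))", so once caam_algapi_init() has registered them with the crypto engine any kernel AEAD user can pick them up transparently. The following is a minimal consumer sketch, not part of caamalg.c: the function name caam_aead_demo, the all-zero key, the 16-byte AAD / 64-byte payload split and the synchronous crypto_wait_req() pattern are all illustrative assumptions, shown only to make the registration path above concrete.

/* Hypothetical consumer sketch -- not part of the CAAM driver. */
#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int caam_aead_demo(void)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	u8 key[32] = { };	/* illustrative all-zero ChaCha20 key */
	u8 iv[12] = { };	/* rfc7539 ivsize is 12 bytes */
	u8 *buf;
	int err;

	/* Resolves to the "-caam" implementation when it wins on cra_priority. */
	tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* 16 bytes AAD + 64 bytes payload + 16-byte Poly1305 tag; must be DMA-able. */
	buf = kzalloc(16 + 64 + 16, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_free_buf;
	err = crypto_aead_setauthsize(tfm, 16);
	if (err)
		goto out_free_buf;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_buf;
	}

	sg_init_one(&sg, buf, 16 + 64 + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, 16);
	/* cryptlen covers only the plaintext; the tag is appended on output. */
	aead_request_set_crypt(req, &sg, &sg, 64, iv);

	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_free_buf:
	kfree(buf);
out_free_tfm:
	crypto_free_aead(tfm);
	return err;
}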
linux-master
drivers/crypto/caam/caamalg.c
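Because caam_algapi_init() above registers every surviving template at CAAM_CRA_PRIORITY, the generic names resolve to the "-caam" driver implementations whenever they outrank software providers. A quick, hypothetical way to confirm which provider actually backs a given name is sketched below for "xts(aes)" (the mode whose CAAM transform also allocates a software fallback in caam_cra_init()); the function name and the printed example driver name are assumptions for illustration only.

/* Hypothetical provider check -- not part of the CAAM driver. */
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/printk.h>

static void caam_show_xts_provider(void)
{
	struct crypto_skcipher *tfm;

	tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
	if (IS_ERR(tfm)) {
		pr_info("xts(aes) unavailable: %ld\n", PTR_ERR(tfm));
		return;
	}

	/* Prints e.g. an "...-caam" driver name when the CAAM entry won arbitration. */
	pr_info("xts(aes) backed by %s\n",
		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm)));

	crypto_free_skcipher(tfm);
}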
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* * Shared descriptors for ahash algorithms * * Copyright 2017-2019 NXP */ #include "compat.h" #include "desc_constr.h" #include "caamhash_desc.h" /** * cnstr_shdsc_ahash - ahash shared descriptor * @desc: pointer to buffer used for descriptor construction * @adata: pointer to authentication transform definitions. * A split key is required for SEC Era < 6; the size of the split key * is specified in this case. * Valid algorithm values - one of OP_ALG_ALGSEL_{MD5, SHA1, SHA224, * SHA256, SHA384, SHA512}. * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} * @digestsize: algorithm's digest size * @ctx_len: size of Context Register * @import_ctx: true if previous Context Register needs to be restored * must be true for ahash update and final * must be false for ahash first and digest * @era: SEC Era */ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state, int digestsize, int ctx_len, bool import_ctx, int era) { u32 op = adata->algtype; init_sh_desc(desc, HDR_SHARE_SERIAL); /* Append key if it has been set; ahash update excluded */ if (state != OP_ALG_AS_UPDATE && adata->keylen) { u32 *skip_key_load; /* Skip key loading if already shared */ skip_key_load = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); if (era < 6) append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); else append_proto_dkp(desc, adata); set_jump_tgt_here(desc, skip_key_load); op |= OP_ALG_AAI_HMAC_PRECOMP; } /* If needed, import context from software */ if (import_ctx) append_seq_load(desc, ctx_len, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); /* Class 2 operation */ append_operation(desc, op | state | OP_ALG_ENCRYPT); /* * Load from buf and/or src and write to req->result or state->context * Calculate remaining bytes to read */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* Read remaining bytes */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG | KEY_VLF); /* Store class2 context bytes */ append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); } EXPORT_SYMBOL(cnstr_shdsc_ahash); /** * cnstr_shdsc_sk_hash - shared descriptor for symmetric key cipher-based * hash algorithms * @desc: pointer to buffer used for descriptor construction * @adata: pointer to authentication transform definitions. 
* @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE} * @digestsize: algorithm's digest size * @ctx_len: size of Context Register */ void cnstr_shdsc_sk_hash(u32 * const desc, struct alginfo *adata, u32 state, int digestsize, int ctx_len) { u32 *skip_key_load; init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); /* Skip loading of key, context if already shared */ skip_key_load = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); if (state == OP_ALG_AS_INIT || state == OP_ALG_AS_INITFINAL) { append_key_as_imm(desc, adata->key_virt, adata->keylen, adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); } else { /* UPDATE, FINALIZE */ if (is_xcbc_aes(adata->algtype)) /* Load K1 */ append_key(desc, adata->key_dma, adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG | KEY_ENC); else /* CMAC */ append_key_as_imm(desc, adata->key_virt, adata->keylen, adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); /* Restore context */ append_seq_load(desc, ctx_len, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); } set_jump_tgt_here(desc, skip_key_load); /* Class 1 operation */ append_operation(desc, adata->algtype | state | OP_ALG_ENCRYPT); /* * Load from buf and/or src and write to req->result or state->context * Calculate remaining bytes to read */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* Read remaining bytes */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 | FIFOLD_TYPE_MSG | FIFOLDST_VLF); /* * Save context: * - xcbc: partial hash, keys K2 and K3 * - cmac: partial hash, constant L = E(K,0) */ append_seq_store(desc, digestsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); if (is_xcbc_aes(adata->algtype) && state == OP_ALG_AS_INIT) /* Save K1 */ append_fifo_store(desc, adata->key_dma, adata->keylen, LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK); } EXPORT_SYMBOL(cnstr_shdsc_sk_hash); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("FSL CAAM ahash descriptors support"); MODULE_AUTHOR("NXP Semiconductors");
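cnstr_shdsc_ahash() above only lays out the shared-descriptor commands; the caller owns the descriptor buffer, picks the algorithm state and later DMA-maps the result. The sketch below is a minimal, hypothetical caller building an unkeyed SHA-256 digest descriptor: the function name, the buffer-size comment and the era parameter are assumptions, and keyed (HMAC) use additionally requires the split-key fields of struct alginfo as described in the kernel-doc above.

/* Hypothetical caller sketch -- not part of caamhash_desc.c. */
#include "compat.h"
#include "desc_constr.h"
#include "caamhash_desc.h"

/* desc: caller-provided buffer, assumed >= MAX_CAAM_DESCSIZE 32-bit words.
 * era: SEC Era of the device (e.g. taken from the controller private data).
 */
static void build_sha256_digest_shdesc(u32 *desc, int era)
{
	/* Unkeyed hash: keylen == 0, so no split key is loaded and
	 * OP_ALG_AAI_HMAC_PRECOMP is not OR'ed into the operation.
	 */
	struct alginfo adata = {
		.algtype = OP_TYPE_CLASS2_ALG | OP_ALG_ALGSEL_SHA256,
	};

	/* Context Register length: running digest plus the 8-byte message
	 * length word the ahash driver keeps alongside it.
	 */
	cnstr_shdsc_ahash(desc, &adata, OP_ALG_AS_INITFINAL,
			  SHA256_DIGEST_SIZE, 8 + SHA256_DIGEST_SIZE,
			  false /* digest: no imported context */, era);
}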
linux-master
drivers/crypto/caam/caamhash_desc.c
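The file that follows, caamhash.c, wires these shared descriptors into the generic ahash entry points (init/update/finup/final/digest) and registers them under names such as "sha256-caam" and "hmac-sha256-caam". For orientation, here is a minimal, hypothetical consumer of that API; the function name, the synchronous crypto_wait_req() pattern and the assumption that the data buffer is already DMA-able are illustrative, and which implementation is selected depends on cra_priority.

/* Hypothetical ahash consumer sketch -- not part of the CAAM driver. */
#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static int caam_sha256_demo(const u8 *data, unsigned int len,
			    u8 *digest /* 32 bytes */)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	/* data must live in DMA-able memory (e.g. kmalloc'ed) for hardware
	 * providers; the digest is written back by the CPU on completion.
	 */
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* .digest maps to ahash_digest() when the CAAM provider is selected. */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}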
// SPDX-License-Identifier: GPL-2.0+ /* * caam - Freescale FSL CAAM support for ahash functions of crypto API * * Copyright 2011 Freescale Semiconductor, Inc. * Copyright 2018-2019, 2023 NXP * * Based on caamalg.c crypto API driver. * * relationship of digest job descriptor or first job descriptor after init to * shared descriptors: * * --------------- --------------- * | JobDesc #1 |-------------------->| ShareDesc | * | *(packet 1) | | (hashKey) | * --------------- | (operation) | * --------------- * * relationship of subsequent job descriptors to shared descriptors: * * --------------- --------------- * | JobDesc #2 |-------------------->| ShareDesc | * | *(packet 2) | |------------->| (hashKey) | * --------------- | |-------->| (operation) | * . | | | (load ctx2) | * . | | --------------- * --------------- | | * | JobDesc #3 |------| | * | *(packet 3) | | * --------------- | * . | * . | * --------------- | * | JobDesc #4 |------------ * | *(packet 4) | * --------------- * * The SharedDesc never changes for a connection unless rekeyed, but * each packet will likely be in a different place. So all we need * to know to process the packet is where the input is, where the * output goes, and what context we want to process with. Context is * in the SharedDesc, packet references in the JobDesc. * * So, a job desc looks like: * * --------------------- * | Header | * | ShareDesc Pointer | * | SEQ_OUT_PTR | * | (output buffer) | * | (output length) | * | SEQ_IN_PTR | * | (input buffer) | * | (input length) | * --------------------- */ #include "compat.h" #include "regs.h" #include "intern.h" #include "desc_constr.h" #include "jr.h" #include "error.h" #include "sg_sw_sec4.h" #include "key_gen.h" #include "caamhash_desc.h" #include <crypto/internal/engine.h> #include <crypto/internal/hash.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #define CAAM_CRA_PRIORITY 3000 /* max hash key is max split key size */ #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE #define CAAM_MAX_HASH_DIGEST_SIZE SHA512_DIGEST_SIZE #define DESC_HASH_MAX_USED_BYTES (DESC_AHASH_FINAL_LEN + \ CAAM_MAX_HASH_KEY_SIZE) #define DESC_HASH_MAX_USED_LEN (DESC_HASH_MAX_USED_BYTES / CAAM_CMD_SZ) /* caam context sizes for hashes: running digest + 8 */ #define HASH_MSG_LEN 8 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) static struct list_head hash_list; /* ahash per-session context */ struct caam_hash_ctx { u32 sh_desc_update[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned; u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned; dma_addr_t sh_desc_update_dma ____cacheline_aligned; dma_addr_t sh_desc_update_first_dma; dma_addr_t sh_desc_fin_dma; dma_addr_t sh_desc_digest_dma; enum dma_data_direction dir; enum dma_data_direction key_dir; struct device *jrdev; int ctx_len; struct alginfo adata; }; /* ahash state */ struct caam_hash_state { dma_addr_t buf_dma; dma_addr_t ctx_dma; int ctx_dma_len; u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; int buflen; int next_buflen; u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; int (*update)(struct ahash_request *req) ____cacheline_aligned; int (*final)(struct ahash_request *req); int (*finup)(struct ahash_request *req); struct ahash_edesc *edesc; 
void (*ahash_op_done)(struct device *jrdev, u32 *desc, u32 err, void *context); }; struct caam_export_state { u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; u8 caam_ctx[MAX_CTX_LEN]; int buflen; int (*update)(struct ahash_request *req); int (*final)(struct ahash_request *req); int (*finup)(struct ahash_request *req); }; static inline bool is_cmac_aes(u32 algtype) { return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) == (OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC); } /* Common job descriptor seq in/out ptr routines */ /* Map state->caam_ctx, and append seq_out_ptr command that points to it */ static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, struct caam_hash_state *state, int ctx_len) { state->ctx_dma_len = ctx_len; state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, DMA_FROM_DEVICE); if (dma_mapping_error(jrdev, state->ctx_dma)) { dev_err(jrdev, "unable to map ctx\n"); state->ctx_dma = 0; return -ENOMEM; } append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); return 0; } /* Map current buffer in state (if length > 0) and put it in link table */ static inline int buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg, struct caam_hash_state *state) { int buflen = state->buflen; if (!buflen) return 0; state->buf_dma = dma_map_single(jrdev, state->buf, buflen, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, state->buf_dma)) { dev_err(jrdev, "unable to map buf\n"); state->buf_dma = 0; return -ENOMEM; } dma_to_sec4_sg_one(sec4_sg, state->buf_dma, buflen, 0); return 0; } /* Map state->caam_ctx, and add it to link table */ static inline int ctx_map_to_sec4_sg(struct device *jrdev, struct caam_hash_state *state, int ctx_len, struct sec4_sg_entry *sec4_sg, u32 flag) { state->ctx_dma_len = ctx_len; state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag); if (dma_mapping_error(jrdev, state->ctx_dma)) { dev_err(jrdev, "unable to map ctx\n"); state->ctx_dma = 0; return -ENOMEM; } dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0); return 0; } static int ahash_set_sh_desc(struct crypto_ahash *ahash) { struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); u32 *desc; ctx->adata.key_virt = ctx->key; /* ahash_update shared descriptor */ desc = ctx->sh_desc_update; cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx->ctx_len, true, ctrlpriv->era); dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("ahash update shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* ahash_update_first shared descriptor */ desc = ctx->sh_desc_update_first; cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, ctx->ctx_len, false, ctrlpriv->era); dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("ahash update first shdesc@"__stringify(__LINE__) ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* ahash_final shared descriptor */ desc = ctx->sh_desc_fin; cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, ctx->ctx_len, true, ctrlpriv->era); dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("ahash final shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* ahash_digest shared descriptor */ desc = 
ctx->sh_desc_digest; cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, ctx->ctx_len, false, ctrlpriv->era); dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("ahash digest shdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return 0; } static int axcbc_set_sh_desc(struct crypto_ahash *ahash) { struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); struct device *jrdev = ctx->jrdev; u32 *desc; /* shared descriptor for ahash_update */ desc = ctx->sh_desc_update; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* shared descriptor for ahash_{final,finup} */ desc = ctx->sh_desc_fin; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* key is immediate data for INIT and INITFINAL states */ ctx->adata.key_virt = ctx->key; /* shared descriptor for first invocation of ahash_update */ desc = ctx->sh_desc_update_first; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__) " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* shared descriptor for ahash_digest */ desc = ctx->sh_desc_digest; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return 0; } static int acmac_set_sh_desc(struct crypto_ahash *ahash) { struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); struct device *jrdev = ctx->jrdev; u32 *desc; /* shared descriptor for ahash_update */ desc = ctx->sh_desc_update; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("acmac update shdesc@" __stringify(__LINE__)" : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* shared descriptor for ahash_{final,finup} */ desc = ctx->sh_desc_fin; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("acmac finup shdesc@" __stringify(__LINE__)" : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* shared descriptor for first invocation of ahash_update */ desc = ctx->sh_desc_update_first; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("acmac update first shdesc@" __stringify(__LINE__) " : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* 
shared descriptor for ahash_digest */ desc = ctx->sh_desc_digest; cnstr_shdsc_sk_hash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, ctx->ctx_len); dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma, desc_bytes(desc), ctx->dir); print_hex_dump_debug("acmac digest shdesc@" __stringify(__LINE__)" : ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return 0; } /* Digest hash size if it is too large */ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, u32 digestsize) { struct device *jrdev = ctx->jrdev; u32 *desc; struct split_key_result result; dma_addr_t key_dma; int ret; desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL); if (!desc) return -ENOMEM; init_job_desc(desc, 0); key_dma = dma_map_single(jrdev, key, *keylen, DMA_BIDIRECTIONAL); if (dma_mapping_error(jrdev, key_dma)) { dev_err(jrdev, "unable to map key memory\n"); kfree(desc); return -ENOMEM; } /* Job descriptor to perform unkeyed hash on key_in */ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | OP_ALG_AS_INITFINAL); append_seq_in_ptr(desc, key_dma, *keylen, 0); append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); append_seq_out_ptr(desc, key_dma, digestsize, 0); append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); print_hex_dump_debug("key_in@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); result.err = 0; init_completion(&result.completion); ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (ret == -EINPROGRESS) { /* in progress */ wait_for_completion(&result.completion); ret = result.err; print_hex_dump_debug("digested key@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1); } dma_unmap_single(jrdev, key_dma, *keylen, DMA_BIDIRECTIONAL); *keylen = digestsize; kfree(desc); return ret; } static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct device *jrdev = ctx->jrdev; int blocksize = crypto_tfm_alg_blocksize(&ahash->base); int digestsize = crypto_ahash_digestsize(ahash); struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); int ret; u8 *hashed_key = NULL; dev_dbg(jrdev, "keylen %d\n", keylen); if (keylen > blocksize) { unsigned int aligned_len = ALIGN(keylen, dma_get_cache_alignment()); if (aligned_len < keylen) return -EOVERFLOW; hashed_key = kmemdup(key, keylen, GFP_KERNEL); if (!hashed_key) return -ENOMEM; ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); if (ret) goto bad_free_key; key = hashed_key; } /* * If DKP is supported, use it in the shared descriptor to generate * the split key. */ if (ctrlpriv->era >= 6) { ctx->adata.key_inline = true; ctx->adata.keylen = keylen; ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & OP_ALG_ALGSEL_MASK); if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) goto bad_free_key; memcpy(ctx->key, key, keylen); /* * In case |user key| > |derived key|, using DKP<imm,imm> * would result in invalid opcodes (last bytes of user key) in * the resulting descriptor. Use DKP<ptr,imm> instead => both * virtual and dma key addresses are needed. 
*/ if (keylen > ctx->adata.keylen_pad) dma_sync_single_for_device(ctx->jrdev, ctx->adata.key_dma, ctx->adata.keylen_pad, DMA_TO_DEVICE); } else { ret = gen_split_key(ctx->jrdev, ctx->key, &ctx->adata, key, keylen, CAAM_MAX_HASH_KEY_SIZE); if (ret) goto bad_free_key; } kfree(hashed_key); return ahash_set_sh_desc(ahash); bad_free_key: kfree(hashed_key); return -EINVAL; } static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct device *jrdev = ctx->jrdev; if (keylen != AES_KEYSIZE_128) return -EINVAL; memcpy(ctx->key, key, keylen); dma_sync_single_for_device(jrdev, ctx->adata.key_dma, keylen, DMA_TO_DEVICE); ctx->adata.keylen = keylen; print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ", DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1); return axcbc_set_sh_desc(ahash); } static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int err; err = aes_check_keylen(keylen); if (err) return err; /* key is immediate data for all cmac shared descriptors */ ctx->adata.key_virt = key; ctx->adata.keylen = keylen; print_hex_dump_debug("acmac ctx.key@" __stringify(__LINE__)" : ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); return acmac_set_sh_desc(ahash); } /* * ahash_edesc - s/w-extended ahash descriptor * @sec4_sg_dma: physical mapped address of h/w link table * @src_nents: number of segments in input scatterlist * @sec4_sg_bytes: length of dma mapped sec4_sg space * @bklog: stored to determine if the request needs backlog * @hw_desc: the h/w job descriptor followed by any referenced link tables * @sec4_sg: h/w link table */ struct ahash_edesc { dma_addr_t sec4_sg_dma; int src_nents; int sec4_sg_bytes; bool bklog; u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned; struct sec4_sg_entry sec4_sg[]; }; static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, int dst_len) { struct caam_hash_state *state = ahash_request_ctx_dma(req); if (edesc->src_nents) dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); if (edesc->sec4_sg_bytes) dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, DMA_TO_DEVICE); if (state->buf_dma) { dma_unmap_single(dev, state->buf_dma, state->buflen, DMA_TO_DEVICE); state->buf_dma = 0; } } static inline void ahash_unmap_ctx(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, int dst_len, u32 flag) { struct caam_hash_state *state = ahash_request_ctx_dma(req); if (state->ctx_dma) { dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); state->ctx_dma = 0; } ahash_unmap(dev, edesc, req, dst_len); } static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err, void *context, enum dma_data_direction dir) { struct ahash_request *req = context; struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); int digestsize = crypto_ahash_digestsize(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int ecode = 0; bool has_bklog; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = state->edesc; has_bklog = edesc->bklog; if (err) ecode = caam_jr_strstatus(jrdev, err); ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir); memcpy(req->result, state->caam_ctx, digestsize); kfree(edesc); 
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); /* * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. */ if (!has_bklog) ahash_request_complete(req, ecode); else crypto_finalize_hash_request(jrp->engine, req, ecode); } static void ahash_done(struct device *jrdev, u32 *desc, u32 err, void *context) { ahash_done_cpy(jrdev, desc, err, context, DMA_FROM_DEVICE); } static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, void *context) { ahash_done_cpy(jrdev, desc, err, context, DMA_BIDIRECTIONAL); } static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err, void *context, enum dma_data_direction dir) { struct ahash_request *req = context; struct caam_drv_private_jr *jrp = dev_get_drvdata(jrdev); struct ahash_edesc *edesc; struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); int digestsize = crypto_ahash_digestsize(ahash); int ecode = 0; bool has_bklog; dev_dbg(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); edesc = state->edesc; has_bklog = edesc->bklog; if (err) ecode = caam_jr_strstatus(jrdev, err); ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir); kfree(edesc); scatterwalk_map_and_copy(state->buf, req->src, req->nbytes - state->next_buflen, state->next_buflen, 0); state->buflen = state->next_buflen; print_hex_dump_debug("buf@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->buf, state->buflen, 1); print_hex_dump_debug("ctx@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); if (req->result) print_hex_dump_debug("result@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->result, digestsize, 1); /* * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. */ if (!has_bklog) ahash_request_complete(req, ecode); else crypto_finalize_hash_request(jrp->engine, req, ecode); } static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, void *context) { ahash_done_switch(jrdev, desc, err, context, DMA_BIDIRECTIONAL); } static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, void *context) { ahash_done_switch(jrdev, desc, err, context, DMA_FROM_DEVICE); } /* * Allocate an enhanced descriptor, which contains the hardware descriptor * and space for hardware scatter table containing sg_num entries. */ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req, int sg_num, u32 *sh_desc, dma_addr_t sh_desc_dma) { struct caam_hash_state *state = ahash_request_ctx_dma(req); gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; struct ahash_edesc *edesc; edesc = kzalloc(struct_size(edesc, sec4_sg, sg_num), flags); if (!edesc) return NULL; state->edesc = edesc; init_job_desc_shared(edesc->hw_desc, sh_desc_dma, desc_len(sh_desc), HDR_SHARE_DEFER | HDR_REVERSE); return edesc; } static int ahash_edesc_add_src(struct caam_hash_ctx *ctx, struct ahash_edesc *edesc, struct ahash_request *req, int nents, unsigned int first_sg, unsigned int first_bytes, size_t to_hash) { dma_addr_t src_dma; u32 options; if (nents > 1 || first_sg) { struct sec4_sg_entry *sg = edesc->sec4_sg; unsigned int sgsize = sizeof(*sg) * pad_sg_nents(first_sg + nents); sg_to_sec4_sg_last(req->src, to_hash, sg + first_sg, 0); src_dma = dma_map_single(ctx->jrdev, sg, sgsize, DMA_TO_DEVICE); if (dma_mapping_error(ctx->jrdev, src_dma)) { dev_err(ctx->jrdev, "unable to map S/G table\n"); return -ENOMEM; } edesc->sec4_sg_bytes = sgsize; edesc->sec4_sg_dma = src_dma; options = LDST_SGF; } else { src_dma = sg_dma_address(req->src); options = 0; } append_seq_in_ptr(edesc->hw_desc, src_dma, first_bytes + to_hash, options); return 0; } static int ahash_do_one_req(struct crypto_engine *engine, void *areq) { struct ahash_request *req = ahash_request_cast(areq); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(crypto_ahash_reqtfm(req)); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u32 *desc = state->edesc->hw_desc; int ret; state->edesc->bklog = true; ret = caam_jr_enqueue(jrdev, desc, state->ahash_op_done, req); if (ret == -ENOSPC && engine->retry_support) return ret; if (ret != -EINPROGRESS) { ahash_unmap(jrdev, state->edesc, req, 0); kfree(state->edesc); } else { ret = 0; } return ret; } static int ahash_enqueue_req(struct device *jrdev, void (*cbk)(struct device *jrdev, u32 *desc, u32 err, void *context), struct ahash_request *req, int dst_len, enum dma_data_direction dir) { struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->edesc; u32 *desc = edesc->hw_desc; int ret; state->ahash_op_done = cbk; /* * Only the backlog request are sent to crypto-engine since the others * can be handled by CAAM, if free, especially since JR has up to 1024 * entries (more than the 10 entries from crypto-engine). 
*/ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) ret = crypto_transfer_hash_request_to_engine(jrpriv->engine, req); else ret = caam_jr_enqueue(jrdev, desc, cbk, req); if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir); kfree(edesc); } return ret; } /* submit update job descriptor */ static int ahash_update_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u8 *buf = state->buf; int *buflen = &state->buflen; int *next_buflen = &state->next_buflen; int blocksize = crypto_ahash_blocksize(ahash); int in_len = *buflen + req->nbytes, to_hash; u32 *desc; int src_nents, mapped_nents, sec4_sg_bytes, sec4_sg_src_index; struct ahash_edesc *edesc; int ret = 0; *next_buflen = in_len & (blocksize - 1); to_hash = in_len - *next_buflen; /* * For XCBC and CMAC, if to_hash is multiple of block size, * keep last block in internal buffer */ if ((is_xcbc_aes(ctx->adata.algtype) || is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && (*next_buflen == 0)) { *next_buflen = blocksize; to_hash -= blocksize; } if (to_hash) { int pad_nents; int src_len = req->nbytes - *next_buflen; src_nents = sg_nents_for_len(req->src, src_len); if (src_nents < 0) { dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(jrdev, "unable to DMA map source\n"); return -ENOMEM; } } else { mapped_nents = 0; } sec4_sg_src_index = 1 + (*buflen ? 1 : 0); pad_nents = pad_sg_nents(sec4_sg_src_index + mapped_nents); sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); /* * allocate space for base edesc and hw desc commands, * link tables */ edesc = ahash_edesc_alloc(req, pad_nents, ctx->sh_desc_update, ctx->sh_desc_update_dma); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); if (ret) goto unmap_ctx; if (mapped_nents) sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + sec4_sg_src_index, 0); else sg_to_sec4_set_last(edesc->sec4_sg + sec4_sg_src_index - 1); desc = edesc->hw_desc; edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); ret = -ENOMEM; goto unmap_ctx; } append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + to_hash, LDST_SGF); append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ret = ahash_enqueue_req(jrdev, ahash_done_bi, req, ctx->ctx_len, DMA_BIDIRECTIONAL); } else if (*next_buflen) { scatterwalk_map_and_copy(buf + *buflen, req->src, 0, req->nbytes, 0); *buflen = *next_buflen; print_hex_dump_debug("buf@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); } return ret; unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); kfree(edesc); return ret; } static int ahash_final_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx 
*ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; int buflen = state->buflen; u32 *desc; int sec4_sg_bytes; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int ret; sec4_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ edesc = ahash_edesc_alloc(req, 4, ctx->sh_desc_fin, ctx->sh_desc_fin_dma); if (!edesc) return -ENOMEM; desc = edesc->hw_desc; edesc->sec4_sg_bytes = sec4_sg_bytes; ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); if (ret) goto unmap_ctx; sg_to_sec4_set_last(edesc->sec4_sg + (buflen ? 1 : 0)); edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); ret = -ENOMEM; goto unmap_ctx; } append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, LDST_SGF); append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req, digestsize, DMA_BIDIRECTIONAL); unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); kfree(edesc); return ret; } static int ahash_finup_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; int buflen = state->buflen; u32 *desc; int sec4_sg_src_index; int src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int ret; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(jrdev, "unable to DMA map source\n"); return -ENOMEM; } } else { mapped_nents = 0; } sec4_sg_src_index = 1 + (buflen ? 
1 : 0); /* allocate space for base edesc and hw desc commands, link tables */ edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, ctx->sh_desc_fin, ctx->sh_desc_fin_dma); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } desc = edesc->hw_desc; edesc->src_nents = src_nents; ret = ctx_map_to_sec4_sg(jrdev, state, ctx->ctx_len, edesc->sec4_sg, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1, state); if (ret) goto unmap_ctx; ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, sec4_sg_src_index, ctx->ctx_len + buflen, req->nbytes); if (ret) goto unmap_ctx; append_seq_out_ptr(desc, state->ctx_dma, digestsize, 0); print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return ahash_enqueue_req(jrdev, ahash_done_ctx_src, req, digestsize, DMA_BIDIRECTIONAL); unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL); kfree(edesc); return ret; } static int ahash_digest(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u32 *desc; int digestsize = crypto_ahash_digestsize(ahash); int src_nents, mapped_nents; struct ahash_edesc *edesc; int ret; state->buf_dma = 0; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(jrdev, "unable to map source for DMA\n"); return -ENOMEM; } } else { mapped_nents = 0; } /* allocate space for base edesc and hw desc commands, link tables */ edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? 
mapped_nents : 0, ctx->sh_desc_digest, ctx->sh_desc_digest_dma); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } edesc->src_nents = src_nents; ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, req->nbytes); if (ret) { ahash_unmap(jrdev, edesc, req, digestsize); kfree(edesc); return ret; } desc = edesc->hw_desc; ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); if (ret) { ahash_unmap(jrdev, edesc, req, digestsize); kfree(edesc); return -ENOMEM; } print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return ahash_enqueue_req(jrdev, ahash_done, req, digestsize, DMA_FROM_DEVICE); } /* submit ahash final if it the first job descriptor */ static int ahash_final_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u8 *buf = state->buf; int buflen = state->buflen; u32 *desc; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int ret; /* allocate space for base edesc and hw desc commands, link tables */ edesc = ahash_edesc_alloc(req, 0, ctx->sh_desc_digest, ctx->sh_desc_digest_dma); if (!edesc) return -ENOMEM; desc = edesc->hw_desc; if (buflen) { state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, state->buf_dma)) { dev_err(jrdev, "unable to map src\n"); goto unmap; } append_seq_in_ptr(desc, state->buf_dma, buflen, 0); } ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); if (ret) goto unmap; print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return ahash_enqueue_req(jrdev, ahash_done, req, digestsize, DMA_FROM_DEVICE); unmap: ahash_unmap(jrdev, edesc, req, digestsize); kfree(edesc); return -ENOMEM; } /* submit ahash update if it the first job descriptor after update */ static int ahash_update_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u8 *buf = state->buf; int *buflen = &state->buflen; int *next_buflen = &state->next_buflen; int blocksize = crypto_ahash_blocksize(ahash); int in_len = *buflen + req->nbytes, to_hash; int sec4_sg_bytes, src_nents, mapped_nents; struct ahash_edesc *edesc; u32 *desc; int ret = 0; *next_buflen = in_len & (blocksize - 1); to_hash = in_len - *next_buflen; /* * For XCBC and CMAC, if to_hash is multiple of block size, * keep last block in internal buffer */ if ((is_xcbc_aes(ctx->adata.algtype) || is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && (*next_buflen == 0)) { *next_buflen = blocksize; to_hash -= blocksize; } if (to_hash) { int pad_nents; int src_len = req->nbytes - *next_buflen; src_nents = sg_nents_for_len(req->src, src_len); if (src_nents < 0) { dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(jrdev, "unable to DMA map source\n"); return -ENOMEM; } } else { mapped_nents = 0; } pad_nents = pad_sg_nents(1 + mapped_nents); sec4_sg_bytes = pad_nents * sizeof(struct sec4_sg_entry); /* * allocate space for base edesc and hw desc commands, * link tables */ edesc = ahash_edesc_alloc(req, 
pad_nents, ctx->sh_desc_update_first, ctx->sh_desc_update_first_dma); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); if (ret) goto unmap_ctx; sg_to_sec4_sg_last(req->src, src_len, edesc->sec4_sg + 1, 0); desc = edesc->hw_desc; edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) { dev_err(jrdev, "unable to map S/G table\n"); ret = -ENOMEM; goto unmap_ctx; } append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); if (ret) goto unmap_ctx; print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req, ctx->ctx_len, DMA_TO_DEVICE); if ((ret != -EINPROGRESS) && (ret != -EBUSY)) return ret; state->update = ahash_update_ctx; state->finup = ahash_finup_ctx; state->final = ahash_final_ctx; } else if (*next_buflen) { scatterwalk_map_and_copy(buf + *buflen, req->src, 0, req->nbytes, 0); *buflen = *next_buflen; print_hex_dump_debug("buf@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); } return ret; unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); kfree(edesc); return ret; } /* submit ahash finup if it the first job descriptor after update */ static int ahash_finup_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; int buflen = state->buflen; u32 *desc; int sec4_sg_bytes, sec4_sg_src_index, src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int ret; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(jrdev, "unable to DMA map source\n"); return -ENOMEM; } } else { mapped_nents = 0; } sec4_sg_src_index = 2; sec4_sg_bytes = (sec4_sg_src_index + mapped_nents) * sizeof(struct sec4_sg_entry); /* allocate space for base edesc and hw desc commands, link tables */ edesc = ahash_edesc_alloc(req, sec4_sg_src_index + mapped_nents, ctx->sh_desc_digest, ctx->sh_desc_digest_dma); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } desc = edesc->hw_desc; edesc->src_nents = src_nents; edesc->sec4_sg_bytes = sec4_sg_bytes; ret = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, state); if (ret) goto unmap; ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 1, buflen, req->nbytes); if (ret) { dev_err(jrdev, "unable to map S/G table\n"); goto unmap; } ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize); if (ret) goto unmap; print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return ahash_enqueue_req(jrdev, ahash_done, req, digestsize, DMA_FROM_DEVICE); unmap: ahash_unmap(jrdev, edesc, req, digestsize); kfree(edesc); return -ENOMEM; } /* submit first update job descriptor after init */ static int ahash_update_first(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct 
caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct device *jrdev = ctx->jrdev; u8 *buf = state->buf; int *buflen = &state->buflen; int *next_buflen = &state->next_buflen; int to_hash; int blocksize = crypto_ahash_blocksize(ahash); u32 *desc; int src_nents, mapped_nents; struct ahash_edesc *edesc; int ret = 0; *next_buflen = req->nbytes & (blocksize - 1); to_hash = req->nbytes - *next_buflen; /* * For XCBC and CMAC, if to_hash is multiple of block size, * keep last block in internal buffer */ if ((is_xcbc_aes(ctx->adata.algtype) || is_cmac_aes(ctx->adata.algtype)) && to_hash >= blocksize && (*next_buflen == 0)) { *next_buflen = blocksize; to_hash -= blocksize; } if (to_hash) { src_nents = sg_nents_for_len(req->src, req->nbytes - *next_buflen); if (src_nents < 0) { dev_err(jrdev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(jrdev, "unable to map source for DMA\n"); return -ENOMEM; } } else { mapped_nents = 0; } /* * allocate space for base edesc and hw desc commands, * link tables */ edesc = ahash_edesc_alloc(req, mapped_nents > 1 ? mapped_nents : 0, ctx->sh_desc_update_first, ctx->sh_desc_update_first_dma); if (!edesc) { dma_unmap_sg(jrdev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } edesc->src_nents = src_nents; ret = ahash_edesc_add_src(ctx, edesc, req, mapped_nents, 0, 0, to_hash); if (ret) goto unmap_ctx; desc = edesc->hw_desc; ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); if (ret) goto unmap_ctx; print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ret = ahash_enqueue_req(jrdev, ahash_done_ctx_dst, req, ctx->ctx_len, DMA_TO_DEVICE); if ((ret != -EINPROGRESS) && (ret != -EBUSY)) return ret; state->update = ahash_update_ctx; state->finup = ahash_finup_ctx; state->final = ahash_final_ctx; } else if (*next_buflen) { state->update = ahash_update_no_ctx; state->finup = ahash_finup_no_ctx; state->final = ahash_final_no_ctx; scatterwalk_map_and_copy(buf, req->src, 0, req->nbytes, 0); *buflen = *next_buflen; print_hex_dump_debug("buf@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); } return ret; unmap_ctx: ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE); kfree(edesc); return ret; } static int ahash_finup_first(struct ahash_request *req) { return ahash_digest(req); } static int ahash_init(struct ahash_request *req) { struct caam_hash_state *state = ahash_request_ctx_dma(req); state->update = ahash_update_first; state->finup = ahash_finup_first; state->final = ahash_final_no_ctx; state->ctx_dma = 0; state->ctx_dma_len = 0; state->buf_dma = 0; state->buflen = 0; state->next_buflen = 0; return 0; } static int ahash_update(struct ahash_request *req) { struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->update(req); } static int ahash_finup(struct ahash_request *req) { struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->finup(req); } static int ahash_final(struct ahash_request *req) { struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->final(req); } static int ahash_export(struct ahash_request *req, void *out) { struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_export_state *export = out; u8 *buf = state->buf; int len = state->buflen; memcpy(export->buf, buf, len); memcpy(export->caam_ctx, 
state->caam_ctx, sizeof(export->caam_ctx)); export->buflen = len; export->update = state->update; export->final = state->final; export->finup = state->finup; return 0; } static int ahash_import(struct ahash_request *req, const void *in) { struct caam_hash_state *state = ahash_request_ctx_dma(req); const struct caam_export_state *export = in; memset(state, 0, sizeof(*state)); memcpy(state->buf, export->buf, export->buflen); memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); state->buflen = export->buflen; state->update = export->update; state->final = export->final; state->finup = export->finup; return 0; } struct caam_hash_template { char name[CRYPTO_MAX_ALG_NAME]; char driver_name[CRYPTO_MAX_ALG_NAME]; char hmac_name[CRYPTO_MAX_ALG_NAME]; char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; unsigned int blocksize; struct ahash_alg template_ahash; u32 alg_type; }; /* ahash descriptors */ static struct caam_hash_template driver_hash[] = { { .name = "sha1", .driver_name = "sha1-caam", .hmac_name = "hmac(sha1)", .hmac_driver_name = "hmac-sha1-caam", .blocksize = SHA1_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_SHA1, }, { .name = "sha224", .driver_name = "sha224-caam", .hmac_name = "hmac(sha224)", .hmac_driver_name = "hmac-sha224-caam", .blocksize = SHA224_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_SHA224, }, { .name = "sha256", .driver_name = "sha256-caam", .hmac_name = "hmac(sha256)", .hmac_driver_name = "hmac-sha256-caam", .blocksize = SHA256_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_SHA256, }, { .name = "sha384", .driver_name = "sha384-caam", .hmac_name = "hmac(sha384)", .hmac_driver_name = "hmac-sha384-caam", .blocksize = SHA384_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_SHA384, }, { .name = "sha512", .driver_name = "sha512-caam", .hmac_name = "hmac(sha512)", .hmac_driver_name = "hmac-sha512-caam", .blocksize = SHA512_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_SHA512, }, { .name = "md5", .driver_name = "md5-caam", .hmac_name = "hmac(md5)", .hmac_driver_name = "hmac-md5-caam", .blocksize = MD5_BLOCK_WORDS * 4, .template_ahash = { 
.init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_MD5, }, { .hmac_name = "xcbc(aes)", .hmac_driver_name = "xcbc-aes-caam", .blocksize = AES_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = axcbc_setkey, .halg = { .digestsize = AES_BLOCK_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC, }, { .hmac_name = "cmac(aes)", .hmac_driver_name = "cmac-aes-caam", .blocksize = AES_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = acmac_setkey, .halg = { .digestsize = AES_BLOCK_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CMAC, }, }; struct caam_hash_alg { struct list_head entry; int alg_type; struct ahash_engine_alg ahash_alg; }; static int caam_hash_cra_init(struct crypto_tfm *tfm) { struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); struct crypto_alg *base = tfm->__crt_alg; struct hash_alg_common *halg = container_of(base, struct hash_alg_common, base); struct ahash_alg *alg = container_of(halg, struct ahash_alg, halg); struct caam_hash_alg *caam_hash = container_of(alg, struct caam_hash_alg, ahash_alg.base); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, HASH_MSG_LEN + SHA1_DIGEST_SIZE, HASH_MSG_LEN + 32, HASH_MSG_LEN + SHA256_DIGEST_SIZE, HASH_MSG_LEN + 64, HASH_MSG_LEN + SHA512_DIGEST_SIZE }; const size_t sh_desc_update_offset = offsetof(struct caam_hash_ctx, sh_desc_update); dma_addr_t dma_addr; struct caam_drv_private *priv; /* * Get a Job ring from Job Ring driver to ensure in-order * crypto request processing per tfm */ ctx->jrdev = caam_jr_alloc(); if (IS_ERR(ctx->jrdev)) { pr_err("Job Ring Device allocation for transform failed\n"); return PTR_ERR(ctx->jrdev); } priv = dev_get_drvdata(ctx->jrdev->parent); if (is_xcbc_aes(caam_hash->alg_type)) { ctx->dir = DMA_TO_DEVICE; ctx->key_dir = DMA_BIDIRECTIONAL; ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; ctx->ctx_len = 48; } else if (is_cmac_aes(caam_hash->alg_type)) { ctx->dir = DMA_TO_DEVICE; ctx->key_dir = DMA_NONE; ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type; ctx->ctx_len = 32; } else { if (priv->era >= 6) { ctx->dir = DMA_BIDIRECTIONAL; ctx->key_dir = alg->setkey ? 
DMA_TO_DEVICE : DMA_NONE; } else { ctx->dir = DMA_TO_DEVICE; ctx->key_dir = DMA_NONE; } ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; ctx->ctx_len = runninglen[(ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT]; } if (ctx->key_dir != DMA_NONE) { ctx->adata.key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key, ARRAY_SIZE(ctx->key), ctx->key_dir, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(ctx->jrdev, ctx->adata.key_dma)) { dev_err(ctx->jrdev, "unable to map key\n"); caam_jr_free(ctx->jrdev); return -ENOMEM; } } dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update, offsetof(struct caam_hash_ctx, key) - sh_desc_update_offset, ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(ctx->jrdev, dma_addr)) { dev_err(ctx->jrdev, "unable to map shared descriptors\n"); if (ctx->key_dir != DMA_NONE) dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, ARRAY_SIZE(ctx->key), ctx->key_dir, DMA_ATTR_SKIP_CPU_SYNC); caam_jr_free(ctx->jrdev); return -ENOMEM; } ctx->sh_desc_update_dma = dma_addr; ctx->sh_desc_update_first_dma = dma_addr + offsetof(struct caam_hash_ctx, sh_desc_update_first) - sh_desc_update_offset; ctx->sh_desc_fin_dma = dma_addr + offsetof(struct caam_hash_ctx, sh_desc_fin) - sh_desc_update_offset; ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx, sh_desc_digest) - sh_desc_update_offset; crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state)); /* * For keyed hash algorithms shared descriptors * will be created later in setkey() callback */ return alg->setkey ? 0 : ahash_set_sh_desc(ahash); } static void caam_hash_cra_exit(struct crypto_tfm *tfm) { struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma, offsetof(struct caam_hash_ctx, key) - offsetof(struct caam_hash_ctx, sh_desc_update), ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); if (ctx->key_dir != DMA_NONE) dma_unmap_single_attrs(ctx->jrdev, ctx->adata.key_dma, ARRAY_SIZE(ctx->key), ctx->key_dir, DMA_ATTR_SKIP_CPU_SYNC); caam_jr_free(ctx->jrdev); } void caam_algapi_hash_exit(void) { struct caam_hash_alg *t_alg, *n; if (!hash_list.next) return; list_for_each_entry_safe(t_alg, n, &hash_list, entry) { crypto_engine_unregister_ahash(&t_alg->ahash_alg); list_del(&t_alg->entry); kfree(t_alg); } } static struct caam_hash_alg * caam_hash_alloc(struct caam_hash_template *template, bool keyed) { struct caam_hash_alg *t_alg; struct ahash_alg *halg; struct crypto_alg *alg; t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); if (!t_alg) return ERR_PTR(-ENOMEM); t_alg->ahash_alg.base = template->template_ahash; halg = &t_alg->ahash_alg.base; alg = &halg->halg.base; if (keyed) { snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->hmac_name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->hmac_driver_name); } else { snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->driver_name); halg->setkey = NULL; } alg->cra_module = THIS_MODULE; alg->cra_init = caam_hash_cra_init; alg->cra_exit = caam_hash_cra_exit; alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding(); alg->cra_priority = CAAM_CRA_PRIORITY; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; t_alg->alg_type = template->alg_type; t_alg->ahash_alg.op.do_one_request = ahash_do_one_req; return t_alg; } int caam_algapi_hash_init(struct device *ctrldev) { int i = 0, err = 0; 
struct caam_drv_private *priv = dev_get_drvdata(ctrldev); unsigned int md_limit = SHA512_DIGEST_SIZE; u32 md_inst, md_vid; /* * Register crypto algorithms the device supports. First, identify * presence and attributes of MD block. */ if (priv->era < 10) { struct caam_perfmon __iomem *perfmon = &priv->jr[0]->perfmon; md_vid = (rd_reg32(&perfmon->cha_id_ls) & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; md_inst = (rd_reg32(&perfmon->cha_num_ls) & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; } else { u32 mdha = rd_reg32(&priv->jr[0]->vreg.mdha); md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; md_inst = mdha & CHA_VER_NUM_MASK; } /* * Skip registration of any hashing algorithms if MD block * is not present. */ if (!md_inst) return 0; /* Limit digest size based on LP256 */ if (md_vid == CHA_VER_VID_MD_LP256) md_limit = SHA256_DIGEST_SIZE; INIT_LIST_HEAD(&hash_list); /* register crypto algorithms the device supports */ for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { struct caam_hash_alg *t_alg; struct caam_hash_template *alg = driver_hash + i; /* If MD size is not supported by device, skip registration */ if (is_mdha(alg->alg_type) && alg->template_ahash.halg.digestsize > md_limit) continue; /* register hmac version */ t_alg = caam_hash_alloc(alg, true); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); pr_warn("%s alg allocation failed\n", alg->hmac_driver_name); continue; } err = crypto_engine_register_ahash(&t_alg->ahash_alg); if (err) { pr_warn("%s alg registration failed: %d\n", t_alg->ahash_alg.base.halg.base.cra_driver_name, err); kfree(t_alg); } else list_add_tail(&t_alg->entry, &hash_list); if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES) continue; /* register unkeyed version */ t_alg = caam_hash_alloc(alg, false); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); pr_warn("%s alg allocation failed\n", alg->driver_name); continue; } err = crypto_engine_register_ahash(&t_alg->ahash_alg); if (err) { pr_warn("%s alg registration failed: %d\n", t_alg->ahash_alg.base.halg.base.cra_driver_name, err); kfree(t_alg); } else list_add_tail(&t_alg->entry, &hash_list); } return err; }
linux-master
drivers/crypto/caam/caamhash.c
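The update paths in caamhash.c above (ahash_update_no_ctx() and ahash_update_first(), like the context-carrying variants) all apply the same buffering rule: hash whole blocks now, keep the trailing partial block in the request state, and for AES-XCBC/CMAC additionally hold back one full trailing block so final()/finup() still has data to pad. A minimal standalone sketch of just that split follows; split_update() and struct hash_split are hypothetical names, the DMA mapping and descriptor construction are deliberately omitted, and the block size is assumed to be a power of two (true for the algorithms registered here).

/*
 * Sketch only: mirrors the to_hash / next_buflen computation in the
 * ahash update paths, without any of the surrounding DMA handling.
 */
#include <stdbool.h>
#include <stdio.h>

struct hash_split {
	int to_hash;      /* bytes that would be submitted to hardware now */
	int next_buflen;  /* bytes kept back in the internal buffer */
};

static struct hash_split split_update(int buflen, int nbytes, int blocksize,
				      bool is_aes_mac)
{
	struct hash_split s;
	int in_len = buflen + nbytes;

	/* hash only whole blocks; buffer the trailing partial block */
	s.next_buflen = in_len & (blocksize - 1);
	s.to_hash = in_len - s.next_buflen;

	/* XCBC/CMAC keep a full last block back for final-block padding */
	if (is_aes_mac && s.to_hash >= blocksize && s.next_buflen == 0) {
		s.next_buflen = blocksize;
		s.to_hash -= blocksize;
	}
	return s;
}

int main(void)
{
	struct hash_split sha = split_update(10, 100, 64, false);
	struct hash_split mac = split_update(0, 32, 16, true);

	printf("sha256: to_hash=%d next_buflen=%d\n", sha.to_hash, sha.next_buflen);
	printf("cmac:   to_hash=%d next_buflen=%d\n", mac.to_hash, mac.next_buflen);
	return 0;
}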
// SPDX-License-Identifier: GPL-2.0+ /* * CAAM/SEC 4.x transport/backend driver * JobR backend functionality * * Copyright 2008-2012 Freescale Semiconductor, Inc. * Copyright 2019, 2023 NXP */ #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include "compat.h" #include "ctrl.h" #include "regs.h" #include "jr.h" #include "desc.h" #include "intern.h" struct jr_driver_data { /* List of Physical JobR's with the Driver */ struct list_head jr_list; spinlock_t jr_alloc_lock; /* jr_list lock */ } ____cacheline_aligned; static struct jr_driver_data driver_data; static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; static void register_algs(struct caam_drv_private_jr *jrpriv, struct device *dev) { mutex_lock(&algs_lock); if (++active_devs != 1) goto algs_unlock; caam_algapi_init(dev); caam_algapi_hash_init(dev); caam_pkc_init(dev); jrpriv->hwrng = !caam_rng_init(dev); caam_prng_register(dev); caam_qi_algapi_init(dev); algs_unlock: mutex_unlock(&algs_lock); } static void unregister_algs(void) { mutex_lock(&algs_lock); if (--active_devs != 0) goto algs_unlock; caam_qi_algapi_exit(); caam_prng_unregister(NULL); caam_pkc_exit(); caam_algapi_hash_exit(); caam_algapi_exit(); algs_unlock: mutex_unlock(&algs_lock); } static void caam_jr_crypto_engine_exit(void *data) { struct device *jrdev = data; struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); /* Free the resources of crypto-engine */ crypto_engine_exit(jrpriv->engine); } /* * Put the CAAM in quiesce, ie stop * * Must be called with itr disabled */ static int caam_jr_stop_processing(struct device *dev, u32 jrcr_bits) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); unsigned int timeout = 100000; /* Check the current status */ if (rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_INPROGRESS) goto wait_quiesce_completion; /* Reset the field */ clrsetbits_32(&jrp->rregs->jrintstatus, JRINT_ERR_HALT_MASK, 0); /* initiate flush / park (required prior to reset) */ wr_reg32(&jrp->rregs->jrcommand, jrcr_bits); wait_quiesce_completion: while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) == JRINT_ERR_HALT_INPROGRESS) && --timeout) cpu_relax(); if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) != JRINT_ERR_HALT_COMPLETE || timeout == 0) { dev_err(dev, "failed to flush job ring %d\n", jrp->ridx); return -EIO; } return 0; } /* * Flush the job ring, so the jobs running will be stopped, jobs queued will be * invalidated and the CAAM will no longer fetch fron input ring. 
* * Must be called with itr disabled */ static int caam_jr_flush(struct device *dev) { return caam_jr_stop_processing(dev, JRCR_RESET); } /* The resume can be used after a park or a flush if CAAM has not been reset */ static int caam_jr_restart_processing(struct device *dev) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); u32 halt_status = rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK; /* Check that the flush/park is completed */ if (halt_status != JRINT_ERR_HALT_COMPLETE) return -1; /* Resume processing of jobs */ clrsetbits_32(&jrp->rregs->jrintstatus, 0, JRINT_ERR_HALT_COMPLETE); return 0; } static int caam_reset_hw_jr(struct device *dev) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); unsigned int timeout = 100000; int err; /* * mask interrupts since we are going to poll * for reset completion status */ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK); err = caam_jr_flush(dev); if (err) return err; /* initiate reset */ wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET); while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout) cpu_relax(); if (timeout == 0) { dev_err(dev, "failed to reset job ring %d\n", jrp->ridx); return -EIO; } /* unmask interrupts */ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); return 0; } /* * Shutdown JobR independent of platform property code */ static int caam_jr_shutdown(struct device *dev) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); int ret; ret = caam_reset_hw_jr(dev); tasklet_kill(&jrp->irqtask); return ret; } static int caam_jr_remove(struct platform_device *pdev) { int ret; struct device *jrdev; struct caam_drv_private_jr *jrpriv; jrdev = &pdev->dev; jrpriv = dev_get_drvdata(jrdev); if (jrpriv->hwrng) caam_rng_exit(jrdev->parent); /* * Return EBUSY if job ring already allocated. */ if (atomic_read(&jrpriv->tfm_count)) { dev_err(jrdev, "Device is busy\n"); return -EBUSY; } /* Unregister JR-based RNG & crypto algorithms */ unregister_algs(); /* Remove the node from Physical JobR list maintained by driver */ spin_lock(&driver_data.jr_alloc_lock); list_del(&jrpriv->list_node); spin_unlock(&driver_data.jr_alloc_lock); /* Release ring */ ret = caam_jr_shutdown(jrdev); if (ret) dev_err(jrdev, "Failed to shut down job ring\n"); return ret; } static void caam_jr_platform_shutdown(struct platform_device *pdev) { caam_jr_remove(pdev); } /* Main per-ring interrupt handler */ static irqreturn_t caam_jr_interrupt(int irq, void *st_dev) { struct device *dev = st_dev; struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); u32 irqstate; /* * Check the output ring for ready responses, kick * tasklet if jobs done. */ irqstate = rd_reg32(&jrp->rregs->jrintstatus); if (!(irqstate & JRINT_JR_INT)) return IRQ_NONE; /* * If JobR error, we got more development work to do * Flag a bug now, but we really need to shut down and * restart the queue (and fix code). 
*/ if (irqstate & JRINT_JR_ERROR) { dev_err(dev, "job ring error: irqstate: %08x\n", irqstate); BUG(); } /* mask valid interrupts */ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK); /* Have valid interrupt at this point, just ACK and trigger */ wr_reg32(&jrp->rregs->jrintstatus, irqstate); preempt_disable(); tasklet_schedule(&jrp->irqtask); preempt_enable(); return IRQ_HANDLED; } /* Deferred service handler, run as interrupt-fired tasklet */ static void caam_jr_dequeue(unsigned long devarg) { int hw_idx, sw_idx, i, head, tail; struct caam_jr_dequeue_params *params = (void *)devarg; struct device *dev = params->dev; struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg); u32 *userdesc, userstatus; void *userarg; u32 outring_used = 0; while (outring_used || (outring_used = rd_reg32(&jrp->rregs->outring_used))) { head = READ_ONCE(jrp->head); sw_idx = tail = jrp->tail; hw_idx = jrp->out_ring_read_index; for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { sw_idx = (tail + i) & (JOBR_DEPTH - 1); if (jr_outentry_desc(jrp->outring, hw_idx) == caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma)) break; /* found */ } /* we should never fail to find a matching descriptor */ BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); /* Unmap just-run descriptor so we can post-process */ dma_unmap_single(dev, caam_dma_to_cpu(jr_outentry_desc(jrp->outring, hw_idx)), jrp->entinfo[sw_idx].desc_size, DMA_TO_DEVICE); /* mark completed, avoid matching on a recycled desc addr */ jrp->entinfo[sw_idx].desc_addr_dma = 0; /* Stash callback params */ usercall = jrp->entinfo[sw_idx].callbk; userarg = jrp->entinfo[sw_idx].cbkarg; userdesc = jrp->entinfo[sw_idx].desc_addr_virt; userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring, hw_idx)); /* * Make sure all information from the job has been obtained * before telling CAAM that the job has been removed from the * output ring. */ mb(); /* set done */ wr_reg32(&jrp->rregs->outring_rmvd, 1); jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) & (JOBR_DEPTH - 1); /* * if this job completed out-of-order, do not increment * the tail. Otherwise, increment tail by 1 plus the * number of subsequent jobs already completed out-of-order */ if (sw_idx == tail) { do { tail = (tail + 1) & (JOBR_DEPTH - 1); } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && jrp->entinfo[tail].desc_addr_dma == 0); jrp->tail = tail; } /* Finally, execute user's callback */ usercall(dev, userdesc, userstatus, userarg); outring_used--; } if (params->enable_itr) /* reenable / unmask IRQs */ clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0); } /** * caam_jr_alloc() - Alloc a job ring for someone to use as needed. * * returns : pointer to the newly allocated physical * JobR dev can be written to if successful. 
**/ struct device *caam_jr_alloc(void) { struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL; struct device *dev = ERR_PTR(-ENODEV); int min_tfm_cnt = INT_MAX; int tfm_cnt; spin_lock(&driver_data.jr_alloc_lock); if (list_empty(&driver_data.jr_list)) { spin_unlock(&driver_data.jr_alloc_lock); return ERR_PTR(-ENODEV); } list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) { tfm_cnt = atomic_read(&jrpriv->tfm_count); if (tfm_cnt < min_tfm_cnt) { min_tfm_cnt = tfm_cnt; min_jrpriv = jrpriv; } if (!min_tfm_cnt) break; } if (min_jrpriv) { atomic_inc(&min_jrpriv->tfm_count); dev = min_jrpriv->dev; } spin_unlock(&driver_data.jr_alloc_lock); return dev; } EXPORT_SYMBOL(caam_jr_alloc); /** * caam_jr_free() - Free the Job Ring * @rdev: points to the dev that identifies the Job ring to * be released. **/ void caam_jr_free(struct device *rdev) { struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev); atomic_dec(&jrpriv->tfm_count); } EXPORT_SYMBOL(caam_jr_free); /** * caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's * descriptor. * @dev: struct device of the job ring to be used * @desc: points to a job descriptor that execute our request. All * descriptors (and all referenced data) must be in a DMAable * region, and all data references must be physical addresses * accessible to CAAM (i.e. within a PAMU window granted * to it). * @cbk: pointer to a callback function to be invoked upon completion * of this request. This has the form: * callback(struct device *dev, u32 *desc, u32 stat, void *arg) * where: * dev: contains the job ring device that processed this * response. * desc: descriptor that initiated the request, same as * "desc" being argued to caam_jr_enqueue(). * status: untranslated status received from CAAM. See the * reference manual for a detailed description of * error meaning, or see the JRSTA definitions in the * register header file * areq: optional pointer to an argument passed with the * original request * @areq: optional pointer to a user argument for use at callback * time. **/ int caam_jr_enqueue(struct device *dev, u32 *desc, void (*cbk)(struct device *dev, u32 *desc, u32 status, void *areq), void *areq) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); struct caam_jrentry_info *head_entry; int head, tail, desc_size; dma_addr_t desc_dma; desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32); desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE); if (dma_mapping_error(dev, desc_dma)) { dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n"); return -EIO; } spin_lock_bh(&jrp->inplock); head = jrp->head; tail = READ_ONCE(jrp->tail); if (!jrp->inpring_avail || CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { spin_unlock_bh(&jrp->inplock); dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); return -ENOSPC; } head_entry = &jrp->entinfo[head]; head_entry->desc_addr_virt = desc; head_entry->desc_size = desc_size; head_entry->callbk = (void *)cbk; head_entry->cbkarg = areq; head_entry->desc_addr_dma = desc_dma; jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma)); /* * Guarantee that the descriptor's DMA address has been written to * the next slot in the ring before the write index is updated, since * other cores may update this index independently. * * Under heavy DDR load, smp_wmb() or dma_wmb() fail to make the input * ring be updated before the CAAM starts reading it. 
So, CAAM will * process, again, an old descriptor address and will put it in the * output ring. This will make caam_jr_dequeue() to fail, since this * old descriptor is not in the software ring. * To fix this, use wmb() which works on the full system instead of * inner/outer shareable domains. */ wmb(); jrp->head = (head + 1) & (JOBR_DEPTH - 1); /* * Ensure that all job information has been written before * notifying CAAM that a new job was added to the input ring * using a memory barrier. The wr_reg32() uses api iowrite32() * to do the register write. iowrite32() issues a memory barrier * before the write operation. */ wr_reg32(&jrp->rregs->inpring_jobadd, 1); jrp->inpring_avail--; if (!jrp->inpring_avail) jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail); spin_unlock_bh(&jrp->inplock); return -EINPROGRESS; } EXPORT_SYMBOL(caam_jr_enqueue); static void caam_jr_init_hw(struct device *dev, dma_addr_t inpbusaddr, dma_addr_t outbusaddr) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); wr_reg64(&jrp->rregs->inpring_base, inpbusaddr); wr_reg64(&jrp->rregs->outring_base, outbusaddr); wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH); wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH); /* Select interrupt coalescing parameters */ clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC | (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) | (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)); } static void caam_jr_reset_index(struct caam_drv_private_jr *jrp) { jrp->out_ring_read_index = 0; jrp->head = 0; jrp->tail = 0; } /* * Init JobR independent of platform property detection */ static int caam_jr_init(struct device *dev) { struct caam_drv_private_jr *jrp; dma_addr_t inpbusaddr, outbusaddr; int i, error; jrp = dev_get_drvdata(dev); error = caam_reset_hw_jr(dev); if (error) return error; jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY * JOBR_DEPTH, &inpbusaddr, GFP_KERNEL); if (!jrp->inpring) return -ENOMEM; jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY * JOBR_DEPTH, &outbusaddr, GFP_KERNEL); if (!jrp->outring) return -ENOMEM; jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo), GFP_KERNEL); if (!jrp->entinfo) return -ENOMEM; for (i = 0; i < JOBR_DEPTH; i++) jrp->entinfo[i].desc_addr_dma = !0; /* Setup rings */ caam_jr_reset_index(jrp); jrp->inpring_avail = JOBR_DEPTH; caam_jr_init_hw(dev, inpbusaddr, outbusaddr); spin_lock_init(&jrp->inplock); jrp->tasklet_params.dev = dev; jrp->tasklet_params.enable_itr = 1; tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)&jrp->tasklet_params); /* Connect job ring interrupt handler. */ error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED, dev_name(dev), dev); if (error) { dev_err(dev, "can't connect JobR %d interrupt (%d)\n", jrp->ridx, jrp->irq); tasklet_kill(&jrp->irqtask); } return error; } static void caam_jr_irq_dispose_mapping(void *data) { irq_dispose_mapping((unsigned long)data); } /* * Probe routine for each detected JobR subsystem. 
*/ static int caam_jr_probe(struct platform_device *pdev) { struct device *jrdev; struct device_node *nprop; struct caam_job_ring __iomem *ctrl; struct caam_drv_private_jr *jrpriv; static int total_jobrs; struct resource *r; int error; jrdev = &pdev->dev; jrpriv = devm_kzalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL); if (!jrpriv) return -ENOMEM; dev_set_drvdata(jrdev, jrpriv); /* save ring identity relative to detection */ jrpriv->ridx = total_jobrs++; nprop = pdev->dev.of_node; /* Get configuration properties from device tree */ /* First, get register page */ r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { dev_err(jrdev, "platform_get_resource() failed\n"); return -ENOMEM; } ctrl = devm_ioremap(jrdev, r->start, resource_size(r)); if (!ctrl) { dev_err(jrdev, "devm_ioremap() failed\n"); return -ENOMEM; } jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl; error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev)); if (error) { dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n", error); return error; } /* Initialize crypto engine */ jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL, false, CRYPTO_ENGINE_MAX_QLEN); if (!jrpriv->engine) { dev_err(jrdev, "Could not init crypto-engine\n"); return -ENOMEM; } error = devm_add_action_or_reset(jrdev, caam_jr_crypto_engine_exit, jrdev); if (error) return error; /* Start crypto engine */ error = crypto_engine_start(jrpriv->engine); if (error) { dev_err(jrdev, "Could not start crypto-engine\n"); return error; } /* Identify the interrupt */ jrpriv->irq = irq_of_parse_and_map(nprop, 0); if (!jrpriv->irq) { dev_err(jrdev, "irq_of_parse_and_map failed\n"); return -EINVAL; } error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping, (void *)(unsigned long)jrpriv->irq); if (error) return error; /* Now do the platform independent part */ error = caam_jr_init(jrdev); /* now turn on hardware */ if (error) return error; jrpriv->dev = jrdev; spin_lock(&driver_data.jr_alloc_lock); list_add_tail(&jrpriv->list_node, &driver_data.jr_list); spin_unlock(&driver_data.jr_alloc_lock); atomic_set(&jrpriv->tfm_count, 0); device_init_wakeup(&pdev->dev, 1); device_set_wakeup_enable(&pdev->dev, false); register_algs(jrpriv, jrdev->parent); return 0; } static void caam_jr_get_hw_state(struct device *dev) { struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); jrp->state.inpbusaddr = rd_reg64(&jrp->rregs->inpring_base); jrp->state.outbusaddr = rd_reg64(&jrp->rregs->outring_base); } static int caam_jr_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev); struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent); struct caam_jr_dequeue_params suspend_params = { .dev = dev, .enable_itr = 0, }; /* Remove the node from Physical JobR list maintained by driver */ spin_lock(&driver_data.jr_alloc_lock); list_del(&jrpriv->list_node); spin_unlock(&driver_data.jr_alloc_lock); if (jrpriv->hwrng) caam_rng_exit(dev->parent); if (ctrlpriv->caam_off_during_pm) { int err; tasklet_disable(&jrpriv->irqtask); /* mask itr to call flush */ clrsetbits_32(&jrpriv->rregs->rconfig_lo, 0, JRCFG_IMSK); /* Invalid job in process */ err = caam_jr_flush(dev); if (err) { dev_err(dev, "Failed to flush\n"); return err; } /* Dequeing jobs flushed */ caam_jr_dequeue((unsigned long)&suspend_params); /* Save state */ caam_jr_get_hw_state(dev); } else if (device_may_wakeup(&pdev->dev)) { enable_irq_wake(jrpriv->irq); } return 0; } static int 
caam_jr_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev); struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent); if (ctrlpriv->caam_off_during_pm) { u64 inp_addr; int err; /* * Check if the CAAM has been resetted checking the address of * the input ring */ inp_addr = rd_reg64(&jrpriv->rregs->inpring_base); if (inp_addr != 0) { /* JR still has some configuration */ if (inp_addr == jrpriv->state.inpbusaddr) { /* JR has not been resetted */ err = caam_jr_restart_processing(dev); if (err) { dev_err(dev, "Restart processing failed\n"); return err; } tasklet_enable(&jrpriv->irqtask); clrsetbits_32(&jrpriv->rregs->rconfig_lo, JRCFG_IMSK, 0); goto add_jr; } else if (ctrlpriv->optee_en) { /* JR has been used by OPTEE, reset it */ err = caam_reset_hw_jr(dev); if (err) { dev_err(dev, "Failed to reset JR\n"); return err; } } else { /* No explanation, return error */ return -EIO; } } caam_jr_reset_index(jrpriv); caam_jr_init_hw(dev, jrpriv->state.inpbusaddr, jrpriv->state.outbusaddr); tasklet_enable(&jrpriv->irqtask); } else if (device_may_wakeup(&pdev->dev)) { disable_irq_wake(jrpriv->irq); } add_jr: spin_lock(&driver_data.jr_alloc_lock); list_add_tail(&jrpriv->list_node, &driver_data.jr_list); spin_unlock(&driver_data.jr_alloc_lock); if (jrpriv->hwrng) jrpriv->hwrng = !caam_rng_init(dev->parent); return 0; } static DEFINE_SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume); static const struct of_device_id caam_jr_match[] = { { .compatible = "fsl,sec-v4.0-job-ring", }, { .compatible = "fsl,sec4.0-job-ring", }, {}, }; MODULE_DEVICE_TABLE(of, caam_jr_match); static struct platform_driver caam_jr_driver = { .driver = { .name = "caam_jr", .of_match_table = caam_jr_match, .pm = pm_ptr(&caam_jr_pm_ops), }, .probe = caam_jr_probe, .remove = caam_jr_remove, .shutdown = caam_jr_platform_shutdown, }; static int __init jr_driver_init(void) { spin_lock_init(&driver_data.jr_alloc_lock); INIT_LIST_HEAD(&driver_data.jr_list); return platform_driver_register(&caam_jr_driver); } static void __exit jr_driver_exit(void) { platform_driver_unregister(&caam_jr_driver); } module_init(jr_driver_init); module_exit(jr_driver_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("FSL CAAM JR request backend"); MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
linux-master
drivers/crypto/caam/jr.c
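caam_jr_enqueue() and caam_jr_dequeue() in jr.c above track the software side of the job rings with power-of-two index arithmetic in the style of <linux/circ_buf.h>: head advances on enqueue, tail advances only once the oldest in-flight job has completed, and CIRC_SPACE()/CIRC_CNT() decide whether there is room or work. The toy user-space model below shows only that bookkeeping; DEPTH stands in for JOBR_DEPTH, and the hardware read index, DMA addresses and out-of-order completion handling of the real driver are left out.

/*
 * Toy circular-buffer model of the ring indices; not driver code.
 * The macros mirror the definitions in <linux/circ_buf.h>.
 */
#include <stdio.h>

#define DEPTH 1024	/* stands in for JOBR_DEPTH; must be a power of two */

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

int main(void)
{
	int head = 0, tail = 0;

	/* enqueue: claim the next input-ring slot if there is space */
	if (CIRC_SPACE(head, tail, DEPTH) >= 1)
		head = (head + 1) & (DEPTH - 1);

	/* dequeue: retire the oldest completed job */
	if (CIRC_CNT(head, tail, DEPTH) >= 1)
		tail = (tail + 1) & (DEPTH - 1);

	printf("head=%d tail=%d in flight=%d\n",
	       head, tail, CIRC_CNT(head, tail, DEPTH));
	return 0;
}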
// SPDX-License-Identifier: GPL-2.0+ /* * CAAM control-plane driver backend * Controller-level driver, kernel property detection, initialization * * Copyright 2008-2012 Freescale Semiconductor, Inc. * Copyright 2018-2019, 2023 NXP */ #include <linux/device.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/sys_soc.h> #include <linux/fsl/mc.h> #include "compat.h" #include "debugfs.h" #include "regs.h" #include "intern.h" #include "jr.h" #include "desc_constr.h" #include "ctrl.h" bool caam_dpaa2; EXPORT_SYMBOL(caam_dpaa2); #ifdef CONFIG_CAAM_QI #include "qi.h" #endif /* * Descriptor to instantiate RNG State Handle 0 in normal mode and * load the JDKEK, TDKEK and TDSK registers */ static void build_instantiation_desc(u32 *desc, int handle, int do_sk) { u32 *jump_cmd, op_flags; init_job_desc(desc, 0); op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT | OP_ALG_PR_ON; /* INIT RNG in non-test mode */ append_operation(desc, op_flags); if (!handle && do_sk) { /* * For SH0, Secure Keys must be generated as well */ /* wait for done */ jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1); set_jump_tgt_here(desc, jump_cmd); /* * load 1 to clear written reg: * resets the done interrupt and returns the RNG to idle. */ append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW); /* Initialize State Handle */ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | OP_ALG_AAI_RNG4_SK); } append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); } /* Descriptor for deinstantiation of State Handle 0 of the RNG block. */ static void build_deinstantiation_desc(u32 *desc, int handle) { init_job_desc(desc, 0); /* Uninstantiate State Handle 0 */ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL); append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); } static const struct of_device_id imx8m_machine_match[] = { { .compatible = "fsl,imx8mm", }, { .compatible = "fsl,imx8mn", }, { .compatible = "fsl,imx8mp", }, { .compatible = "fsl,imx8mq", }, { .compatible = "fsl,imx8ulp", }, { } }; /* * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of * the software (no JR/QI used). 
* @ctrldev - pointer to device * @status - descriptor status, after being run * * Return: - 0 if no error occurred * - -ENODEV if the DECO couldn't be acquired * - -EAGAIN if an error occurred while executing the descriptor */ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc, u32 *status) { struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl; struct caam_deco __iomem *deco = ctrlpriv->deco; unsigned int timeout = 100000; u32 deco_dbg_reg, deco_state, flags; int i; if (ctrlpriv->virt_en == 1 || /* * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1 * and the following steps should be performed regardless */ of_match_node(imx8m_machine_match, of_root)) { clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0); while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) && --timeout) cpu_relax(); timeout = 100000; } clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE); while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) && --timeout) cpu_relax(); if (!timeout) { dev_err(ctrldev, "failed to acquire DECO 0\n"); clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0); return -ENODEV; } for (i = 0; i < desc_len(desc); i++) wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i))); flags = DECO_JQCR_WHL; /* * If the descriptor length is longer than 4 words, then the * FOUR bit in JRCTRL register must be set. */ if (desc_len(desc) >= 4) flags |= DECO_JQCR_FOUR; /* Instruct the DECO to execute it */ clrsetbits_32(&deco->jr_ctl_hi, 0, flags); timeout = 10000000; do { deco_dbg_reg = rd_reg32(&deco->desc_dbg); if (ctrlpriv->era < 10) deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >> DESC_DBG_DECO_STAT_SHIFT; else deco_state = (rd_reg32(&deco->dbg_exec) & DESC_DER_DECO_STAT_MASK) >> DESC_DER_DECO_STAT_SHIFT; /* * If an error occurred in the descriptor, then * the DECO status field will be set to 0x0D */ if (deco_state == DECO_STAT_HOST_ERR) break; cpu_relax(); } while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout); *status = rd_reg32(&deco->op_status_hi) & DECO_OP_STATUS_HI_ERR_MASK; if (ctrlpriv->virt_en == 1) clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0); /* Mark the DECO as free */ clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0); if (!timeout) return -EAGAIN; return 0; } /* * deinstantiate_rng - builds and executes a descriptor on DECO0, * which deinitializes the RNG block. 
* @ctrldev - pointer to device * @state_handle_mask - bitmask containing the instantiation status * for the RNG4 state handles which exist in * the RNG4 block: 1 if it's been instantiated * * Return: - 0 if no error occurred * - -ENOMEM if there isn't enough memory to allocate the descriptor * - -ENODEV if DECO0 couldn't be acquired * - -EAGAIN if an error occurred when executing the descriptor */ static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask) { u32 *desc, status; int sh_idx, ret = 0; desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL); if (!desc) return -ENOMEM; for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { /* * If the corresponding bit is set, then it means the state * handle was initialized by us, and thus it needs to be * deinitialized as well */ if ((1 << sh_idx) & state_handle_mask) { /* * Create the descriptor for deinstantating this state * handle */ build_deinstantiation_desc(desc, sh_idx); /* Try to run it through DECO0 */ ret = run_descriptor_deco0(ctrldev, desc, &status); if (ret || (status && status != JRSTA_SSRC_JUMP_HALT_CC)) { dev_err(ctrldev, "Failed to deinstantiate RNG4 SH%d\n", sh_idx); break; } dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx); } } kfree(desc); return ret; } static void devm_deinstantiate_rng(void *data) { struct device *ctrldev = data; struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); /* * De-initialize RNG state handles initialized by this driver. * In case of SoCs with Management Complex, RNG is managed by MC f/w. */ if (ctrlpriv->rng4_sh_init) deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init); } /* * instantiate_rng - builds and executes a descriptor on DECO0, * which initializes the RNG block. * @ctrldev - pointer to device * @state_handle_mask - bitmask containing the instantiation status * for the RNG4 state handles which exist in * the RNG4 block: 1 if it's been instantiated * by an external entry, 0 otherwise. * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK; * Caution: this can be done only once; if the keys need to be * regenerated, a POR is required * * Return: - 0 if no error occurred * - -ENOMEM if there isn't enough memory to allocate the descriptor * - -ENODEV if DECO0 couldn't be acquired * - -EAGAIN if an error occurred when executing the descriptor * f.i. there was a RNG hardware error due to not "good enough" * entropy being acquired. */ static int instantiate_rng(struct device *ctrldev, int state_handle_mask, int gen_sk) { struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev); struct caam_ctrl __iomem *ctrl; u32 *desc, status = 0, rdsta_val; int ret = 0, sh_idx; ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL); if (!desc) return -ENOMEM; for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) { const u32 rdsta_if = RDSTA_IF0 << sh_idx; const u32 rdsta_pr = RDSTA_PR0 << sh_idx; const u32 rdsta_mask = rdsta_if | rdsta_pr; /* Clear the contents before using the descriptor */ memset(desc, 0x00, CAAM_CMD_SZ * 7); /* * If the corresponding bit is set, this state handle * was initialized by somebody else, so it's left alone. */ if (rdsta_if & state_handle_mask) { if (rdsta_pr & state_handle_mask) continue; dev_info(ctrldev, "RNG4 SH%d was previously instantiated without prediction resistance. 
Tearing it down\n", sh_idx); ret = deinstantiate_rng(ctrldev, rdsta_if); if (ret) break; } /* Create the descriptor for instantiating RNG State Handle */ build_instantiation_desc(desc, sh_idx, gen_sk); /* Try to run it through DECO0 */ ret = run_descriptor_deco0(ctrldev, desc, &status); /* * If ret is not 0, or descriptor status is not 0, then * something went wrong. No need to try the next state * handle (if available), bail out here. * Also, if for some reason, the State Handle didn't get * instantiated although the descriptor has finished * without any error (HW optimizations for later * CAAM eras), then try again. */ if (ret) break; rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK; if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) || (rdsta_val & rdsta_mask) != rdsta_mask) { ret = -EAGAIN; break; } dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx); } kfree(desc); if (ret) return ret; return devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng, ctrldev); } /* * kick_trng - sets the various parameters for enabling the initialization * of the RNG4 block in CAAM * @dev - pointer to the controller device * @ent_delay - Defines the length (in system clocks) of each entropy sample. */ static void kick_trng(struct device *dev, int ent_delay) { struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); struct caam_ctrl __iomem *ctrl; struct rng4tst __iomem *r4tst; u32 val, rtsdctl; ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl; r4tst = &ctrl->r4tst[0]; /* * Setting both RTMCTL:PRGM and RTMCTL:TRNG_ACC causes TRNG to * properly invalidate the entropy in the entropy register and * force re-generation. */ clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM | RTMCTL_ACC); /* * Performance-wise, it does not make sense to * set the delay to a value that is lower * than the last one that worked (i.e. the state handles * were instantiated properly). */ rtsdctl = rd_reg32(&r4tst->rtsdctl); val = (rtsdctl & RTSDCTL_ENT_DLY_MASK) >> RTSDCTL_ENT_DLY_SHIFT; if (ent_delay > val) { val = ent_delay; /* min. freq. count, equal to 1/4 of the entropy sample length */ wr_reg32(&r4tst->rtfrqmin, val >> 2); /* disable maximum frequency count */ wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE); } wr_reg32(&r4tst->rtsdctl, (val << RTSDCTL_ENT_DLY_SHIFT) | RTSDCTL_SAMP_SIZE_VAL); /* * To avoid reprogramming the self-test parameters over and over again, * use RTSDCTL[SAMP_SIZE] as an indicator. 
*/ if ((rtsdctl & RTSDCTL_SAMP_SIZE_MASK) != RTSDCTL_SAMP_SIZE_VAL) { wr_reg32(&r4tst->rtscmisc, (2 << 16) | 32); wr_reg32(&r4tst->rtpkrrng, 570); wr_reg32(&r4tst->rtpkrmax, 1600); wr_reg32(&r4tst->rtscml, (122 << 16) | 317); wr_reg32(&r4tst->rtscrl[0], (80 << 16) | 107); wr_reg32(&r4tst->rtscrl[1], (57 << 16) | 62); wr_reg32(&r4tst->rtscrl[2], (39 << 16) | 39); wr_reg32(&r4tst->rtscrl[3], (27 << 16) | 26); wr_reg32(&r4tst->rtscrl[4], (19 << 16) | 18); wr_reg32(&r4tst->rtscrl[5], (18 << 16) | 17); } /* * select raw sampling in both entropy shifter * and statistical checker; ; put RNG4 into run mode */ clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM | RTMCTL_ACC, RTMCTL_SAMP_MODE_RAW_ES_SC); } static int caam_get_era_from_hw(struct caam_perfmon __iomem *perfmon) { static const struct { u16 ip_id; u8 maj_rev; u8 era; } id[] = { {0x0A10, 1, 1}, {0x0A10, 2, 2}, {0x0A12, 1, 3}, {0x0A14, 1, 3}, {0x0A14, 2, 4}, {0x0A16, 1, 4}, {0x0A10, 3, 4}, {0x0A11, 1, 4}, {0x0A18, 1, 4}, {0x0A11, 2, 5}, {0x0A12, 2, 5}, {0x0A13, 1, 5}, {0x0A1C, 1, 5} }; u32 ccbvid, id_ms; u8 maj_rev, era; u16 ip_id; int i; ccbvid = rd_reg32(&perfmon->ccb_id); era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT; if (era) /* This is '0' prior to CAAM ERA-6 */ return era; id_ms = rd_reg32(&perfmon->caam_id_ms); ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT; maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT; for (i = 0; i < ARRAY_SIZE(id); i++) if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev) return id[i].era; return -ENOTSUPP; } /** * caam_get_era() - Return the ERA of the SEC on SoC, based * on "sec-era" optional property in the DTS. This property is updated * by u-boot. * In case this property is not passed an attempt to retrieve the CAAM * era via register reads will be made. * * @perfmon: Performance Monitor Registers */ static int caam_get_era(struct caam_perfmon __iomem *perfmon) { struct device_node *caam_node; int ret; u32 prop; caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0"); ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop); of_node_put(caam_node); if (!ret) return prop; else return caam_get_era_from_hw(perfmon); } /* * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6QP) * have an issue wherein AXI bus transactions may not occur in the correct * order. This isn't a problem running single descriptors, but can be if * running multiple concurrent descriptors. Reworking the driver to throttle * to single requests is impractical, thus the workaround is to limit the AXI * pipeline to a depth of 1 (from it's default of 4) to preclude this situation * from occurring. 
*/ static void handle_imx6_err005766(u32 __iomem *mcr) { if (of_machine_is_compatible("fsl,imx6q") || of_machine_is_compatible("fsl,imx6dl") || of_machine_is_compatible("fsl,imx6qp")) clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK, 1 << MCFGR_AXIPIPE_SHIFT); } static const struct of_device_id caam_match[] = { { .compatible = "fsl,sec-v4.0", }, { .compatible = "fsl,sec4.0", }, {}, }; MODULE_DEVICE_TABLE(of, caam_match); struct caam_imx_data { const struct clk_bulk_data *clks; int num_clks; }; static const struct clk_bulk_data caam_imx6_clks[] = { { .id = "ipg" }, { .id = "mem" }, { .id = "aclk" }, { .id = "emi_slow" }, }; static const struct caam_imx_data caam_imx6_data = { .clks = caam_imx6_clks, .num_clks = ARRAY_SIZE(caam_imx6_clks), }; static const struct clk_bulk_data caam_imx7_clks[] = { { .id = "ipg" }, { .id = "aclk" }, }; static const struct caam_imx_data caam_imx7_data = { .clks = caam_imx7_clks, .num_clks = ARRAY_SIZE(caam_imx7_clks), }; static const struct clk_bulk_data caam_imx6ul_clks[] = { { .id = "ipg" }, { .id = "mem" }, { .id = "aclk" }, }; static const struct caam_imx_data caam_imx6ul_data = { .clks = caam_imx6ul_clks, .num_clks = ARRAY_SIZE(caam_imx6ul_clks), }; static const struct clk_bulk_data caam_vf610_clks[] = { { .id = "ipg" }, }; static const struct caam_imx_data caam_vf610_data = { .clks = caam_vf610_clks, .num_clks = ARRAY_SIZE(caam_vf610_clks), }; static const struct soc_device_attribute caam_imx_soc_table[] = { { .soc_id = "i.MX6UL", .data = &caam_imx6ul_data }, { .soc_id = "i.MX6*", .data = &caam_imx6_data }, { .soc_id = "i.MX7*", .data = &caam_imx7_data }, { .soc_id = "i.MX8M*", .data = &caam_imx7_data }, { .soc_id = "VF*", .data = &caam_vf610_data }, { .family = "Freescale i.MX" }, { /* sentinel */ } }; static void disable_clocks(void *data) { struct caam_drv_private *ctrlpriv = data; clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks); } static int init_clocks(struct device *dev, const struct caam_imx_data *data) { struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); int ret; ctrlpriv->num_clks = data->num_clks; ctrlpriv->clks = devm_kmemdup(dev, data->clks, data->num_clks * sizeof(data->clks[0]), GFP_KERNEL); if (!ctrlpriv->clks) return -ENOMEM; ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks); if (ret) { dev_err(dev, "Failed to request all necessary clocks\n"); return ret; } ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks); if (ret) { dev_err(dev, "Failed to prepare/enable all necessary clocks\n"); return ret; } return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv); } static void caam_remove_debugfs(void *root) { debugfs_remove_recursive(root); } #ifdef CONFIG_FSL_MC_BUS static bool check_version(struct fsl_mc_version *mc_version, u32 major, u32 minor, u32 revision) { if (mc_version->major > major) return true; if (mc_version->major == major) { if (mc_version->minor > minor) return true; if (mc_version->minor == minor && mc_version->revision > revision) return true; } return false; } #endif static bool needs_entropy_delay_adjustment(void) { if (of_machine_is_compatible("fsl,imx6sx")) return true; return false; } static int caam_ctrl_rng_init(struct device *dev) { struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl; int ret, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; u8 rng_vid; if (ctrlpriv->era < 10) { struct caam_perfmon __iomem *perfmon; perfmon = ctrlpriv->total_jobrs ? 
(struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon : (struct caam_perfmon __iomem *)&ctrl->perfmon; rng_vid = (rd_reg32(&perfmon->cha_id_ls) & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; } else { struct version_regs __iomem *vreg; vreg = ctrlpriv->total_jobrs ? (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg : (struct version_regs __iomem *)&ctrl->vreg; rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; } /* * If SEC has RNG version >= 4 and RNG state handle has not been * already instantiated, do RNG instantiation * In case of SoCs with Management Complex, RNG is managed by MC f/w. */ if (!(ctrlpriv->mc_en && ctrlpriv->pr_support) && rng_vid >= 4) { ctrlpriv->rng4_sh_init = rd_reg32(&ctrl->r4tst[0].rdsta); /* * If the secure keys (TDKEK, JDKEK, TDSK), were already * generated, signal this to the function that is instantiating * the state handles. An error would occur if RNG4 attempts * to regenerate these keys before the next POR. */ gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1; ctrlpriv->rng4_sh_init &= RDSTA_MASK; do { int inst_handles = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK; /* * If either SH were instantiated by somebody else * (e.g. u-boot) then it is assumed that the entropy * parameters are properly set and thus the function * setting these (kick_trng(...)) is skipped. * Also, if a handle was instantiated, do not change * the TRNG parameters. */ if (needs_entropy_delay_adjustment()) ent_delay = 12000; if (!(ctrlpriv->rng4_sh_init || inst_handles)) { dev_info(dev, "Entropy delay = %u\n", ent_delay); kick_trng(dev, ent_delay); ent_delay += 400; } /* * if instantiate_rng(...) fails, the loop will rerun * and the kick_trng(...) function will modify the * upper and lower limits of the entropy sampling * interval, leading to a successful initialization of * the RNG. */ ret = instantiate_rng(dev, inst_handles, gen_sk); /* * Entropy delay is determined via TRNG characterization. * TRNG characterization is run across different voltages * and temperatures. * If worst case value for ent_dly is identified, * the loop can be skipped for that platform. */ if (needs_entropy_delay_adjustment()) break; if (ret == -EAGAIN) /* * if here, the loop will rerun, * so don't hog the CPU */ cpu_relax(); } while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX)); if (ret) { dev_err(dev, "failed to instantiate RNG"); return ret; } /* * Set handles initialized by this module as the complement of * the already initialized ones */ ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK; /* Enable RDB bit so that RNG works faster */ clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE); } return 0; } /* Indicate if the internal state of the CAAM is lost during PM */ static int caam_off_during_pm(void) { bool not_off_during_pm = of_machine_is_compatible("fsl,imx6q") || of_machine_is_compatible("fsl,imx6qp") || of_machine_is_compatible("fsl,imx6dl"); return not_off_during_pm ? 
0 : 1; } static void caam_state_save(struct device *dev) { struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); struct caam_ctl_state *state = &ctrlpriv->state; struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl; u32 deco_inst, jr_inst; int i; state->mcr = rd_reg32(&ctrl->mcr); state->scfgr = rd_reg32(&ctrl->scfgr); deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) & CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT; for (i = 0; i < deco_inst; i++) { state->deco_mid[i].liodn_ms = rd_reg32(&ctrl->deco_mid[i].liodn_ms); state->deco_mid[i].liodn_ls = rd_reg32(&ctrl->deco_mid[i].liodn_ls); } jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) & CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT; for (i = 0; i < jr_inst; i++) { state->jr_mid[i].liodn_ms = rd_reg32(&ctrl->jr_mid[i].liodn_ms); state->jr_mid[i].liodn_ls = rd_reg32(&ctrl->jr_mid[i].liodn_ls); } } static void caam_state_restore(const struct device *dev) { const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); const struct caam_ctl_state *state = &ctrlpriv->state; struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl; u32 deco_inst, jr_inst; int i; wr_reg32(&ctrl->mcr, state->mcr); wr_reg32(&ctrl->scfgr, state->scfgr); deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) & CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT; for (i = 0; i < deco_inst; i++) { wr_reg32(&ctrl->deco_mid[i].liodn_ms, state->deco_mid[i].liodn_ms); wr_reg32(&ctrl->deco_mid[i].liodn_ls, state->deco_mid[i].liodn_ls); } jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) & CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT; for (i = 0; i < jr_inst; i++) { wr_reg32(&ctrl->jr_mid[i].liodn_ms, state->jr_mid[i].liodn_ms); wr_reg32(&ctrl->jr_mid[i].liodn_ls, state->jr_mid[i].liodn_ls); } if (ctrlpriv->virt_en == 1) clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START | JRSTART_JR1_START | JRSTART_JR2_START | JRSTART_JR3_START); } static int caam_ctrl_suspend(struct device *dev) { const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) caam_state_save(dev); return 0; } static int caam_ctrl_resume(struct device *dev) { struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev); int ret = 0; if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) { caam_state_restore(dev); /* HW and rng will be reset so deinstantiation can be removed */ devm_remove_action(dev, devm_deinstantiate_rng, dev); ret = caam_ctrl_rng_init(dev); } return ret; } static DEFINE_SIMPLE_DEV_PM_OPS(caam_ctrl_pm_ops, caam_ctrl_suspend, caam_ctrl_resume); /* Probe routine for CAAM top (controller) level */ static int caam_probe(struct platform_device *pdev) { int ret, ring; u64 caam_id; const struct soc_device_attribute *imx_soc_match; struct device *dev; struct device_node *nprop, *np; struct caam_ctrl __iomem *ctrl; struct caam_drv_private *ctrlpriv; struct caam_perfmon __iomem *perfmon; struct dentry *dfs_root; u32 scfgr, comp_params; int pg_size; int BLOCK_OFFSET = 0; bool reg_access = true; ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL); if (!ctrlpriv) return -ENOMEM; dev = &pdev->dev; dev_set_drvdata(dev, ctrlpriv); nprop = pdev->dev.of_node; imx_soc_match = soc_device_match(caam_imx_soc_table); if (!imx_soc_match && of_match_node(imx8m_machine_match, of_root)) return -EPROBE_DEFER; caam_imx = (bool)imx_soc_match; ctrlpriv->caam_off_during_pm = caam_imx && caam_off_during_pm(); if (imx_soc_match) { /* * Until Layerscape and i.MX OP-TEE get in sync, * only i.MX OP-TEE use cases disallow access to * caam page 0 (controller) registers. 
*/ np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz"); ctrlpriv->optee_en = !!np; of_node_put(np); reg_access = !ctrlpriv->optee_en; if (!imx_soc_match->data) { dev_err(dev, "No clock data provided for i.MX SoC"); return -EINVAL; } ret = init_clocks(dev, imx_soc_match->data); if (ret) return ret; } /* Get configuration properties from device tree */ /* First, get register page */ ctrl = devm_of_iomap(dev, nprop, 0, NULL); ret = PTR_ERR_OR_ZERO(ctrl); if (ret) { dev_err(dev, "caam: of_iomap() failed\n"); return ret; } ring = 0; for_each_available_child_of_node(nprop, np) if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") || of_device_is_compatible(np, "fsl,sec4.0-job-ring")) { u32 reg; if (of_property_read_u32_index(np, "reg", 0, &reg)) { dev_err(dev, "%s read reg property error\n", np->full_name); continue; } ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *) ((__force uint8_t *)ctrl + reg); ctrlpriv->total_jobrs++; ring++; } /* * Wherever possible, instead of accessing registers from the global page, * use the alias registers in the first (cf. DT nodes order) * job ring's page. */ perfmon = ring ? (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon : (struct caam_perfmon __iomem *)&ctrl->perfmon; caam_little_end = !(bool)(rd_reg32(&perfmon->status) & (CSTA_PLEND | CSTA_ALT_PLEND)); comp_params = rd_reg32(&perfmon->comp_parms_ms); if (reg_access && comp_params & CTPR_MS_PS && rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR) caam_ptr_sz = sizeof(u64); else caam_ptr_sz = sizeof(u32); caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2); ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK); #ifdef CONFIG_CAAM_QI /* If (DPAA 1.x) QI present, check whether dependencies are available */ if (ctrlpriv->qi_present && !caam_dpaa2) { ret = qman_is_probed(); if (!ret) { return -EPROBE_DEFER; } else if (ret < 0) { dev_err(dev, "failing probe due to qman probe error\n"); return -ENODEV; } ret = qman_portals_probed(); if (!ret) { return -EPROBE_DEFER; } else if (ret < 0) { dev_err(dev, "failing probe due to qman portals probe error\n"); return -ENODEV; } } #endif /* Allocating the BLOCK_OFFSET based on the supported page size on * the platform */ pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT; if (pg_size == 0) BLOCK_OFFSET = PG_SIZE_4K; else BLOCK_OFFSET = PG_SIZE_64K; ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl; ctrlpriv->assure = (struct caam_assurance __iomem __force *) ((__force uint8_t *)ctrl + BLOCK_OFFSET * ASSURE_BLOCK_NUMBER ); ctrlpriv->deco = (struct caam_deco __iomem __force *) ((__force uint8_t *)ctrl + BLOCK_OFFSET * DECO_BLOCK_NUMBER ); /* Get the IRQ of the controller (for security violations only) */ ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0); np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc"); ctrlpriv->mc_en = !!np; of_node_put(np); #ifdef CONFIG_FSL_MC_BUS if (ctrlpriv->mc_en) { struct fsl_mc_version *mc_version; mc_version = fsl_mc_get_version(); if (mc_version) ctrlpriv->pr_support = check_version(mc_version, 10, 20, 0); else return -EPROBE_DEFER; } #endif if (!reg_access) goto set_dma_mask; /* * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel, * long pointers in master configuration register. * In case of SoCs with Management Complex, MC f/w performs * the configuration. 
*/ if (!ctrlpriv->mc_en) clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK, MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF | MCFGR_WDENABLE | MCFGR_LARGE_BURST); handle_imx6_err005766(&ctrl->mcr); /* * Read the Compile Time parameters and SCFGR to determine * if virtualization is enabled for this platform */ scfgr = rd_reg32(&ctrl->scfgr); ctrlpriv->virt_en = 0; if (comp_params & CTPR_MS_VIRT_EN_INCL) { /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1 */ if ((comp_params & CTPR_MS_VIRT_EN_POR) || (!(comp_params & CTPR_MS_VIRT_EN_POR) && (scfgr & SCFGR_VIRT_EN))) ctrlpriv->virt_en = 1; } else { /* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */ if (comp_params & CTPR_MS_VIRT_EN_POR) ctrlpriv->virt_en = 1; } if (ctrlpriv->virt_en == 1) clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START | JRSTART_JR1_START | JRSTART_JR2_START | JRSTART_JR3_START); set_dma_mask: ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev)); if (ret) { dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret); return ret; } ctrlpriv->era = caam_get_era(perfmon); ctrlpriv->domain = iommu_get_domain_for_dev(dev); dfs_root = debugfs_create_dir(dev_name(dev), NULL); if (IS_ENABLED(CONFIG_DEBUG_FS)) { ret = devm_add_action_or_reset(dev, caam_remove_debugfs, dfs_root); if (ret) return ret; } caam_debugfs_init(ctrlpriv, perfmon, dfs_root); /* Check to see if (DPAA 1.x) QI present. If so, enable */ if (ctrlpriv->qi_present && !caam_dpaa2) { ctrlpriv->qi = (struct caam_queue_if __iomem __force *) ((__force uint8_t *)ctrl + BLOCK_OFFSET * QI_BLOCK_NUMBER ); /* This is all that's required to physically enable QI */ wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN); /* If QMAN driver is present, init CAAM-QI backend */ #ifdef CONFIG_CAAM_QI ret = caam_qi_init(pdev); if (ret) dev_err(dev, "caam qi i/f init failed: %d\n", ret); #endif } /* If no QI and no rings specified, quit and go home */ if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) { dev_err(dev, "no queues configured, terminating\n"); return -ENOMEM; } comp_params = rd_reg32(&perfmon->comp_parms_ls); ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB); /* * Some SoCs like the LS1028A (non-E) indicate CTPR_LS_BLOB support, * but fail when actually using it due to missing AES support, so * check both here. */ if (ctrlpriv->era < 10) { ctrlpriv->blob_present = ctrlpriv->blob_present && (rd_reg32(&perfmon->cha_num_ls) & CHA_ID_LS_AES_MASK); } else { struct version_regs __iomem *vreg; vreg = ctrlpriv->total_jobrs ? (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg : (struct version_regs __iomem *)&ctrl->vreg; ctrlpriv->blob_present = ctrlpriv->blob_present && (rd_reg32(&vreg->aesa) & CHA_VER_MISC_AES_NUM_MASK); } if (reg_access) { ret = caam_ctrl_rng_init(dev); if (ret) return ret; } caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 | (u64)rd_reg32(&perfmon->caam_id_ls); /* Report "alive" for developer to see */ dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id, ctrlpriv->era); dev_info(dev, "job rings = %d, qi = %d\n", ctrlpriv->total_jobrs, ctrlpriv->qi_present); ret = devm_of_platform_populate(dev); if (ret) dev_err(dev, "JR platform devices creation error\n"); return ret; } static struct platform_driver caam_driver = { .driver = { .name = "caam", .of_match_table = caam_match, .pm = pm_ptr(&caam_ctrl_pm_ops), }, .probe = caam_probe, }; module_platform_driver(caam_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("FSL CAAM request backend"); MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
linux-master
drivers/crypto/caam/ctrl.c
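A note on the RNG instantiation loop in ctrl.c above: when instantiate_rng() keeps returning -EAGAIN, the driver widens the TRNG entropy-sampling delay via kick_trng() and retries until instantiation succeeds or the delay reaches RTSDCTL_ENT_DLY_MAX. The stand-alone sketch below only models that retry shape; the numeric constants and the try_instantiate() helper are invented for illustration and are not part of the driver.

/*
 * Illustrative sketch only (not kernel code): models the retry-with-growing-
 * entropy-delay pattern used by the CAAM RNG instantiation loop. All
 * constants and try_instantiate() are hypothetical stand-ins.
 */
#include <stdio.h>

#define ENT_DLY_START 3200      /* assumed starting delay */
#define ENT_DLY_STEP   400      /* same step the driver adds per retry */
#define ENT_DLY_MAX  12800      /* stands in for RTSDCTL_ENT_DLY_MAX */
#define EAGAIN_SIM      11      /* stands in for -EAGAIN */

/* Hypothetical: pretend instantiation only succeeds at larger delays. */
static int try_instantiate(int ent_delay)
{
        return ent_delay >= 4400 ? 0 : -EAGAIN_SIM;
}

int main(void)
{
        int ent_delay = ENT_DLY_START;
        int ret;

        do {
                printf("trying entropy delay = %d\n", ent_delay);
                ret = try_instantiate(ent_delay);
                if (ret == -EAGAIN_SIM)
                        ent_delay += ENT_DLY_STEP;  /* widen sampling interval */
        } while (ret == -EAGAIN_SIM && ent_delay < ENT_DLY_MAX);

        puts(ret ? "failed to instantiate RNG" : "RNG instantiated");
        return ret ? 1 : 0;
}

The same shape explains the early break on needs_entropy_delay_adjustment(): once a characterized worst-case delay is known for a platform, further ramping of the interval serves no purpose.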
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* Copyright 2019, 2023 NXP */ #include <linux/debugfs.h> #include "compat.h" #include "debugfs.h" #include "regs.h" #include "intern.h" static int caam_debugfs_u64_get(void *data, u64 *val) { *val = caam64_to_cpu(*(u64 *)data); return 0; } static int caam_debugfs_u32_get(void *data, u64 *val) { *val = caam32_to_cpu(*(u32 *)data); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u32_ro, caam_debugfs_u32_get, NULL, "%llu\n"); DEFINE_DEBUGFS_ATTRIBUTE(caam_fops_u64_ro, caam_debugfs_u64_get, NULL, "%llu\n"); #ifdef CONFIG_CAAM_QI /* * This is a counter for the number of times the congestion group (where all * the request and response queueus are) reached congestion. Incremented * each time the congestion callback is called with congested == true. */ static u64 times_congested; void caam_debugfs_qi_congested(void) { times_congested++; } void caam_debugfs_qi_init(struct caam_drv_private *ctrlpriv) { debugfs_create_file("qi_congested", 0444, ctrlpriv->ctl, &times_congested, &caam_fops_u64_ro); } #endif void caam_debugfs_init(struct caam_drv_private *ctrlpriv, struct caam_perfmon __force *perfmon, struct dentry *root) { /* * FIXME: needs better naming distinction, as some amalgamation of * "caam" and nprop->full_name. The OF name isn't distinctive, * but does separate instances */ ctrlpriv->ctl = debugfs_create_dir("ctl", root); debugfs_create_file("rq_dequeued", 0444, ctrlpriv->ctl, &perfmon->req_dequeued, &caam_fops_u64_ro); debugfs_create_file("ob_rq_encrypted", 0444, ctrlpriv->ctl, &perfmon->ob_enc_req, &caam_fops_u64_ro); debugfs_create_file("ib_rq_decrypted", 0444, ctrlpriv->ctl, &perfmon->ib_dec_req, &caam_fops_u64_ro); debugfs_create_file("ob_bytes_encrypted", 0444, ctrlpriv->ctl, &perfmon->ob_enc_bytes, &caam_fops_u64_ro); debugfs_create_file("ob_bytes_protected", 0444, ctrlpriv->ctl, &perfmon->ob_prot_bytes, &caam_fops_u64_ro); debugfs_create_file("ib_bytes_decrypted", 0444, ctrlpriv->ctl, &perfmon->ib_dec_bytes, &caam_fops_u64_ro); debugfs_create_file("ib_bytes_validated", 0444, ctrlpriv->ctl, &perfmon->ib_valid_bytes, &caam_fops_u64_ro); /* Controller level - global status values */ debugfs_create_file("fault_addr", 0444, ctrlpriv->ctl, &perfmon->faultaddr, &caam_fops_u32_ro); debugfs_create_file("fault_detail", 0444, ctrlpriv->ctl, &perfmon->faultdetail, &caam_fops_u32_ro); debugfs_create_file("fault_status", 0444, ctrlpriv->ctl, &perfmon->status, &caam_fops_u32_ro); if (ctrlpriv->optee_en) return; /* Internal covering keys (useful in non-secure mode only) */ ctrlpriv->ctl_kek_wrap.data = (__force void *)&ctrlpriv->ctrl->kek[0]; ctrlpriv->ctl_kek_wrap.size = KEK_KEY_SIZE * sizeof(u32); debugfs_create_blob("kek", 0444, ctrlpriv->ctl, &ctrlpriv->ctl_kek_wrap); ctrlpriv->ctl_tkek_wrap.data = (__force void *)&ctrlpriv->ctrl->tkek[0]; ctrlpriv->ctl_tkek_wrap.size = KEK_KEY_SIZE * sizeof(u32); debugfs_create_blob("tkek", 0444, ctrlpriv->ctl, &ctrlpriv->ctl_tkek_wrap); ctrlpriv->ctl_tdsk_wrap.data = (__force void *)&ctrlpriv->ctrl->tdsk[0]; ctrlpriv->ctl_tdsk_wrap.size = KEK_KEY_SIZE * sizeof(u32); debugfs_create_blob("tdsk", 0444, ctrlpriv->ctl, &ctrlpriv->ctl_tdsk_wrap); }
linux-master
drivers/crypto/caam/debugfs.c
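The debugfs.c file above exposes the performance-monitor counters as read-only files through caam_fops_u32_ro/caam_fops_u64_ro, whose getters endian-convert the register value and print it with "%llu\n". A hypothetical user-space reader for one of those counters might look like the sketch below; the debugfs mount point and controller directory name are assumptions that depend on the platform's device name.

/*
 * Illustrative user-space sketch (not part of the driver): reads one of the
 * counters created by caam_debugfs_init(), e.g. "rq_dequeued" under the
 * controller's "ctl" debugfs directory. The path below is hypothetical.
 */
#include <stdio.h>

int main(void)
{
        /* Assumed device name; the "ctl" dir sits under the CAAM node. */
        const char *path =
                "/sys/kernel/debug/8000000.crypto/ctl/rq_dequeued";
        FILE *f = fopen(path, "r");
        unsigned long long val;

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* Files backed by caam_fops_u64_ro print a decimal u64 ("%llu\n"). */
        if (fscanf(f, "%llu", &val) == 1)
                printf("requests dequeued: %llu\n", val);
        fclose(f);
        return 0;
}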
// SPDX-License-Identifier: GPL-2.0 /* * CAAM/SEC 4.x functions for handling key-generation jobs * * Copyright 2008-2011 Freescale Semiconductor, Inc. * */ #include "compat.h" #include "jr.h" #include "error.h" #include "desc_constr.h" #include "key_gen.h" void split_key_done(struct device *dev, u32 *desc, u32 err, void *context) { struct split_key_result *res = context; int ecode = 0; dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); if (err) ecode = caam_jr_strstatus(dev, err); res->err = ecode; complete(&res->completion); } EXPORT_SYMBOL(split_key_done); /* get a split ipad/opad key Split key generation----------------------------------------------- [00] 0xb0810008 jobdesc: stidx=1 share=never len=8 [01] 0x04000014 key: class2->keyreg len=20 @0xffe01000 [03] 0x84410014 operation: cls2-op sha1 hmac init dec [04] 0x24940000 fifold: class2 msgdata-last2 len=0 imm [05] 0xa4000001 jump: class2 local all ->1 [06] [06] 0x64260028 fifostr: class2 mdsplit-jdk len=40 @0xffe04000 */ int gen_split_key(struct device *jrdev, u8 *key_out, struct alginfo * const adata, const u8 *key_in, u32 keylen, int max_keylen) { u32 *desc; struct split_key_result result; dma_addr_t dma_addr; unsigned int local_max; int ret = -ENOMEM; adata->keylen = split_key_len(adata->algtype & OP_ALG_ALGSEL_MASK); adata->keylen_pad = split_key_pad_len(adata->algtype & OP_ALG_ALGSEL_MASK); local_max = max(keylen, adata->keylen_pad); dev_dbg(jrdev, "split keylen %d split keylen padded %d\n", adata->keylen, adata->keylen_pad); print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key_in, keylen, 1); if (local_max > max_keylen) return -EINVAL; desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL); if (!desc) { dev_err(jrdev, "unable to allocate key input memory\n"); return ret; } memcpy(key_out, key_in, keylen); dma_addr = dma_map_single(jrdev, key_out, local_max, DMA_BIDIRECTIONAL); if (dma_mapping_error(jrdev, dma_addr)) { dev_err(jrdev, "unable to map key memory\n"); goto out_free; } init_job_desc(desc, 0); append_key(desc, dma_addr, keylen, CLASS_2 | KEY_DEST_CLASS_REG); /* Sets MDHA up into an HMAC-INIT */ append_operation(desc, (adata->algtype & OP_ALG_ALGSEL_MASK) | OP_ALG_AAI_HMAC | OP_TYPE_CLASS2_ALG | OP_ALG_DECRYPT | OP_ALG_AS_INIT); /* * do a FIFO_LOAD of zero, this will trigger the internal key expansion * into both pads inside MDHA */ append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST2); /* * FIFO_STORE with the explicit split-key content store * (0x26 output type) */ append_fifo_store(desc, dma_addr, adata->keylen, LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); result.err = 0; init_completion(&result.completion); ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (ret == -EINPROGRESS) { /* in progress */ wait_for_completion(&result.completion); ret = result.err; print_hex_dump_debug("ctx.key@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key_out, adata->keylen_pad, 1); } dma_unmap_single(jrdev, dma_addr, local_max, DMA_BIDIRECTIONAL); out_free: kfree(desc); return ret; } EXPORT_SYMBOL(gen_split_key);
linux-master
drivers/crypto/caam/key_gen.c
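key_gen.c above is the driver's synchronous wrapper around an asynchronous job ring: gen_split_key() enqueues the descriptor with caam_jr_enqueue(), blocks on a struct completion when the enqueue reports -EINPROGRESS, and split_key_done() later records the job status in the shared split_key_result and signals the completion. The portable sketch below merely models that hand-off, with pthreads standing in for the kernel completion API; fake_result, fake_done and fake_enqueue are invented names for the example and not driver symbols.

/*
 * Illustrative analogy only: a caller thread waits for a "job" that a worker
 * completes, with the error code passed back through a shared context, as
 * gen_split_key()/split_key_done() do with struct completion.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_result {
        int err;
        int done;
        pthread_mutex_t lock;
        pthread_cond_t cond;
};

/* Plays the role of split_key_done(): record status, wake the waiter. */
static void fake_done(struct fake_result *res, int status)
{
        pthread_mutex_lock(&res->lock);
        res->err = status;
        res->done = 1;
        pthread_cond_signal(&res->cond);
        pthread_mutex_unlock(&res->lock);
}

/* Plays the role of the job-ring callback context completing the job. */
static void *fake_enqueue(void *arg)
{
        fake_done(arg, 0);      /* 0 == job finished without error */
        return NULL;
}

int main(void)
{
        struct fake_result res = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .cond = PTHREAD_COND_INITIALIZER,
        };
        pthread_t worker;

        pthread_create(&worker, NULL, fake_enqueue, &res);

        /* Equivalent of wait_for_completion(&result.completion). */
        pthread_mutex_lock(&res.lock);
        while (!res.done)
                pthread_cond_wait(&res.cond, &res.lock);
        pthread_mutex_unlock(&res.lock);

        pthread_join(worker, NULL);
        printf("split key job finished, err=%d\n", res.err);
        return res.err;
}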
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* * Copyright 2015-2016 Freescale Semiconductor Inc. * Copyright 2017-2019 NXP */ #include "compat.h" #include "regs.h" #include "caamalg_qi2.h" #include "dpseci_cmd.h" #include "desc_constr.h" #include "error.h" #include "sg_sw_sec4.h" #include "sg_sw_qm2.h" #include "key_gen.h" #include "caamalg_desc.h" #include "caamhash_desc.h" #include "dpseci-debugfs.h" #include <linux/dma-mapping.h> #include <linux/fsl/mc.h> #include <linux/kernel.h> #include <soc/fsl/dpaa2-io.h> #include <soc/fsl/dpaa2-fd.h> #include <crypto/xts.h> #include <asm/unaligned.h> #define CAAM_CRA_PRIORITY 2000 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \ SHA512_DIGEST_SIZE * 2) /* * This is a cache of buffers, from which the users of CAAM QI driver * can allocate short buffers. It's speedier than doing kmalloc on the hotpath. * NOTE: A more elegant solution would be to have some headroom in the frames * being processed. This can be added by the dpaa2-eth driver. This would * pose a problem for userspace application processing which cannot * know of this limitation. So for now, this will work. * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here */ static struct kmem_cache *qi_cache; struct caam_alg_entry { struct device *dev; int class1_alg_type; int class2_alg_type; bool rfc3686; bool geniv; bool nodkp; }; struct caam_aead_alg { struct aead_alg aead; struct caam_alg_entry caam; bool registered; }; struct caam_skcipher_alg { struct skcipher_alg skcipher; struct caam_alg_entry caam; bool registered; }; /** * struct caam_ctx - per-session context * @flc: Flow Contexts array * @key: [authentication key], encryption key * @flc_dma: I/O virtual addresses of the Flow Contexts * @key_dma: I/O virtual address of the key * @dir: DMA direction for mapping key and Flow Contexts * @dev: dpseci device * @adata: authentication algorithm details * @cdata: encryption algorithm details * @authsize: authentication tag (a.k.a. ICV / MAC) size * @xts_key_fallback: true if fallback tfm needs to be used due * to unsupported xts key lengths * @fallback: xts fallback tfm */ struct caam_ctx { struct caam_flc flc[NUM_OP]; u8 key[CAAM_MAX_KEY_SIZE]; dma_addr_t flc_dma[NUM_OP]; dma_addr_t key_dma; enum dma_data_direction dir; struct device *dev; struct alginfo adata; struct alginfo cdata; unsigned int authsize; bool xts_key_fallback; struct crypto_skcipher *fallback; }; static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv, dma_addr_t iova_addr) { phys_addr_t phys_addr; phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) : iova_addr; return phys_to_virt(phys_addr); } /* * qi_cache_zalloc - Allocate buffers from CAAM-QI cache * * Allocate data on the hotpath. Instead of using kzalloc, one can use the * services of the CAAM QI memory cache (backed by kmem_cache). The buffers * will have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for * hosting 16 SG entries. * * @flags - flags that would be used for the equivalent kmalloc(..) call * * Returns a pointer to a retrieved buffer on success or NULL on failure. */ static inline void *qi_cache_zalloc(gfp_t flags) { return kmem_cache_zalloc(qi_cache, flags); } /* * qi_cache_free - Frees buffers allocated from CAAM-QI cache * * @obj - buffer previously allocated by qi_cache_zalloc * * No checking is being done, the call is a passthrough call to * kmem_cache_free(...) 
*/ static inline void qi_cache_free(void *obj) { kmem_cache_free(qi_cache, obj); } static struct caam_request *to_caam_req(struct crypto_async_request *areq) { switch (crypto_tfm_alg_type(areq->tfm)) { case CRYPTO_ALG_TYPE_SKCIPHER: return skcipher_request_ctx_dma(skcipher_request_cast(areq)); case CRYPTO_ALG_TYPE_AEAD: return aead_request_ctx_dma( container_of(areq, struct aead_request, base)); case CRYPTO_ALG_TYPE_AHASH: return ahash_request_ctx_dma(ahash_request_cast(areq)); default: return ERR_PTR(-EINVAL); } } static void caam_unmap(struct device *dev, struct scatterlist *src, struct scatterlist *dst, int src_nents, int dst_nents, dma_addr_t iv_dma, int ivsize, enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, int qm_sg_bytes) { if (dst != src) { if (src_nents) dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); if (dst_nents) dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); } else { dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); } if (iv_dma) dma_unmap_single(dev, iv_dma, ivsize, iv_dir); if (qm_sg_bytes) dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); } static int aead_set_sh_desc(struct crypto_aead *aead) { struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), typeof(*alg), aead); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); struct device *dev = ctx->dev; struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); struct caam_flc *flc; u32 *desc; u32 ctx1_iv_off = 0; u32 *nonce = NULL; unsigned int data_len[2]; u32 inl_mask; const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CTR_MOD128); const bool is_rfc3686 = alg->caam.rfc3686; if (!ctx->cdata.keylen || !ctx->authsize) return 0; /* * AES-CTR needs to load IV in CONTEXT1 reg * at an offset of 128bits (16bytes) * CONTEXT1[255:128] = IV */ if (ctr_mode) ctx1_iv_off = 16; /* * RFC3686 specific: * CONTEXT1[255:128] = {NONCE, IV, COUNTER} */ if (is_rfc3686) { ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); } /* * In case |user key| > |derived key|, using DKP<imm,imm> would result * in invalid opcodes (last bytes of user key) in the resulting * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key * addresses are needed. */ ctx->adata.key_virt = ctx->key; ctx->adata.key_dma = ctx->key_dma; ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; data_len[0] = ctx->adata.keylen_pad; data_len[1] = ctx->cdata.keylen; /* aead_encrypt shared descriptor */ if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN : DESC_QI_AEAD_ENC_LEN) + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), DESC_JOB_IO_LEN, data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0) return -EINVAL; ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); flc = &ctx->flc[ENCRYPT]; desc = flc->sh_desc; if (alg->caam.geniv) cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, true, priv->sec_attr.era); else cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, true, priv->sec_attr.era); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); /* aead_decrypt shared descriptor */ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN + (is_rfc3686 ? 
DESC_AEAD_CTR_RFC3686_LEN : 0), DESC_JOB_IO_LEN, data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0) return -EINVAL; ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); flc = &ctx->flc[DECRYPT]; desc = flc->sh_desc; cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, alg->caam.geniv, is_rfc3686, nonce, ctx1_iv_off, true, priv->sec_attr.era); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); return 0; } static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); ctx->authsize = authsize; aead_set_sh_desc(authenc); return 0; } static int aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; struct crypto_authenc_keys keys; if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto badkey; dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n", keys.authkeylen + keys.enckeylen, keys.enckeylen, keys.authkeylen); print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ctx->adata.keylen = keys.authkeylen; ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & OP_ALG_ALGSEL_MASK); if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) goto badkey; memcpy(ctx->key, keys.authkey, keys.authkeylen); memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad + keys.enckeylen, ctx->dir); print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ctx->adata.keylen_pad + keys.enckeylen, 1); ctx->cdata.keylen = keys.enckeylen; memzero_explicit(&keys, sizeof(keys)); return aead_set_sh_desc(aead); badkey: memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_authenc_keys keys; int err; err = crypto_authenc_extractkeys(&keys, key, keylen); if (unlikely(err)) goto out; err = -EINVAL; if (keys.enckeylen != DES3_EDE_KEY_SIZE) goto out; err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?: aead_setkey(aead, key, keylen); out: memzero_explicit(&keys, sizeof(keys)); return err; } static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_request *req_ctx = aead_request_ctx_dma(req); struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), typeof(*alg), aead); struct device *dev = ctx->dev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; int src_len, dst_len = 0; struct aead_edesc *edesc; dma_addr_t qm_sg_dma, iv_dma = 0; int ivsize = 0; unsigned int authsize = ctx->authsize; int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes; int in_len, out_len; struct dpaa2_sg_entry *sg_table; /* allocate space for base edesc, link tables and IV */ edesc = qi_cache_zalloc(flags); if (unlikely(!edesc)) { dev_err(dev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); } if (unlikely(req->dst != req->src)) { src_len = req->assoclen + req->cryptlen; dst_len = src_len + (encrypt ? authsize : (-authsize)); src_nents = sg_nents_for_len(req->src, src_len); if (unlikely(src_nents < 0)) { dev_err(dev, "Insufficient bytes (%d) in src S/G\n", src_len); qi_cache_free(edesc); return ERR_PTR(src_nents); } dst_nents = sg_nents_for_len(req->dst, dst_len); if (unlikely(dst_nents < 0)) { dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", dst_len); qi_cache_free(edesc); return ERR_PTR(dst_nents); } if (src_nents) { mapped_src_nents = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE); if (unlikely(!mapped_src_nents)) { dev_err(dev, "unable to map source\n"); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } } else { mapped_src_nents = 0; } if (dst_nents) { mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); if (unlikely(!mapped_dst_nents)) { dev_err(dev, "unable to map destination\n"); dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } } else { mapped_dst_nents = 0; } } else { src_len = req->assoclen + req->cryptlen + (encrypt ? authsize : 0); src_nents = sg_nents_for_len(req->src, src_len); if (unlikely(src_nents < 0)) { dev_err(dev, "Insufficient bytes (%d) in src S/G\n", src_len); qi_cache_free(edesc); return ERR_PTR(src_nents); } mapped_src_nents = dma_map_sg(dev, req->src, src_nents, DMA_BIDIRECTIONAL); if (unlikely(!mapped_src_nents)) { dev_err(dev, "unable to map source\n"); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } } if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) ivsize = crypto_aead_ivsize(aead); /* * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. * Input is not contiguous. * HW reads 4 S/G entries at a time; make sure the reads don't go beyond * the end of the table by allocating more S/G entries. Logic: * if (src != dst && output S/G) * pad output S/G, if needed * else if (src == dst && S/G) * overlapping S/Gs; pad one of them * else if (input S/G) ... 
* pad input S/G, if needed */ qm_sg_nents = 1 + !!ivsize + mapped_src_nents; if (mapped_dst_nents > 1) qm_sg_nents += pad_sg_nents(mapped_dst_nents); else if ((req->src == req->dst) && (mapped_src_nents > 1)) qm_sg_nents = max(pad_sg_nents(qm_sg_nents), 1 + !!ivsize + pad_sg_nents(mapped_src_nents)); else qm_sg_nents = pad_sg_nents(qm_sg_nents); sg_table = &edesc->sgt[0]; qm_sg_bytes = qm_sg_nents * sizeof(*sg_table); if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > CAAM_QI_MEMCACHE_SIZE)) { dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", qm_sg_nents, ivsize); caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 0, DMA_NONE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } if (ivsize) { u8 *iv = (u8 *)(sg_table + qm_sg_nents); /* Make sure IV is located in a DMAable area */ memcpy(iv, req->iv, ivsize); iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); if (dma_mapping_error(dev, iv_dma)) { dev_err(dev, "unable to map IV\n"); caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 0, DMA_NONE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE) /* * The associated data comes already with the IV but we need * to skip it when we authenticate or encrypt... */ edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize); else edesc->assoclen = cpu_to_caam32(req->assoclen); edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4, DMA_TO_DEVICE); if (dma_mapping_error(dev, edesc->assoclen_dma)) { dev_err(dev, "unable to map assoclen\n"); caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); qm_sg_index++; if (ivsize) { dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); qm_sg_index++; } sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); qm_sg_index += mapped_src_nents; if (mapped_dst_nents > 1) sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(dev, qm_sg_dma)) { dev_err(dev, "unable to map S/G table\n"); dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } edesc->qm_sg_dma = qm_sg_dma; edesc->qm_sg_bytes = qm_sg_bytes; out_len = req->assoclen + req->cryptlen + (encrypt ? ctx->authsize : (-ctx->authsize)); in_len = 4 + ivsize + req->assoclen + req->cryptlen; memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); dpaa2_fl_set_final(in_fle, true); dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); dpaa2_fl_set_addr(in_fle, qm_sg_dma); dpaa2_fl_set_len(in_fle, in_len); if (req->dst == req->src) { if (mapped_src_nents == 1) { dpaa2_fl_set_format(out_fle, dpaa2_fl_single); dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src)); } else { dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); dpaa2_fl_set_addr(out_fle, qm_sg_dma + (1 + !!ivsize) * sizeof(*sg_table)); } } else if (!mapped_dst_nents) { /* * crypto engine requires the output entry to be present when * "frame list" FD is used. * Since engine does not support FMT=2'b11 (unused entry type), * leaving out_fle zeroized is the best option. 
*/ goto skip_out_fle; } else if (mapped_dst_nents == 1) { dpaa2_fl_set_format(out_fle, dpaa2_fl_single); dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst)); } else { dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index * sizeof(*sg_table)); } dpaa2_fl_set_len(out_fle, out_len); skip_out_fle: return edesc; } static int chachapoly_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); struct device *dev = ctx->dev; struct caam_flc *flc; u32 *desc; if (!ctx->cdata.keylen || !ctx->authsize) return 0; flc = &ctx->flc[ENCRYPT]; desc = flc->sh_desc; cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, true, true); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); flc = &ctx->flc[DECRYPT]; desc = flc->sh_desc; cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, false, true); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); return 0; } static int chachapoly_setauthsize(struct crypto_aead *aead, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); if (authsize != POLY1305_DIGEST_SIZE) return -EINVAL; ctx->authsize = authsize; return chachapoly_set_sh_desc(aead); } static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize; if (keylen != CHACHA_KEY_SIZE + saltlen) return -EINVAL; ctx->cdata.key_virt = key; ctx->cdata.keylen = keylen - saltlen; return chachapoly_set_sh_desc(aead); } static int gcm_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_flc *flc; u32 *desc; int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ctx->cdata.keylen; if (!ctx->cdata.keylen || !ctx->authsize) return 0; /* * AES GCM encrypt shared descriptor * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) { ctx->cdata.key_inline = true; ctx->cdata.key_virt = ctx->key; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } flc = &ctx->flc[ENCRYPT]; desc = flc->sh_desc; cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) { ctx->cdata.key_inline = true; ctx->cdata.key_virt = ctx->key; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } flc = &ctx->flc[DECRYPT]; desc = flc->sh_desc; cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); return 0; } static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = 
crypto_gcm_check_authsize(authsize); if (err) return err; ctx->authsize = authsize; gcm_set_sh_desc(authenc); return 0; } static int gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; int ret; ret = aes_check_keylen(keylen); if (ret) return ret; print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir); ctx->cdata.keylen = keylen; return gcm_set_sh_desc(aead); } static int rfc4106_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_flc *flc; u32 *desc; int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ctx->cdata.keylen; if (!ctx->cdata.keylen || !ctx->authsize) return 0; ctx->cdata.key_virt = ctx->key; /* * RFC4106 encrypt shared descriptor * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) { ctx->cdata.key_inline = true; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } flc = &ctx->flc[ENCRYPT]; desc = flc->sh_desc; cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) { ctx->cdata.key_inline = true; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } flc = &ctx->flc[DECRYPT]; desc = flc->sh_desc; cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); return 0; } static int rfc4106_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_rfc4106_check_authsize(authsize); if (err) return err; ctx->authsize = authsize; rfc4106_set_sh_desc(authenc); return 0; } static int rfc4106_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; int ret; ret = aes_check_keylen(keylen - 4); if (ret) return ret; print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); /* * The last four bytes of the key material are used as the salt value * in the nonce. Update the AES key length. 
*/ ctx->cdata.keylen = keylen - 4; dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, ctx->dir); return rfc4106_set_sh_desc(aead); } static int rfc4543_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; unsigned int ivsize = crypto_aead_ivsize(aead); struct caam_flc *flc; u32 *desc; int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ctx->cdata.keylen; if (!ctx->cdata.keylen || !ctx->authsize) return 0; ctx->cdata.key_virt = ctx->key; /* * RFC4543 encrypt shared descriptor * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) { ctx->cdata.key_inline = true; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } flc = &ctx->flc[ENCRYPT]; desc = flc->sh_desc; cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); /* * Job Descriptor and Shared Descriptors * must all fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) { ctx->cdata.key_inline = true; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } flc = &ctx->flc[DECRYPT]; desc = flc->sh_desc; cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); return 0; } static int rfc4543_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); if (authsize != 16) return -EINVAL; ctx->authsize = authsize; rfc4543_set_sh_desc(authenc); return 0; } static int rfc4543_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *dev = ctx->dev; int ret; ret = aes_check_keylen(keylen - 4); if (ret) return ret; print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); /* * The last four bytes of the key material are used as the salt value * in the nonce. Update the AES key length. 
*/ ctx->cdata.keylen = keylen - 4; dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen, ctx->dir); return rfc4543_set_sh_desc(aead); } static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen, const u32 ctx1_iv_off) { struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_skcipher_alg *alg = container_of(crypto_skcipher_alg(skcipher), struct caam_skcipher_alg, skcipher); struct device *dev = ctx->dev; struct caam_flc *flc; unsigned int ivsize = crypto_skcipher_ivsize(skcipher); u32 *desc; const bool is_rfc3686 = alg->caam.rfc3686; print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; /* skcipher_encrypt shared descriptor */ flc = &ctx->flc[ENCRYPT]; desc = flc->sh_desc; cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686, ctx1_iv_off); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); /* skcipher_decrypt shared descriptor */ flc = &ctx->flc[DECRYPT]; desc = flc->sh_desc; cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686, ctx1_iv_off); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); return 0; } static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { int err; err = aes_check_keylen(keylen); if (err) return err; return skcipher_setkey(skcipher, key, keylen, 0); } static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { u32 ctx1_iv_off; int err; /* * RFC3686 specific: * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} * | *key = {KEY, NONCE} */ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; keylen -= CTR_RFC3686_NONCE_SIZE; err = aes_check_keylen(keylen); if (err) return err; return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { u32 ctx1_iv_off; int err; /* * AES-CTR needs to load IV in CONTEXT1 reg * at an offset of 128bits (16bytes) * CONTEXT1[255:128] = IV */ ctx1_iv_off = 16; err = aes_check_keylen(keylen); if (err) return err; return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { if (keylen != CHACHA_KEY_SIZE) return -EINVAL; return skcipher_setkey(skcipher, key, keylen, 0); } static int des_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { return verify_skcipher_des_key(skcipher, key) ?: skcipher_setkey(skcipher, key, keylen, 0); } static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { return verify_skcipher_des3_key(skcipher, key) ?: skcipher_setkey(skcipher, key, keylen, 0); } static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *dev = ctx->dev; struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); struct caam_flc *flc; u32 *desc; int err; err = xts_verify_key(skcipher, key, keylen); if (err) { dev_dbg(dev, "key size mismatch\n"); return err; } if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256) ctx->xts_key_fallback = 
true; if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) { err = crypto_skcipher_setkey(ctx->fallback, key, keylen); if (err) return err; } ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; /* xts_skcipher_encrypt shared descriptor */ flc = &ctx->flc[ENCRYPT]; desc = flc->sh_desc; cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); /* xts_skcipher_decrypt shared descriptor */ flc = &ctx->flc[DECRYPT]; desc = flc->sh_desc; cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT], sizeof(flc->flc) + desc_bytes(desc), ctx->dir); return 0; } static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct caam_request *req_ctx = skcipher_request_ctx_dma(req); struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *dev = ctx->dev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct skcipher_edesc *edesc; dma_addr_t iv_dma; u8 *iv; int ivsize = crypto_skcipher_ivsize(skcipher); int dst_sg_idx, qm_sg_ents, qm_sg_bytes; struct dpaa2_sg_entry *sg_table; src_nents = sg_nents_for_len(req->src, req->cryptlen); if (unlikely(src_nents < 0)) { dev_err(dev, "Insufficient bytes (%d) in src S/G\n", req->cryptlen); return ERR_PTR(src_nents); } if (unlikely(req->dst != req->src)) { dst_nents = sg_nents_for_len(req->dst, req->cryptlen); if (unlikely(dst_nents < 0)) { dev_err(dev, "Insufficient bytes (%d) in dst S/G\n", req->cryptlen); return ERR_PTR(dst_nents); } mapped_src_nents = dma_map_sg(dev, req->src, src_nents, DMA_TO_DEVICE); if (unlikely(!mapped_src_nents)) { dev_err(dev, "unable to map source\n"); return ERR_PTR(-ENOMEM); } mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); if (unlikely(!mapped_dst_nents)) { dev_err(dev, "unable to map destination\n"); dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE); return ERR_PTR(-ENOMEM); } } else { mapped_src_nents = dma_map_sg(dev, req->src, src_nents, DMA_BIDIRECTIONAL); if (unlikely(!mapped_src_nents)) { dev_err(dev, "unable to map source\n"); return ERR_PTR(-ENOMEM); } } qm_sg_ents = 1 + mapped_src_nents; dst_sg_idx = qm_sg_ents; /* * Input, output HW S/G tables: [IV, src][dst, IV] * IV entries point to the same buffer * If src == dst, S/G entries are reused (S/G tables overlap) * * HW reads 4 S/G entries at a time; make sure the reads don't go beyond * the end of the table by allocating more S/G entries. 
*/ if (req->src != req->dst) qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); else qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry); if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes + ivsize > CAAM_QI_MEMCACHE_SIZE)) { dev_err(dev, "No space for %d S/G entries and/or %dB IV\n", qm_sg_ents, ivsize); caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } /* allocate space for base edesc, link tables and IV */ edesc = qi_cache_zalloc(flags); if (unlikely(!edesc)) { dev_err(dev, "could not allocate extended descriptor\n"); caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } /* Make sure IV is located in a DMAable area */ sg_table = &edesc->sgt[0]; iv = (u8 *)(sg_table + qm_sg_ents); memcpy(iv, req->iv, ivsize); iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, iv_dma)) { dev_err(dev, "unable to map IV\n"); caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0, 0, DMA_NONE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; edesc->qm_sg_bytes = qm_sg_bytes; dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); if (req->src != req->dst) sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, ivsize, 0); edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(dev, edesc->qm_sg_dma)) { dev_err(dev, "unable to map S/G table\n"); caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); dpaa2_fl_set_final(in_fle, true); dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize); dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize); dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); dpaa2_fl_set_format(out_fle, dpaa2_fl_sg); if (req->src == req->dst) dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + sizeof(*sg_table)); else dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx * sizeof(*sg_table)); return edesc; } static void aead_unmap(struct device *dev, struct aead_edesc *edesc, struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); int ivsize = crypto_aead_ivsize(aead); caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, edesc->qm_sg_bytes); dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); } static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); int ivsize = crypto_skcipher_ivsize(skcipher); caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, edesc->qm_sg_bytes); } static void aead_encrypt_done(void *cbk_ctx, u32 status) { struct crypto_async_request *areq = cbk_ctx; struct aead_request *req = container_of(areq, struct aead_request, base); struct caam_request *req_ctx = to_caam_req(areq); struct aead_edesc *edesc = req_ctx->edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); int ecode = 0; 
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); if (unlikely(status)) ecode = caam_qi2_strstatus(ctx->dev, status); aead_unmap(ctx->dev, edesc, req); qi_cache_free(edesc); aead_request_complete(req, ecode); } static void aead_decrypt_done(void *cbk_ctx, u32 status) { struct crypto_async_request *areq = cbk_ctx; struct aead_request *req = container_of(areq, struct aead_request, base); struct caam_request *req_ctx = to_caam_req(areq); struct aead_edesc *edesc = req_ctx->edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); if (unlikely(status)) ecode = caam_qi2_strstatus(ctx->dev, status); aead_unmap(ctx->dev, edesc, req); qi_cache_free(edesc); aead_request_complete(req, ecode); } static int aead_encrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct caam_request *caam_req = aead_request_ctx_dma(req); int ret; /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, true); if (IS_ERR(edesc)) return PTR_ERR(edesc); caam_req->flc = &ctx->flc[ENCRYPT]; caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; caam_req->cbk = aead_encrypt_done; caam_req->ctx = &req->base; caam_req->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, caam_req); if (ret != -EINPROGRESS && !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { aead_unmap(ctx->dev, edesc, req); qi_cache_free(edesc); } return ret; } static int aead_decrypt(struct aead_request *req) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct caam_request *caam_req = aead_request_ctx_dma(req); int ret; /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, false); if (IS_ERR(edesc)) return PTR_ERR(edesc); caam_req->flc = &ctx->flc[DECRYPT]; caam_req->flc_dma = ctx->flc_dma[DECRYPT]; caam_req->cbk = aead_decrypt_done; caam_req->ctx = &req->base; caam_req->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, caam_req); if (ret != -EINPROGRESS && !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { aead_unmap(ctx->dev, edesc, req); qi_cache_free(edesc); } return ret; } static int ipsec_gcm_encrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req); } static int ipsec_gcm_decrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req); } static void skcipher_encrypt_done(void *cbk_ctx, u32 status) { struct crypto_async_request *areq = cbk_ctx; struct skcipher_request *req = skcipher_request_cast(areq); struct caam_request *req_ctx = to_caam_req(areq); struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct skcipher_edesc *edesc = req_ctx->edesc; int ecode = 0; int ivsize = crypto_skcipher_ivsize(skcipher); dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); if (unlikely(status)) ecode = caam_qi2_strstatus(ctx->dev, status); print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, edesc->src_nents > 1 ? 100 : ivsize, 1); caam_dump_sg("dst @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->dst, edesc->dst_nents > 1 ? 
100 : req->cryptlen, 1); skcipher_unmap(ctx->dev, edesc, req); /* * The crypto API expects us to set the IV (req->iv) to the last * ciphertext block (CBC mode) or last counter (CTR mode). * This is used e.g. by the CTS mode. */ if (!ecode) memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); qi_cache_free(edesc); skcipher_request_complete(req, ecode); } static void skcipher_decrypt_done(void *cbk_ctx, u32 status) { struct crypto_async_request *areq = cbk_ctx; struct skcipher_request *req = skcipher_request_cast(areq); struct caam_request *req_ctx = to_caam_req(areq); struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct skcipher_edesc *edesc = req_ctx->edesc; int ecode = 0; int ivsize = crypto_skcipher_ivsize(skcipher); dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); if (unlikely(status)) ecode = caam_qi2_strstatus(ctx->dev, status); print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, edesc->src_nents > 1 ? 100 : ivsize, 1); caam_dump_sg("dst @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->dst, edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); skcipher_unmap(ctx->dev, edesc, req); /* * The crypto API expects us to set the IV (req->iv) to the last * ciphertext block (CBC mode) or last counter (CTR mode). * This is used e.g. by the CTS mode. */ if (!ecode) memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, ivsize); qi_cache_free(edesc); skcipher_request_complete(req, ecode); } static inline bool xts_skcipher_ivsize(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); unsigned int ivsize = crypto_skcipher_ivsize(skcipher); return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); } static int skcipher_encrypt(struct skcipher_request *req) { struct skcipher_edesc *edesc; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_request *caam_req = skcipher_request_ctx_dma(req); struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); int ret; /* * XTS is expected to return an error even for input length = 0 * Note that the case input length < block size will be caught during * HW offloading and return an error. 
*/ if (!req->cryptlen && !ctx->fallback) return 0; if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || ctx->xts_key_fallback)) { skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); skcipher_request_set_callback(&caam_req->fallback_req, req->base.flags, req->base.complete, req->base.data); skcipher_request_set_crypt(&caam_req->fallback_req, req->src, req->dst, req->cryptlen, req->iv); return crypto_skcipher_encrypt(&caam_req->fallback_req); } /* allocate extended descriptor */ edesc = skcipher_edesc_alloc(req); if (IS_ERR(edesc)) return PTR_ERR(edesc); caam_req->flc = &ctx->flc[ENCRYPT]; caam_req->flc_dma = ctx->flc_dma[ENCRYPT]; caam_req->cbk = skcipher_encrypt_done; caam_req->ctx = &req->base; caam_req->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, caam_req); if (ret != -EINPROGRESS && !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { skcipher_unmap(ctx->dev, edesc, req); qi_cache_free(edesc); } return ret; } static int skcipher_decrypt(struct skcipher_request *req) { struct skcipher_edesc *edesc; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_request *caam_req = skcipher_request_ctx_dma(req); struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); int ret; /* * XTS is expected to return an error even for input length = 0 * Note that the case input length < block size will be caught during * HW offloading and return an error. */ if (!req->cryptlen && !ctx->fallback) return 0; if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) || ctx->xts_key_fallback)) { skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback); skcipher_request_set_callback(&caam_req->fallback_req, req->base.flags, req->base.complete, req->base.data); skcipher_request_set_crypt(&caam_req->fallback_req, req->src, req->dst, req->cryptlen, req->iv); return crypto_skcipher_decrypt(&caam_req->fallback_req); } /* allocate extended descriptor */ edesc = skcipher_edesc_alloc(req); if (IS_ERR(edesc)) return PTR_ERR(edesc); caam_req->flc = &ctx->flc[DECRYPT]; caam_req->flc_dma = ctx->flc_dma[DECRYPT]; caam_req->cbk = skcipher_decrypt_done; caam_req->ctx = &req->base; caam_req->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, caam_req); if (ret != -EINPROGRESS && !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { skcipher_unmap(ctx->dev, edesc, req); qi_cache_free(edesc); } return ret; } static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam, bool uses_dkp) { dma_addr_t dma_addr; int i; /* copy descriptor header template value */ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; ctx->dev = caam->dev; ctx->dir = uses_dkp ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, offsetof(struct caam_ctx, flc_dma), ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(ctx->dev, dma_addr)) { dev_err(ctx->dev, "unable to map key, shared descriptors\n"); return -ENOMEM; } for (i = 0; i < NUM_OP; i++) ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]); return 0; } static int caam_cra_init_skcipher(struct crypto_skcipher *tfm) { struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct caam_skcipher_alg *caam_alg = container_of(alg, typeof(*caam_alg), skcipher); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; int ret = 0; if (alg_aai == OP_ALG_AAI_XTS) { const char *tfm_name = crypto_tfm_alg_name(&tfm->base); struct crypto_skcipher *fallback; fallback = crypto_alloc_skcipher(tfm_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback)) { dev_err(caam_alg->caam.dev, "Failed to allocate %s fallback: %ld\n", tfm_name, PTR_ERR(fallback)); return PTR_ERR(fallback); } ctx->fallback = fallback; crypto_skcipher_set_reqsize_dma( tfm, sizeof(struct caam_request) + crypto_skcipher_reqsize(fallback)); } else { crypto_skcipher_set_reqsize_dma(tfm, sizeof(struct caam_request)); } ret = caam_cra_init(ctx, &caam_alg->caam, false); if (ret && ctx->fallback) crypto_free_skcipher(ctx->fallback); return ret; } static int caam_cra_init_aead(struct crypto_aead *tfm) { struct aead_alg *alg = crypto_aead_alg(tfm); struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg), aead); crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request)); return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam, !caam_alg->caam.nodkp); } static void caam_exit_common(struct caam_ctx *ctx) { dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], offsetof(struct caam_ctx, flc_dma), ctx->dir, DMA_ATTR_SKIP_CPU_SYNC); } static void caam_cra_exit(struct crypto_skcipher *tfm) { struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); if (ctx->fallback) crypto_free_skcipher(ctx->fallback); caam_exit_common(ctx); } static void caam_cra_exit_aead(struct crypto_aead *tfm) { caam_exit_common(crypto_aead_ctx_dma(tfm)); } static struct caam_skcipher_alg driver_algs[] = { { .skcipher = { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aes_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, }, { .skcipher = { .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-3des-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, }, { .skcipher = { .base = { .cra_name = "cbc(des)", .cra_driver_name = "cbc-des-caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = des_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, }, { .skcipher = { .base = { .cra_name = "ctr(aes)", .cra_driver_name = "ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = 
ctr_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .chunksize = AES_BLOCK_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, }, { .skcipher = { .base = { .cra_name = "rfc3686(ctr(aes))", .cra_driver_name = "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = rfc3686_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .ivsize = CTR_RFC3686_IV_SIZE, .chunksize = AES_BLOCK_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .rfc3686 = true, }, }, { .skcipher = { .base = { .cra_name = "xts(aes)", .cra_driver_name = "xts-aes-caam-qi2", .cra_flags = CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = xts_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, }, { .skcipher = { .base = { .cra_name = "chacha20", .cra_driver_name = "chacha20-caam-qi2", .cra_blocksize = 1, }, .setkey = chacha20_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = CHACHA_KEY_SIZE, .max_keysize = CHACHA_KEY_SIZE, .ivsize = CHACHA_IV_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20, }, }; static struct caam_aead_alg driver_aeads[] = { { .aead = { .base = { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "rfc4106-gcm-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = rfc4106_setkey, .setauthsize = rfc4106_setauthsize, .encrypt = ipsec_gcm_encrypt, .decrypt = ipsec_gcm_decrypt, .ivsize = 8, .maxauthsize = AES_BLOCK_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, .nodkp = true, }, }, { .aead = { .base = { .cra_name = "rfc4543(gcm(aes))", .cra_driver_name = "rfc4543-gcm-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = rfc4543_setkey, .setauthsize = rfc4543_setauthsize, .encrypt = ipsec_gcm_encrypt, .decrypt = ipsec_gcm_decrypt, .ivsize = 8, .maxauthsize = AES_BLOCK_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, .nodkp = true, }, }, /* Galois Counter Mode */ { .aead = { .base = { .cra_name = "gcm(aes)", .cra_driver_name = "gcm-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = gcm_setkey, .setauthsize = gcm_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = 12, .maxauthsize = AES_BLOCK_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, .nodkp = true, } }, /* single-pass ipsec_esp descriptor */ { .aead = { .base = { .cra_name = "authenc(hmac(md5),cbc(aes))", .cra_driver_name = "authenc-hmac-md5-" "cbc-aes-caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-hmac-md5-" "cbc-aes-caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = 
MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "authenc-hmac-sha1-" "cbc-aes-caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha1-cbc-aes-caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead = { .base = { .cra_name = "authenc(hmac(sha224),cbc(aes))", .cra_driver_name = "authenc-hmac-sha224-" "cbc-aes-caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha224-cbc-aes-caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "authenc-hmac-sha256-" "cbc-aes-caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha256-cbc-aes-" "caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha384),cbc(aes))", .cra_driver_name = "authenc-hmac-sha384-" "cbc-aes-caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | 
OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha384)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha384-cbc-aes-" "caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "authenc-hmac-sha512-" "cbc-aes-caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha512-cbc-aes-" "caam-qi2", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(md5),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-md5-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-hmac-md5-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha1)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha1-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha1-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = 
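/*
 * .geniv marks the echainiv()/seqiv() wrapped variants: each plain
 * authenc(hash,cipher) entry in this table has such a twin, for which
 * the encrypt shared descriptor is built in its IV-generating
 * (givencap) form instead of taking the IV from the request.
 */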
true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha224)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha224-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha224-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha256)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha256-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha256-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha384)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha384-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha384)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha384-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha512)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha512-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | 
OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha512-" "cbc-des3_ede-caam-qi2", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(md5),cbc(des))", .cra_driver_name = "authenc-hmac-md5-" "cbc-des-caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-hmac-md5-" "cbc-des-caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha1),cbc(des))", .cra_driver_name = "authenc-hmac-sha1-" "cbc-des-caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha1-cbc-des-caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha224),cbc(des))", .cra_driver_name = "authenc-hmac-sha224-" "cbc-des-caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha224-cbc-des-" "caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { 
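/*
 * All of these transforms are reached through the generic AEAD API.
 * A minimal caller sketch against the gcm(aes) entry above (placeholder
 * buffer/scatterlist names, error handling omitted):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	struct aead_request *req = aead_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				  crypto_req_done, &wait);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, sg, sg, ptlen, iv);
 *	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);
 *
 * The source scatterlist must cover the associated data followed by the
 * plaintext; the destination additionally needs room for the tag.
 */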
.base = { .cra_name = "authenc(hmac(sha256),cbc(des))", .cra_driver_name = "authenc-hmac-sha256-" "cbc-des-caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha256-cbc-des-" "caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead = { .base = { .cra_name = "authenc(hmac(sha384),cbc(des))", .cra_driver_name = "authenc-hmac-sha384-" "cbc-des-caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha384)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha384-cbc-des-" "caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha512),cbc(des))", .cra_driver_name = "authenc-hmac-sha512-" "cbc-des-caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha512-cbc-des-" "caam-qi2", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(md5)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-md5-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead = { .base = { .cra_name = "seqiv(authenc(" "hmac(md5),rfc3686(ctr(aes))))", .cra_driver_name = 
"seqiv-authenc-hmac-md5-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, { .aead = { .base = { .cra_name = "authenc(hmac(sha1)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha1-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead = { .base = { .cra_name = "seqiv(authenc(" "hmac(sha1),rfc3686(ctr(aes))))", .cra_driver_name = "seqiv-authenc-hmac-sha1-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, { .aead = { .base = { .cra_name = "authenc(hmac(sha224)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha224-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead = { .base = { .cra_name = "seqiv(authenc(" "hmac(sha224),rfc3686(ctr(aes))))", .cra_driver_name = "seqiv-authenc-hmac-sha224-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, { .aead = { .base = { .cra_name = "authenc(hmac(sha256)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha256-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead = { .base = { .cra_name = "seqiv(authenc(hmac(sha256)," "rfc3686(ctr(aes))))", .cra_driver_name = "seqiv-authenc-hmac-sha256-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, { 
.aead = { .base = { .cra_name = "authenc(hmac(sha384)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha384-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead = { .base = { .cra_name = "seqiv(authenc(hmac(sha384)," "rfc3686(ctr(aes))))", .cra_driver_name = "seqiv-authenc-hmac-sha384-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, { .aead = { .base = { .cra_name = "rfc7539(chacha20,poly1305)", .cra_driver_name = "rfc7539-chacha20-poly1305-" "caam-qi2", .cra_blocksize = 1, }, .setkey = chachapoly_setkey, .setauthsize = chachapoly_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CHACHAPOLY_IV_SIZE, .maxauthsize = POLY1305_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | OP_ALG_AAI_AEAD, .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | OP_ALG_AAI_AEAD, .nodkp = true, }, }, { .aead = { .base = { .cra_name = "rfc7539esp(chacha20,poly1305)", .cra_driver_name = "rfc7539esp-chacha20-" "poly1305-caam-qi2", .cra_blocksize = 1, }, .setkey = chachapoly_setkey, .setauthsize = chachapoly_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = 8, .maxauthsize = POLY1305_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_CHACHA20 | OP_ALG_AAI_AEAD, .class2_alg_type = OP_ALG_ALGSEL_POLY1305 | OP_ALG_AAI_AEAD, .nodkp = true, }, }, { .aead = { .base = { .cra_name = "authenc(hmac(sha512)," "rfc3686(ctr(aes)))", .cra_driver_name = "authenc-hmac-sha512-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, }, }, { .aead = { .base = { .cra_name = "seqiv(authenc(hmac(sha512)," "rfc3686(ctr(aes))))", .cra_driver_name = "seqiv-authenc-hmac-sha512-" "rfc3686-ctr-aes-caam-qi2", .cra_blocksize = 1, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .rfc3686 = true, .geniv = true, }, }, }; static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) { struct skcipher_alg *alg = &t_alg->skcipher; alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY); alg->init = caam_cra_init_skcipher; alg->exit = caam_cra_exit; } static void caam_aead_alg_init(struct caam_aead_alg 
*t_alg) { struct aead_alg *alg = &t_alg->aead; alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_cra_init_aead; alg->exit = caam_cra_exit_aead; } /* max hash key is max split key size */ #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2) #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE /* caam context sizes for hashes: running digest + 8 */ #define HASH_MSG_LEN 8 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE) enum hash_optype { UPDATE = 0, UPDATE_FIRST, FINALIZE, DIGEST, HASH_NUM_OP }; /** * struct caam_hash_ctx - ahash per-session context * @flc: Flow Contexts array * @key: authentication key * @flc_dma: I/O virtual addresses of the Flow Contexts * @dev: dpseci device * @ctx_len: size of Context Register * @adata: hashing algorithm details */ struct caam_hash_ctx { struct caam_flc flc[HASH_NUM_OP]; u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; dma_addr_t flc_dma[HASH_NUM_OP]; struct device *dev; int ctx_len; struct alginfo adata; }; /* ahash state */ struct caam_hash_state { struct caam_request caam_req; dma_addr_t buf_dma; dma_addr_t ctx_dma; int ctx_dma_len; u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned; int buflen; int next_buflen; u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned; int (*update)(struct ahash_request *req); int (*final)(struct ahash_request *req); int (*finup)(struct ahash_request *req); }; struct caam_export_state { u8 buf[CAAM_MAX_HASH_BLOCK_SIZE]; u8 caam_ctx[MAX_CTX_LEN]; int buflen; int (*update)(struct ahash_request *req); int (*final)(struct ahash_request *req); int (*finup)(struct ahash_request *req); }; /* Map current buffer in state (if length > 0) and put it in link table */ static inline int buf_map_to_qm_sg(struct device *dev, struct dpaa2_sg_entry *qm_sg, struct caam_hash_state *state) { int buflen = state->buflen; if (!buflen) return 0; state->buf_dma = dma_map_single(dev, state->buf, buflen, DMA_TO_DEVICE); if (dma_mapping_error(dev, state->buf_dma)) { dev_err(dev, "unable to map buf\n"); state->buf_dma = 0; return -ENOMEM; } dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0); return 0; } /* Map state->caam_ctx, and add it to link table */ static inline int ctx_map_to_qm_sg(struct device *dev, struct caam_hash_state *state, int ctx_len, struct dpaa2_sg_entry *qm_sg, u32 flag) { state->ctx_dma_len = ctx_len; state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag); if (dma_mapping_error(dev, state->ctx_dma)) { dev_err(dev, "unable to map ctx\n"); state->ctx_dma = 0; return -ENOMEM; } dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0); return 0; } static int ahash_set_sh_desc(struct crypto_ahash *ahash) { struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev); struct caam_flc *flc; u32 *desc; /* ahash_update shared descriptor */ flc = &ctx->flc[UPDATE]; desc = flc->sh_desc; cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len, ctx->ctx_len, true, priv->sec_attr.era); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE], desc_bytes(desc), DMA_BIDIRECTIONAL); print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* ahash_update_first shared 
descriptor */ flc = &ctx->flc[UPDATE_FIRST]; desc = flc->sh_desc; cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len, ctx->ctx_len, false, priv->sec_attr.era); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST], desc_bytes(desc), DMA_BIDIRECTIONAL); print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* ahash_final shared descriptor */ flc = &ctx->flc[FINALIZE]; desc = flc->sh_desc; cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize, ctx->ctx_len, true, priv->sec_attr.era); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE], desc_bytes(desc), DMA_BIDIRECTIONAL); print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); /* ahash_digest shared descriptor */ flc = &ctx->flc[DIGEST]; desc = flc->sh_desc; cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize, ctx->ctx_len, false, priv->sec_attr.era); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST], desc_bytes(desc), DMA_BIDIRECTIONAL); print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return 0; } struct split_key_sh_result { struct completion completion; int err; struct device *dev; }; static void split_key_sh_done(void *cbk_ctx, u32 err) { struct split_key_sh_result *res = cbk_ctx; dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); res->err = err ? caam_qi2_strstatus(res->dev, err) : 0; complete(&res->completion); } /* Digest hash size if it is too large */ static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key, u32 digestsize) { struct caam_request *req_ctx; u32 *desc; struct split_key_sh_result result; dma_addr_t key_dma; struct caam_flc *flc; dma_addr_t flc_dma; int ret = -ENOMEM; struct dpaa2_fl_entry *in_fle, *out_fle; req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL); if (!req_ctx) return -ENOMEM; in_fle = &req_ctx->fd_flt[1]; out_fle = &req_ctx->fd_flt[0]; flc = kzalloc(sizeof(*flc), GFP_KERNEL); if (!flc) goto err_flc; key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL); if (dma_mapping_error(ctx->dev, key_dma)) { dev_err(ctx->dev, "unable to map key memory\n"); goto err_key_dma; } desc = flc->sh_desc; init_sh_desc(desc, 0); /* descriptor to perform unkeyed hash on key_in */ append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT | OP_ALG_AS_INITFINAL); append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG); append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */ flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) + desc_bytes(desc), DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, flc_dma)) { dev_err(ctx->dev, "unable to map shared descriptor\n"); goto err_flc_dma; } dpaa2_fl_set_final(in_fle, true); dpaa2_fl_set_format(in_fle, dpaa2_fl_single); dpaa2_fl_set_addr(in_fle, key_dma); dpaa2_fl_set_len(in_fle, *keylen); dpaa2_fl_set_format(out_fle, dpaa2_fl_single); dpaa2_fl_set_addr(out_fle, key_dma); dpaa2_fl_set_len(out_fle, digestsize); print_hex_dump_debug("key_in@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1); print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ", 
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); result.err = 0; init_completion(&result.completion); result.dev = ctx->dev; req_ctx->flc = flc; req_ctx->flc_dma = flc_dma; req_ctx->cbk = split_key_sh_done; req_ctx->ctx = &result; ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); if (ret == -EINPROGRESS) { /* in progress */ wait_for_completion(&result.completion); ret = result.err; print_hex_dump_debug("digested key@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, digestsize, 1); } dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc), DMA_TO_DEVICE); err_flc_dma: dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL); err_key_dma: kfree(flc); err_flc: kfree(req_ctx); *keylen = digestsize; return ret; } static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base); unsigned int digestsize = crypto_ahash_digestsize(ahash); int ret; u8 *hashed_key = NULL; dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize); if (keylen > blocksize) { unsigned int aligned_len = ALIGN(keylen, dma_get_cache_alignment()); if (aligned_len < keylen) return -EOVERFLOW; hashed_key = kmemdup(key, aligned_len, GFP_KERNEL); if (!hashed_key) return -ENOMEM; ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize); if (ret) goto bad_free_key; key = hashed_key; } ctx->adata.keylen = keylen; ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & OP_ALG_ALGSEL_MASK); if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE) goto bad_free_key; ctx->adata.key_virt = key; ctx->adata.key_inline = true; /* * In case |user key| > |derived key|, using DKP<imm,imm> would result * in invalid opcodes (last bytes of user key) in the resulting * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key * addresses are needed. 
*/ if (keylen > ctx->adata.keylen_pad) { memcpy(ctx->key, key, keylen); dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma, ctx->adata.keylen_pad, DMA_TO_DEVICE); } ret = ahash_set_sh_desc(ahash); kfree(hashed_key); return ret; bad_free_key: kfree(hashed_key); return -EINVAL; } static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req) { struct caam_hash_state *state = ahash_request_ctx_dma(req); if (edesc->src_nents) dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); if (edesc->qm_sg_bytes) dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes, DMA_TO_DEVICE); if (state->buf_dma) { dma_unmap_single(dev, state->buf_dma, state->buflen, DMA_TO_DEVICE); state->buf_dma = 0; } } static inline void ahash_unmap_ctx(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, u32 flag) { struct caam_hash_state *state = ahash_request_ctx_dma(req); if (state->ctx_dma) { dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag); state->ctx_dma = 0; } ahash_unmap(dev, edesc, req); } static void ahash_done(void *cbk_ctx, u32 status) { struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); if (unlikely(status)) ecode = caam_qi2_strstatus(ctx->dev, status); ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); memcpy(req->result, state->caam_ctx, digestsize); qi_cache_free(edesc); print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); ahash_request_complete(req, ecode); } static void ahash_done_bi(void *cbk_ctx, u32 status) { struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); if (unlikely(status)) ecode = caam_qi2_strstatus(ctx->dev, status); ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); qi_cache_free(edesc); scatterwalk_map_and_copy(state->buf, req->src, req->nbytes - state->next_buflen, state->next_buflen, 0); state->buflen = state->next_buflen; print_hex_dump_debug("buf@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->buf, state->buflen, 1); print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); if (req->result) print_hex_dump_debug("result@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->result, crypto_ahash_digestsize(ahash), 1); ahash_request_complete(req, ecode); } static void ahash_done_ctx_src(void *cbk_ctx, u32 status) { struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int digestsize = crypto_ahash_digestsize(ahash); int ecode = 0; 
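/*
 * All of the ahash completion callbacks follow the same pattern:
 * translate a non-zero frame status via caam_qi2_strstatus(), undo the
 * DMA mappings, copy the digest or running context out for the caller,
 * return the edesc to the qi cache and complete the request.
 */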
dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); if (unlikely(status)) ecode = caam_qi2_strstatus(ctx->dev, status); ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); memcpy(req->result, state->caam_ctx, digestsize); qi_cache_free(edesc); print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); ahash_request_complete(req, ecode); } static void ahash_done_ctx_dst(void *cbk_ctx, u32 status) { struct crypto_async_request *areq = cbk_ctx; struct ahash_request *req = ahash_request_cast(areq); struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct ahash_edesc *edesc = state->caam_req.edesc; struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); int ecode = 0; dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status); if (unlikely(status)) ecode = caam_qi2_strstatus(ctx->dev, status); ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); qi_cache_free(edesc); scatterwalk_map_and_copy(state->buf, req->src, req->nbytes - state->next_buflen, state->next_buflen, 0); state->buflen = state->next_buflen; print_hex_dump_debug("buf@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->buf, state->buflen, 1); print_hex_dump_debug("ctx@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx, ctx->ctx_len, 1); if (req->result) print_hex_dump_debug("result@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->result, crypto_ahash_digestsize(ahash), 1); ahash_request_complete(req, ecode); } static int ahash_update_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; u8 *buf = state->buf; int *buflen = &state->buflen; int *next_buflen = &state->next_buflen; int in_len = *buflen + req->nbytes, to_hash; int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index; struct ahash_edesc *edesc; int ret = 0; *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); to_hash = in_len - *next_buflen; if (to_hash) { struct dpaa2_sg_entry *sg_table; int src_len = req->nbytes - *next_buflen; src_nents = sg_nents_for_len(req->src, src_len); if (src_nents < 0) { dev_err(ctx->dev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(ctx->dev, "unable to DMA map source\n"); return -ENOMEM; } } else { mapped_nents = 0; } /* allocate space for base edesc and link tables */ edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } edesc->src_nents = src_nents; qm_sg_src_index = 1 + (*buflen ? 
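/*
 * Input S/G layout for an UPDATE job: entry 0 is the running context,
 * entry 1 (only when there are buffered bytes) the partial block kept
 * in state->buf, followed by the mapped source entries - hence the
 * 1 + (buflen ? 1 : 0) index computed here.
 */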
1 : 0); qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) * sizeof(*sg_table); sg_table = &edesc->sgt[0]; ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); if (ret) goto unmap_ctx; if (mapped_nents) { sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_src_index, 0); } else { dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1, true); } edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { dev_err(ctx->dev, "unable to map S/G table\n"); ret = -ENOMEM; goto unmap_ctx; } edesc->qm_sg_bytes = qm_sg_bytes; memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); dpaa2_fl_set_final(in_fle, true); dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash); dpaa2_fl_set_format(out_fle, dpaa2_fl_single); dpaa2_fl_set_addr(out_fle, state->ctx_dma); dpaa2_fl_set_len(out_fle, ctx->ctx_len); req_ctx->flc = &ctx->flc[UPDATE]; req_ctx->flc_dma = ctx->flc_dma[UPDATE]; req_ctx->cbk = ahash_done_bi; req_ctx->ctx = &req->base; req_ctx->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); if (ret != -EINPROGRESS && !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) goto unmap_ctx; } else if (*next_buflen) { scatterwalk_map_and_copy(buf + *buflen, req->src, 0, req->nbytes, 0); *buflen = *next_buflen; print_hex_dump_debug("buf@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); } return ret; unmap_ctx: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); qi_cache_free(edesc); return ret; } static int ahash_final_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; int buflen = state->buflen; int qm_sg_bytes; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; struct dpaa2_sg_entry *sg_table; int ret; /* allocate space for base edesc and link tables */ edesc = qi_cache_zalloc(flags); if (!edesc) return -ENOMEM; qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table); sg_table = &edesc->sgt[0]; ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); if (ret) goto unmap_ctx; dpaa2_sg_set_final(sg_table + (buflen ? 
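/*
 * final(): only the running context and any buffered bytes are hashed;
 * whichever of the two is the last populated S/G entry gets the final
 * bit so the engine knows where the frame ends.
 */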
1 : 0), true); edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { dev_err(ctx->dev, "unable to map S/G table\n"); ret = -ENOMEM; goto unmap_ctx; } edesc->qm_sg_bytes = qm_sg_bytes; memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); dpaa2_fl_set_final(in_fle, true); dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen); dpaa2_fl_set_format(out_fle, dpaa2_fl_single); dpaa2_fl_set_addr(out_fle, state->ctx_dma); dpaa2_fl_set_len(out_fle, digestsize); req_ctx->flc = &ctx->flc[FINALIZE]; req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; req_ctx->cbk = ahash_done_ctx_src; req_ctx->ctx = &req->base; req_ctx->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); if (ret == -EINPROGRESS || (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) return ret; unmap_ctx: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); qi_cache_free(edesc); return ret; } static int ahash_finup_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; int buflen = state->buflen; int qm_sg_bytes, qm_sg_src_index; int src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; struct dpaa2_sg_entry *sg_table; int ret; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { dev_err(ctx->dev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(ctx->dev, "unable to DMA map source\n"); return -ENOMEM; } } else { mapped_nents = 0; } /* allocate space for base edesc and link tables */ edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } edesc->src_nents = src_nents; qm_sg_src_index = 1 + (buflen ? 
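/*
 * finup(): the running context, any buffered bytes and the whole
 * request source are hashed in a single FINALIZE pass, so the S/G
 * table is laid out exactly as in the update path.
 */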
1 : 0); qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) * sizeof(*sg_table); sg_table = &edesc->sgt[0]; ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table, DMA_BIDIRECTIONAL); if (ret) goto unmap_ctx; ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state); if (ret) goto unmap_ctx; sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0); edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { dev_err(ctx->dev, "unable to map S/G table\n"); ret = -ENOMEM; goto unmap_ctx; } edesc->qm_sg_bytes = qm_sg_bytes; memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); dpaa2_fl_set_final(in_fle, true); dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes); dpaa2_fl_set_format(out_fle, dpaa2_fl_single); dpaa2_fl_set_addr(out_fle, state->ctx_dma); dpaa2_fl_set_len(out_fle, digestsize); req_ctx->flc = &ctx->flc[FINALIZE]; req_ctx->flc_dma = ctx->flc_dma[FINALIZE]; req_ctx->cbk = ahash_done_ctx_src; req_ctx->ctx = &req->base; req_ctx->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); if (ret == -EINPROGRESS || (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) return ret; unmap_ctx: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL); qi_cache_free(edesc); return ret; } static int ahash_digest(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
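/*
 * crypto_ahash_digest() ends up here for one-shot hashing. A minimal
 * caller sketch (placeholder data/len names, error handling omitted):
 *
 *	DECLARE_CRYPTO_WAIT(wait);
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 out[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   crypto_req_done, &wait);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	err = crypto_wait_req(crypto_ahash_digest(req), &wait);
 */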
GFP_KERNEL : GFP_ATOMIC; int digestsize = crypto_ahash_digestsize(ahash); int src_nents, mapped_nents; struct ahash_edesc *edesc; int ret = -ENOMEM; state->buf_dma = 0; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { dev_err(ctx->dev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(ctx->dev, "unable to map source for DMA\n"); return ret; } } else { mapped_nents = 0; } /* allocate space for base edesc and link tables */ edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); return ret; } edesc->src_nents = src_nents; memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); if (mapped_nents > 1) { int qm_sg_bytes; struct dpaa2_sg_entry *sg_table = &edesc->sgt[0]; qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table); sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0); edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { dev_err(ctx->dev, "unable to map S/G table\n"); goto unmap; } edesc->qm_sg_bytes = qm_sg_bytes; dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); } else { dpaa2_fl_set_format(in_fle, dpaa2_fl_single); dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); } state->ctx_dma_len = digestsize; state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, DMA_FROM_DEVICE); if (dma_mapping_error(ctx->dev, state->ctx_dma)) { dev_err(ctx->dev, "unable to map ctx\n"); state->ctx_dma = 0; goto unmap; } dpaa2_fl_set_final(in_fle, true); dpaa2_fl_set_len(in_fle, req->nbytes); dpaa2_fl_set_format(out_fle, dpaa2_fl_single); dpaa2_fl_set_addr(out_fle, state->ctx_dma); dpaa2_fl_set_len(out_fle, digestsize); req_ctx->flc = &ctx->flc[DIGEST]; req_ctx->flc_dma = ctx->flc_dma[DIGEST]; req_ctx->cbk = ahash_done; req_ctx->ctx = &req->base; req_ctx->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); if (ret == -EINPROGRESS || (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) return ret; unmap: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); qi_cache_free(edesc); return ret; } static int ahash_final_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
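/*
 * final() before any data was sent to the engine: the whole message
 * still sits in state->buf, so a single DIGEST job over the buffered
 * bytes (possibly of zero length, see the in_fle note below) produces
 * the result.
 */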
GFP_KERNEL : GFP_ATOMIC; u8 *buf = state->buf; int buflen = state->buflen; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; int ret = -ENOMEM; /* allocate space for base edesc and link tables */ edesc = qi_cache_zalloc(flags); if (!edesc) return ret; if (buflen) { state->buf_dma = dma_map_single(ctx->dev, buf, buflen, DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, state->buf_dma)) { dev_err(ctx->dev, "unable to map src\n"); goto unmap; } } state->ctx_dma_len = digestsize; state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, DMA_FROM_DEVICE); if (dma_mapping_error(ctx->dev, state->ctx_dma)) { dev_err(ctx->dev, "unable to map ctx\n"); state->ctx_dma = 0; goto unmap; } memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); dpaa2_fl_set_final(in_fle, true); /* * crypto engine requires the input entry to be present when * "frame list" FD is used. * Since engine does not support FMT=2'b11 (unused entry type), leaving * in_fle zeroized (except for "Final" flag) is the best option. */ if (buflen) { dpaa2_fl_set_format(in_fle, dpaa2_fl_single); dpaa2_fl_set_addr(in_fle, state->buf_dma); dpaa2_fl_set_len(in_fle, buflen); } dpaa2_fl_set_format(out_fle, dpaa2_fl_single); dpaa2_fl_set_addr(out_fle, state->ctx_dma); dpaa2_fl_set_len(out_fle, digestsize); req_ctx->flc = &ctx->flc[DIGEST]; req_ctx->flc_dma = ctx->flc_dma[DIGEST]; req_ctx->cbk = ahash_done; req_ctx->ctx = &req->base; req_ctx->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); if (ret == -EINPROGRESS || (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) return ret; unmap: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); qi_cache_free(edesc); return ret; } static int ahash_update_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; u8 *buf = state->buf; int *buflen = &state->buflen; int *next_buflen = &state->next_buflen; int in_len = *buflen + req->nbytes, to_hash; int qm_sg_bytes, src_nents, mapped_nents; struct ahash_edesc *edesc; int ret = 0; *next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1); to_hash = in_len - *next_buflen; if (to_hash) { struct dpaa2_sg_entry *sg_table; int src_len = req->nbytes - *next_buflen; src_nents = sg_nents_for_len(req->src, src_len); if (src_nents < 0) { dev_err(ctx->dev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(ctx->dev, "unable to DMA map source\n"); return -ENOMEM; } } else { mapped_nents = 0; } /* allocate space for base edesc and link tables */ edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } edesc->src_nents = src_nents; qm_sg_bytes = pad_sg_nents(1 + mapped_nents) * sizeof(*sg_table); sg_table = &edesc->sgt[0]; ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); if (ret) goto unmap_ctx; sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0); edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { dev_err(ctx->dev, "unable to map S/G table\n"); ret = -ENOMEM; goto unmap_ctx; } edesc->qm_sg_bytes = qm_sg_bytes; state->ctx_dma_len = ctx->ctx_len; state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, ctx->ctx_len, DMA_FROM_DEVICE); if (dma_mapping_error(ctx->dev, state->ctx_dma)) { dev_err(ctx->dev, "unable to map ctx\n"); state->ctx_dma = 0; ret = -ENOMEM; goto unmap_ctx; } memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); dpaa2_fl_set_final(in_fle, true); dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); dpaa2_fl_set_len(in_fle, to_hash); dpaa2_fl_set_format(out_fle, dpaa2_fl_single); dpaa2_fl_set_addr(out_fle, state->ctx_dma); dpaa2_fl_set_len(out_fle, ctx->ctx_len); req_ctx->flc = &ctx->flc[UPDATE_FIRST]; req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST]; req_ctx->cbk = ahash_done_ctx_dst; req_ctx->ctx = &req->base; req_ctx->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); if (ret != -EINPROGRESS && !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) goto unmap_ctx; state->update = ahash_update_ctx; state->finup = ahash_finup_ctx; state->final = ahash_final_ctx; } else if (*next_buflen) { scatterwalk_map_and_copy(buf + *buflen, req->src, 0, req->nbytes, 0); *buflen = *next_buflen; print_hex_dump_debug("buf@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); } return ret; unmap_ctx: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE); qi_cache_free(edesc); return ret; } static int ahash_finup_no_ctx(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; int buflen = state->buflen; int qm_sg_bytes, src_nents, mapped_nents; int digestsize = crypto_ahash_digestsize(ahash); struct ahash_edesc *edesc; struct dpaa2_sg_entry *sg_table; int ret = -ENOMEM; src_nents = sg_nents_for_len(req->src, req->nbytes); if (src_nents < 0) { dev_err(ctx->dev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(ctx->dev, "unable to DMA map source\n"); return ret; } } else { mapped_nents = 0; } /* allocate space for base edesc and link tables */ edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); return ret; } edesc->src_nents = src_nents; qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table); sg_table = &edesc->sgt[0]; ret = buf_map_to_qm_sg(ctx->dev, sg_table, state); if (ret) goto unmap; sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0); edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { dev_err(ctx->dev, "unable to map S/G table\n"); ret = -ENOMEM; goto unmap; } edesc->qm_sg_bytes = qm_sg_bytes; state->ctx_dma_len = digestsize; state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize, DMA_FROM_DEVICE); if (dma_mapping_error(ctx->dev, state->ctx_dma)) { dev_err(ctx->dev, "unable to map ctx\n"); state->ctx_dma = 0; ret = -ENOMEM; goto unmap; } memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); dpaa2_fl_set_final(in_fle, true); dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); dpaa2_fl_set_len(in_fle, buflen + req->nbytes); dpaa2_fl_set_format(out_fle, dpaa2_fl_single); dpaa2_fl_set_addr(out_fle, state->ctx_dma); dpaa2_fl_set_len(out_fle, digestsize); req_ctx->flc = &ctx->flc[DIGEST]; req_ctx->flc_dma = ctx->flc_dma[DIGEST]; req_ctx->cbk = ahash_done; req_ctx->ctx = &req->base; req_ctx->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); if (ret != -EINPROGRESS && !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) goto unmap; return ret; unmap: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE); qi_cache_free(edesc); return ret; } static int ahash_update_first(struct ahash_request *req) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_request *req_ctx = &state->caam_req; struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1]; struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0]; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; u8 *buf = state->buf; int *buflen = &state->buflen; int *next_buflen = &state->next_buflen; int to_hash; int src_nents, mapped_nents; struct ahash_edesc *edesc; int ret = 0; *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - 1); to_hash = req->nbytes - *next_buflen; if (to_hash) { struct dpaa2_sg_entry *sg_table; int src_len = req->nbytes - *next_buflen; src_nents = sg_nents_for_len(req->src, src_len); if (src_nents < 0) { dev_err(ctx->dev, "Invalid number of src SG.\n"); return src_nents; } if (src_nents) { mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); if (!mapped_nents) { dev_err(ctx->dev, "unable to map source for DMA\n"); return -ENOMEM; } } else { mapped_nents = 0; } /* allocate space for base edesc and link tables */ edesc = qi_cache_zalloc(flags); if (!edesc) { dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE); return -ENOMEM; } edesc->src_nents = src_nents; sg_table = &edesc->sgt[0]; memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt)); dpaa2_fl_set_final(in_fle, true); dpaa2_fl_set_len(in_fle, to_hash); if (mapped_nents > 1) { int qm_sg_bytes; sg_to_qm_sg_last(req->src, src_len, sg_table, 0); qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table); edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) { dev_err(ctx->dev, "unable to map S/G table\n"); ret = -ENOMEM; goto unmap_ctx; } edesc->qm_sg_bytes = qm_sg_bytes; dpaa2_fl_set_format(in_fle, dpaa2_fl_sg); dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma); } else { dpaa2_fl_set_format(in_fle, dpaa2_fl_single); dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src)); } state->ctx_dma_len = ctx->ctx_len; state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, ctx->ctx_len, DMA_FROM_DEVICE); if (dma_mapping_error(ctx->dev, state->ctx_dma)) { dev_err(ctx->dev, "unable to map ctx\n"); state->ctx_dma = 0; ret = -ENOMEM; goto unmap_ctx; } dpaa2_fl_set_format(out_fle, dpaa2_fl_single); dpaa2_fl_set_addr(out_fle, state->ctx_dma); dpaa2_fl_set_len(out_fle, ctx->ctx_len); req_ctx->flc = &ctx->flc[UPDATE_FIRST]; req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST]; req_ctx->cbk = ahash_done_ctx_dst; req_ctx->ctx = &req->base; req_ctx->edesc = edesc; ret = dpaa2_caam_enqueue(ctx->dev, req_ctx); if (ret != -EINPROGRESS && !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) goto unmap_ctx; state->update = ahash_update_ctx; state->finup = ahash_finup_ctx; state->final = ahash_final_ctx; } else if (*next_buflen) { state->update = ahash_update_no_ctx; state->finup = ahash_finup_no_ctx; state->final = ahash_final_no_ctx; scatterwalk_map_and_copy(buf, req->src, 0, req->nbytes, 0); *buflen = *next_buflen; print_hex_dump_debug("buf@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, buf, *buflen, 1); } return ret; unmap_ctx: ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE); qi_cache_free(edesc); return ret; } static int ahash_finup_first(struct ahash_request *req) { return ahash_digest(req); } static int ahash_init(struct ahash_request *req) { struct caam_hash_state *state = ahash_request_ctx_dma(req); state->update = ahash_update_first; state->finup = ahash_finup_first; state->final = ahash_final_no_ctx; state->ctx_dma = 0; state->ctx_dma_len = 0; state->buf_dma = 0; state->buflen = 0; state->next_buflen = 0; return 0; } static int ahash_update(struct ahash_request *req) { struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->update(req); } static int 
ahash_finup(struct ahash_request *req) { struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->finup(req); } static int ahash_final(struct ahash_request *req) { struct caam_hash_state *state = ahash_request_ctx_dma(req); return state->final(req); } static int ahash_export(struct ahash_request *req, void *out) { struct caam_hash_state *state = ahash_request_ctx_dma(req); struct caam_export_state *export = out; u8 *buf = state->buf; int len = state->buflen; memcpy(export->buf, buf, len); memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx)); export->buflen = len; export->update = state->update; export->final = state->final; export->finup = state->finup; return 0; } static int ahash_import(struct ahash_request *req, const void *in) { struct caam_hash_state *state = ahash_request_ctx_dma(req); const struct caam_export_state *export = in; memset(state, 0, sizeof(*state)); memcpy(state->buf, export->buf, export->buflen); memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx)); state->buflen = export->buflen; state->update = export->update; state->final = export->final; state->finup = export->finup; return 0; } struct caam_hash_template { char name[CRYPTO_MAX_ALG_NAME]; char driver_name[CRYPTO_MAX_ALG_NAME]; char hmac_name[CRYPTO_MAX_ALG_NAME]; char hmac_driver_name[CRYPTO_MAX_ALG_NAME]; unsigned int blocksize; struct ahash_alg template_ahash; u32 alg_type; }; /* ahash descriptors */ static struct caam_hash_template driver_hash[] = { { .name = "sha1", .driver_name = "sha1-caam-qi2", .hmac_name = "hmac(sha1)", .hmac_driver_name = "hmac-sha1-caam-qi2", .blocksize = SHA1_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_SHA1, }, { .name = "sha224", .driver_name = "sha224-caam-qi2", .hmac_name = "hmac(sha224)", .hmac_driver_name = "hmac-sha224-caam-qi2", .blocksize = SHA224_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_SHA224, }, { .name = "sha256", .driver_name = "sha256-caam-qi2", .hmac_name = "hmac(sha256)", .hmac_driver_name = "hmac-sha256-caam-qi2", .blocksize = SHA256_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = SHA256_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_SHA256, }, { .name = "sha384", .driver_name = "sha384-caam-qi2", .hmac_name = "hmac(sha384)", .hmac_driver_name = "hmac-sha384-caam-qi2", .blocksize = SHA384_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_SHA384, }, { .name = "sha512", .driver_name = "sha512-caam-qi2", .hmac_name = "hmac(sha512)", 
.hmac_driver_name = "hmac-sha512-caam-qi2", .blocksize = SHA512_BLOCK_SIZE, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_SHA512, }, { .name = "md5", .driver_name = "md5-caam-qi2", .hmac_name = "hmac(md5)", .hmac_driver_name = "hmac-md5-caam-qi2", .blocksize = MD5_BLOCK_WORDS * 4, .template_ahash = { .init = ahash_init, .update = ahash_update, .final = ahash_final, .finup = ahash_finup, .digest = ahash_digest, .export = ahash_export, .import = ahash_import, .setkey = ahash_setkey, .halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = sizeof(struct caam_export_state), }, }, .alg_type = OP_ALG_ALGSEL_MD5, } }; struct caam_hash_alg { struct list_head entry; struct device *dev; int alg_type; struct ahash_alg ahash_alg; }; static int caam_hash_cra_init(struct crypto_tfm *tfm) { struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); struct crypto_alg *base = tfm->__crt_alg; struct hash_alg_common *halg = container_of(base, struct hash_alg_common, base); struct ahash_alg *alg = container_of(halg, struct ahash_alg, halg); struct caam_hash_alg *caam_hash = container_of(alg, struct caam_hash_alg, ahash_alg); struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); /* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */ static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE, HASH_MSG_LEN + SHA1_DIGEST_SIZE, HASH_MSG_LEN + 32, HASH_MSG_LEN + SHA256_DIGEST_SIZE, HASH_MSG_LEN + 64, HASH_MSG_LEN + SHA512_DIGEST_SIZE }; dma_addr_t dma_addr; int i; ctx->dev = caam_hash->dev; if (alg->setkey) { ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key, ARRAY_SIZE(ctx->key), DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) { dev_err(ctx->dev, "unable to map key\n"); return -ENOMEM; } } dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc), DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); if (dma_mapping_error(ctx->dev, dma_addr)) { dev_err(ctx->dev, "unable to map shared descriptors\n"); if (ctx->adata.key_dma) dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, ARRAY_SIZE(ctx->key), DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); return -ENOMEM; } for (i = 0; i < HASH_NUM_OP; i++) ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]); /* copy descriptor header template value */ ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; ctx->ctx_len = runninglen[(ctx->adata.algtype & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT]; crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state)); /* * For keyed hash algorithms shared descriptors * will be created later in setkey() callback */ return alg->setkey ? 
0 : ahash_set_sh_desc(ahash); } static void caam_hash_cra_exit(struct crypto_tfm *tfm) { struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc), DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC); if (ctx->adata.key_dma) dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma, ARRAY_SIZE(ctx->key), DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); } static struct caam_hash_alg *caam_hash_alloc(struct device *dev, struct caam_hash_template *template, bool keyed) { struct caam_hash_alg *t_alg; struct ahash_alg *halg; struct crypto_alg *alg; t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL); if (!t_alg) return ERR_PTR(-ENOMEM); t_alg->ahash_alg = template->template_ahash; halg = &t_alg->ahash_alg; alg = &halg->halg.base; if (keyed) { snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->hmac_name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->hmac_driver_name); } else { snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->driver_name); t_alg->ahash_alg.setkey = NULL; } alg->cra_module = THIS_MODULE; alg->cra_init = caam_hash_cra_init; alg->cra_exit = caam_hash_cra_exit; alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding(); alg->cra_priority = CAAM_CRA_PRIORITY; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY; t_alg->alg_type = template->alg_type; t_alg->dev = dev; return t_alg; } static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx) { struct dpaa2_caam_priv_per_cpu *ppriv; ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx); napi_schedule_irqoff(&ppriv->napi); } static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv) { struct device *dev = priv->dev; struct dpaa2_io_notification_ctx *nctx; struct dpaa2_caam_priv_per_cpu *ppriv; int err, i = 0, cpu; for_each_online_cpu(cpu) { ppriv = per_cpu_ptr(priv->ppriv, cpu); ppriv->priv = priv; nctx = &ppriv->nctx; nctx->is_cdan = 0; nctx->id = ppriv->rsp_fqid; nctx->desired_cpu = cpu; nctx->cb = dpaa2_caam_fqdan_cb; /* Register notification callbacks */ ppriv->dpio = dpaa2_io_service_select(cpu); err = dpaa2_io_service_register(ppriv->dpio, nctx, dev); if (unlikely(err)) { dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu); nctx->cb = NULL; /* * If no affine DPIO for this core, there's probably * none available for next cores either. Signal we want * to retry later, in case the DPIO devices weren't * probed yet. 
*/ err = -EPROBE_DEFER; goto err; } ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE, dev); if (unlikely(!ppriv->store)) { dev_err(dev, "dpaa2_io_store_create() failed\n"); err = -ENOMEM; goto err; } if (++i == priv->num_pairs) break; } return 0; err: for_each_online_cpu(cpu) { ppriv = per_cpu_ptr(priv->ppriv, cpu); if (!ppriv->nctx.cb) break; dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev); } for_each_online_cpu(cpu) { ppriv = per_cpu_ptr(priv->ppriv, cpu); if (!ppriv->store) break; dpaa2_io_store_destroy(ppriv->store); } return err; } static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv) { struct dpaa2_caam_priv_per_cpu *ppriv; int i = 0, cpu; for_each_online_cpu(cpu) { ppriv = per_cpu_ptr(priv->ppriv, cpu); dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, priv->dev); dpaa2_io_store_destroy(ppriv->store); if (++i == priv->num_pairs) return; } } static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv) { struct dpseci_rx_queue_cfg rx_queue_cfg; struct device *dev = priv->dev; struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); struct dpaa2_caam_priv_per_cpu *ppriv; int err = 0, i = 0, cpu; /* Configure Rx queues */ for_each_online_cpu(cpu) { ppriv = per_cpu_ptr(priv->ppriv, cpu); rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST | DPSECI_QUEUE_OPT_USER_CTX; rx_queue_cfg.order_preservation_en = 0; rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO; rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; /* * Rx priority (WQ) doesn't really matter, since we use * pull mode, i.e. volatile dequeues from specific FQs */ rx_queue_cfg.dest_cfg.priority = 0; rx_queue_cfg.user_ctx = ppriv->nctx.qman64; err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, &rx_queue_cfg); if (err) { dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n", err); return err; } if (++i == priv->num_pairs) break; } return err; } static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv) { struct device *dev = priv->dev; if (!priv->cscn_mem) return; dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); kfree(priv->cscn_mem); } static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv) { struct device *dev = priv->dev; struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); int err; if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); if (err) dev_err(dev, "dpseci_reset() failed\n"); } dpaa2_dpseci_congestion_free(priv); dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); } static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv, const struct dpaa2_fd *fd) { struct caam_request *req; u32 fd_err; if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) { dev_err(priv->dev, "Only Frame List FD format is supported!\n"); return; } fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK; if (unlikely(fd_err)) dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err); /* * FD[ADDR] is guaranteed to be valid, irrespective of errors reported * in FD[ERR] or FD[FRC]. 
*/ req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd)); dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt), DMA_BIDIRECTIONAL); req->cbk(req->ctx, dpaa2_fd_get_frc(fd)); } static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv) { int err; /* Retry while portal is busy */ do { err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid, ppriv->store); } while (err == -EBUSY); if (unlikely(err)) dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d", err); return err; } static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv) { struct dpaa2_dq *dq; int cleaned = 0, is_last; do { dq = dpaa2_io_store_next(ppriv->store, &is_last); if (unlikely(!dq)) { if (unlikely(!is_last)) { dev_dbg(ppriv->priv->dev, "FQ %d returned no valid frames\n", ppriv->rsp_fqid); /* * MUST retry until we get some sort of * valid response token (be it "empty dequeue" * or a valid frame). */ continue; } break; } /* Process FD */ dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq)); cleaned++; } while (!is_last); return cleaned; } static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget) { struct dpaa2_caam_priv_per_cpu *ppriv; struct dpaa2_caam_priv *priv; int err, cleaned = 0, store_cleaned; ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi); priv = ppriv->priv; if (unlikely(dpaa2_caam_pull_fq(ppriv))) return 0; do { store_cleaned = dpaa2_caam_store_consume(ppriv); cleaned += store_cleaned; if (store_cleaned == 0 || cleaned > budget - DPAA2_CAAM_STORE_SIZE) break; /* Try to dequeue some more */ err = dpaa2_caam_pull_fq(ppriv); if (unlikely(err)) break; } while (1); if (cleaned < budget) { napi_complete_done(napi, cleaned); err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx); if (unlikely(err)) dev_err(priv->dev, "Notification rearm failed: %d\n", err); } return cleaned; } static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv, u16 token) { struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 }; struct device *dev = priv->dev; unsigned int alignmask; int err; /* * Congestion group feature supported starting with DPSECI API v5.1 * and only when object has been created with this capability. 
*/ if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) || !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG)) return 0; alignmask = DPAA2_CSCN_ALIGN - 1; alignmask |= dma_get_cache_alignment() - 1; priv->cscn_mem = kzalloc(ALIGN(DPAA2_CSCN_SIZE, alignmask + 1), GFP_KERNEL); if (!priv->cscn_mem) return -ENOMEM; priv->cscn_dma = dma_map_single(dev, priv->cscn_mem, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); if (dma_mapping_error(dev, priv->cscn_dma)) { dev_err(dev, "Error mapping CSCN memory area\n"); err = -ENOMEM; goto err_dma_map; } cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES; cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH; cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH; cong_notif_cfg.message_ctx = (uintptr_t)priv; cong_notif_cfg.message_iova = priv->cscn_dma; cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER | DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT | DPSECI_CGN_MODE_COHERENT_WRITE; err = dpseci_set_congestion_notification(priv->mc_io, 0, token, &cong_notif_cfg); if (err) { dev_err(dev, "dpseci_set_congestion_notification failed\n"); goto err_set_cong; } return 0; err_set_cong: dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); err_dma_map: kfree(priv->cscn_mem); return err; } static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) { struct device *dev = &ls_dev->dev; struct dpaa2_caam_priv *priv; struct dpaa2_caam_priv_per_cpu *ppriv; int err, cpu; u8 i; priv = dev_get_drvdata(dev); priv->dev = dev; priv->dpsec_id = ls_dev->obj_desc.id; /* Get a handle for the DPSECI this interface is associate with */ err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle); if (err) { dev_err(dev, "dpseci_open() failed: %d\n", err); goto err_open; } err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver, &priv->minor_ver); if (err) { dev_err(dev, "dpseci_get_api_version() failed\n"); goto err_get_vers; } dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver); if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) { err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle); if (err) { dev_err(dev, "dpseci_reset() failed\n"); goto err_get_vers; } } err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle, &priv->dpseci_attr); if (err) { dev_err(dev, "dpseci_get_attributes() failed\n"); goto err_get_vers; } err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle, &priv->sec_attr); if (err) { dev_err(dev, "dpseci_get_sec_attr() failed\n"); goto err_get_vers; } err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle); if (err) { dev_err(dev, "setup_congestion() failed\n"); goto err_get_vers; } priv->num_pairs = min(priv->dpseci_attr.num_rx_queues, priv->dpseci_attr.num_tx_queues); if (priv->num_pairs > num_online_cpus()) { dev_warn(dev, "%d queues won't be used\n", priv->num_pairs - num_online_cpus()); priv->num_pairs = num_online_cpus(); } for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) { err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, &priv->rx_queue_attr[i]); if (err) { dev_err(dev, "dpseci_get_rx_queue() failed\n"); goto err_get_rx_queue; } } for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) { err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i, &priv->tx_queue_attr[i]); if (err) { dev_err(dev, "dpseci_get_tx_queue() failed\n"); goto err_get_rx_queue; } } i = 0; for_each_online_cpu(cpu) { u8 j; j = i % priv->num_pairs; ppriv = per_cpu_ptr(priv->ppriv, cpu); ppriv->req_fqid = priv->tx_queue_attr[j].fqid; /* * Allow all cores 
to enqueue, while only some of them * will take part in dequeuing. */ if (++i > priv->num_pairs) continue; ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid; ppriv->prio = j; dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j, priv->rx_queue_attr[j].fqid, priv->tx_queue_attr[j].fqid); ppriv->net_dev.dev = *dev; INIT_LIST_HEAD(&ppriv->net_dev.napi_list); netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, DPAA2_CAAM_NAPI_WEIGHT); } return 0; err_get_rx_queue: dpaa2_dpseci_congestion_free(priv); err_get_vers: dpseci_close(priv->mc_io, 0, ls_dev->mc_handle); err_open: return err; } static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv) { struct device *dev = priv->dev; struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); struct dpaa2_caam_priv_per_cpu *ppriv; int i; for (i = 0; i < priv->num_pairs; i++) { ppriv = per_cpu_ptr(priv->ppriv, i); napi_enable(&ppriv->napi); } return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle); } static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv) { struct device *dev = priv->dev; struct dpaa2_caam_priv_per_cpu *ppriv; struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev); int i, err = 0, enabled; err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle); if (err) { dev_err(dev, "dpseci_disable() failed\n"); return err; } err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled); if (err) { dev_err(dev, "dpseci_is_enabled() failed\n"); return err; } dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true"); for (i = 0; i < priv->num_pairs; i++) { ppriv = per_cpu_ptr(priv->ppriv, i); napi_disable(&ppriv->napi); netif_napi_del(&ppriv->napi); } return 0; } static struct list_head hash_list; static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev) { struct device *dev; struct dpaa2_caam_priv *priv; int i, err = 0; bool registered = false; /* * There is no way to get CAAM endianness - there is no direct register * space access and MC f/w does not provide this attribute. * All DPAA2-based SoCs have little endian CAAM, thus hard-code this * property. 
*/ caam_little_end = true; caam_imx = false; dev = &dpseci_dev->dev; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; dev_set_drvdata(dev, priv); priv->domain = iommu_get_domain_for_dev(dev); qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE, 0, 0, NULL); if (!qi_cache) { dev_err(dev, "Can't allocate SEC cache\n"); return -ENOMEM; } err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49)); if (err) { dev_err(dev, "dma_set_mask_and_coherent() failed\n"); goto err_dma_mask; } /* Obtain a MC portal */ err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io); if (err) { if (err == -ENXIO) err = -EPROBE_DEFER; else dev_err(dev, "MC portal allocation failed\n"); goto err_dma_mask; } priv->ppriv = alloc_percpu(*priv->ppriv); if (!priv->ppriv) { dev_err(dev, "alloc_percpu() failed\n"); err = -ENOMEM; goto err_alloc_ppriv; } /* DPSECI initialization */ err = dpaa2_dpseci_setup(dpseci_dev); if (err) { dev_err(dev, "dpaa2_dpseci_setup() failed\n"); goto err_dpseci_setup; } /* DPIO */ err = dpaa2_dpseci_dpio_setup(priv); if (err) { dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n"); goto err_dpio_setup; } /* DPSECI binding to DPIO */ err = dpaa2_dpseci_bind(priv); if (err) { dev_err(dev, "dpaa2_dpseci_bind() failed\n"); goto err_bind; } /* DPSECI enable */ err = dpaa2_dpseci_enable(priv); if (err) { dev_err(dev, "dpaa2_dpseci_enable() failed\n"); goto err_bind; } dpaa2_dpseci_debugfs_init(priv); /* register crypto algorithms the device supports */ for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { struct caam_skcipher_alg *t_alg = driver_algs + i; u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; /* Skip DES algorithms if not supported by device */ if (!priv->sec_attr.des_acc_num && (alg_sel == OP_ALG_ALGSEL_3DES || alg_sel == OP_ALG_ALGSEL_DES)) continue; /* Skip AES algorithms if not supported by device */ if (!priv->sec_attr.aes_acc_num && alg_sel == OP_ALG_ALGSEL_AES) continue; /* Skip CHACHA20 algorithms if not supported by device */ if (alg_sel == OP_ALG_ALGSEL_CHACHA20 && !priv->sec_attr.ccha_acc_num) continue; t_alg->caam.dev = dev; caam_skcipher_alg_init(t_alg); err = crypto_register_skcipher(&t_alg->skcipher); if (err) { dev_warn(dev, "%s alg registration failed: %d\n", t_alg->skcipher.base.cra_driver_name, err); continue; } t_alg->registered = true; registered = true; } for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { struct caam_aead_alg *t_alg = driver_aeads + i; u32 c1_alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; u32 c2_alg_sel = t_alg->caam.class2_alg_type & OP_ALG_ALGSEL_MASK; /* Skip DES algorithms if not supported by device */ if (!priv->sec_attr.des_acc_num && (c1_alg_sel == OP_ALG_ALGSEL_3DES || c1_alg_sel == OP_ALG_ALGSEL_DES)) continue; /* Skip AES algorithms if not supported by device */ if (!priv->sec_attr.aes_acc_num && c1_alg_sel == OP_ALG_ALGSEL_AES) continue; /* Skip CHACHA20 algorithms if not supported by device */ if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !priv->sec_attr.ccha_acc_num) continue; /* Skip POLY1305 algorithms if not supported by device */ if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !priv->sec_attr.ptha_acc_num) continue; /* * Skip algorithms requiring message digests * if MD not supported by device. 
*/ if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 && !priv->sec_attr.md_acc_num) continue; t_alg->caam.dev = dev; caam_aead_alg_init(t_alg); err = crypto_register_aead(&t_alg->aead); if (err) { dev_warn(dev, "%s alg registration failed: %d\n", t_alg->aead.base.cra_driver_name, err); continue; } t_alg->registered = true; registered = true; } if (registered) dev_info(dev, "algorithms registered in /proc/crypto\n"); /* register hash algorithms the device supports */ INIT_LIST_HEAD(&hash_list); /* * Skip registration of any hashing algorithms if MD block * is not present. */ if (!priv->sec_attr.md_acc_num) return 0; for (i = 0; i < ARRAY_SIZE(driver_hash); i++) { struct caam_hash_alg *t_alg; struct caam_hash_template *alg = driver_hash + i; /* register hmac version */ t_alg = caam_hash_alloc(dev, alg, true); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); dev_warn(dev, "%s hash alg allocation failed: %d\n", alg->hmac_driver_name, err); continue; } err = crypto_register_ahash(&t_alg->ahash_alg); if (err) { dev_warn(dev, "%s alg registration failed: %d\n", t_alg->ahash_alg.halg.base.cra_driver_name, err); kfree(t_alg); } else { list_add_tail(&t_alg->entry, &hash_list); } /* register unkeyed version */ t_alg = caam_hash_alloc(dev, alg, false); if (IS_ERR(t_alg)) { err = PTR_ERR(t_alg); dev_warn(dev, "%s alg allocation failed: %d\n", alg->driver_name, err); continue; } err = crypto_register_ahash(&t_alg->ahash_alg); if (err) { dev_warn(dev, "%s alg registration failed: %d\n", t_alg->ahash_alg.halg.base.cra_driver_name, err); kfree(t_alg); } else { list_add_tail(&t_alg->entry, &hash_list); } } if (!list_empty(&hash_list)) dev_info(dev, "hash algorithms registered in /proc/crypto\n"); return err; err_bind: dpaa2_dpseci_dpio_free(priv); err_dpio_setup: dpaa2_dpseci_free(priv); err_dpseci_setup: free_percpu(priv->ppriv); err_alloc_ppriv: fsl_mc_portal_free(priv->mc_io); err_dma_mask: kmem_cache_destroy(qi_cache); return err; } static void __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev) { struct device *dev; struct dpaa2_caam_priv *priv; int i; dev = &ls_dev->dev; priv = dev_get_drvdata(dev); dpaa2_dpseci_debugfs_exit(priv); for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { struct caam_aead_alg *t_alg = driver_aeads + i; if (t_alg->registered) crypto_unregister_aead(&t_alg->aead); } for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { struct caam_skcipher_alg *t_alg = driver_algs + i; if (t_alg->registered) crypto_unregister_skcipher(&t_alg->skcipher); } if (hash_list.next) { struct caam_hash_alg *t_hash_alg, *p; list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) { crypto_unregister_ahash(&t_hash_alg->ahash_alg); list_del(&t_hash_alg->entry); kfree(t_hash_alg); } } dpaa2_dpseci_disable(priv); dpaa2_dpseci_dpio_free(priv); dpaa2_dpseci_free(priv); free_percpu(priv->ppriv); fsl_mc_portal_free(priv->mc_io); kmem_cache_destroy(qi_cache); } int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req) { struct dpaa2_fd fd; struct dpaa2_caam_priv *priv = dev_get_drvdata(dev); struct dpaa2_caam_priv_per_cpu *ppriv; int err = 0, i; if (IS_ERR(req)) return PTR_ERR(req); if (priv->cscn_mem) { dma_sync_single_for_cpu(priv->dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE); if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem))) { dev_dbg_ratelimited(dev, "Dropping request\n"); return -EBUSY; } } dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma); req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt), DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, req->fd_flt_dma)) { dev_err(dev, 
"DMA mapping error for QI enqueue request\n"); goto err_out; } memset(&fd, 0, sizeof(fd)); dpaa2_fd_set_format(&fd, dpaa2_fd_list); dpaa2_fd_set_addr(&fd, req->fd_flt_dma); dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1])); dpaa2_fd_set_flc(&fd, req->flc_dma); ppriv = raw_cpu_ptr(priv->ppriv); for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) { err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid, &fd); if (err != -EBUSY) break; cpu_relax(); } if (unlikely(err)) { dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err); goto err_out; } return -EINPROGRESS; err_out: dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt), DMA_BIDIRECTIONAL); return -EIO; } EXPORT_SYMBOL(dpaa2_caam_enqueue); static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = { { .vendor = FSL_MC_VENDOR_FREESCALE, .obj_type = "dpseci", }, { .vendor = 0x0 } }; MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table); static struct fsl_mc_driver dpaa2_caam_driver = { .driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, }, .probe = dpaa2_caam_probe, .remove = dpaa2_caam_remove, .match_id_table = dpaa2_caam_match_id_table }; MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Freescale Semiconductor, Inc"); MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver"); module_fsl_mc_driver(dpaa2_caam_driver);
linux-master
drivers/crypto/caam/caamalg_qi2.c
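/*
 * Annotation (not part of the file above): the ahash/skcipher/aead entry
 * points in caamalg_qi2.c complete asynchronously -- dpaa2_caam_enqueue()
 * returns -EINPROGRESS when a frame is queued, and -EBUSY only counts as
 * accepted when the request sets CRYPTO_TFM_REQ_MAY_BACKLOG. Below is a
 * minimal, hedged sketch of how a kernel caller typically consumes that
 * convention through the generic ahash API. The algorithm name "sha256"
 * (registered above with driver name "sha256-caam-qi2"), the helper name
 * and the one-segment source buffer are illustrative assumptions, not code
 * taken from this driver.
 */
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hypothetical helper: digest @len bytes at @buf (linear, DMA-able memory,
 * not on the stack) into @digest, blocking until the async completion fires.
 */
static int example_sha256_digest(const void *buf, unsigned int len, u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	/* MAY_BACKLOG is what lets the driver treat -EBUSY as "queued" */
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
					CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, digest, len);

	/* folds -EINPROGRESS/-EBUSY into a blocking wait for the callback */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}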
// SPDX-License-Identifier: GPL-2.0
/*
 * caam - Freescale FSL CAAM support for Public Key Cryptography descriptors
 *
 * Copyright 2016 Freescale Semiconductor, Inc.
 *
 * There is no Shared Descriptor for PKC so that the Job Descriptor must carry
 * all the desired key parameters, input and output pointers.
 */
#include "caampkc.h"
#include "desc_constr.h"

/* Descriptor for RSA Public operation */
void init_rsa_pub_desc(u32 *desc, struct rsa_pub_pdb *pdb)
{
	init_job_desc_pdb(desc, 0, SIZEOF_RSA_PUB_PDB);
	append_cmd(desc, pdb->sgf);
	append_ptr(desc, pdb->f_dma);
	append_ptr(desc, pdb->g_dma);
	append_ptr(desc, pdb->n_dma);
	append_ptr(desc, pdb->e_dma);
	append_cmd(desc, pdb->f_len);
	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSAENC_PUBKEY);
}

/* Descriptor for RSA Private operation - Private Key Form #1 */
void init_rsa_priv_f1_desc(u32 *desc, struct rsa_priv_f1_pdb *pdb)
{
	init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F1_PDB);
	append_cmd(desc, pdb->sgf);
	append_ptr(desc, pdb->g_dma);
	append_ptr(desc, pdb->f_dma);
	append_ptr(desc, pdb->n_dma);
	append_ptr(desc, pdb->d_dma);
	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
			 RSA_PRIV_KEY_FRM_1);
}

/* Descriptor for RSA Private operation - Private Key Form #2 */
void init_rsa_priv_f2_desc(u32 *desc, struct rsa_priv_f2_pdb *pdb)
{
	init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F2_PDB);
	append_cmd(desc, pdb->sgf);
	append_ptr(desc, pdb->g_dma);
	append_ptr(desc, pdb->f_dma);
	append_ptr(desc, pdb->d_dma);
	append_ptr(desc, pdb->p_dma);
	append_ptr(desc, pdb->q_dma);
	append_ptr(desc, pdb->tmp1_dma);
	append_ptr(desc, pdb->tmp2_dma);
	append_cmd(desc, pdb->p_q_len);
	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
			 RSA_PRIV_KEY_FRM_2);
}

/* Descriptor for RSA Private operation - Private Key Form #3 */
void init_rsa_priv_f3_desc(u32 *desc, struct rsa_priv_f3_pdb *pdb)
{
	init_job_desc_pdb(desc, 0, SIZEOF_RSA_PRIV_F3_PDB);
	append_cmd(desc, pdb->sgf);
	append_ptr(desc, pdb->g_dma);
	append_ptr(desc, pdb->f_dma);
	append_ptr(desc, pdb->c_dma);
	append_ptr(desc, pdb->p_dma);
	append_ptr(desc, pdb->q_dma);
	append_ptr(desc, pdb->dp_dma);
	append_ptr(desc, pdb->dq_dma);
	append_ptr(desc, pdb->tmp1_dma);
	append_ptr(desc, pdb->tmp2_dma);
	append_cmd(desc, pdb->p_q_len);
	append_operation(desc, OP_TYPE_UNI_PROTOCOL | OP_PCLID_RSADEC_PRVKEY |
			 RSA_PRIV_KEY_FRM_3);
}
linux-master
drivers/crypto/caam/pkc_desc.c
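/*
 * Annotation (not part of the file above): because there is no shared
 * descriptor for PKC, every RSA job descriptor carries its whole PDB inline.
 * The sketch below shows, under stated assumptions, how a caller could fill
 * a struct rsa_pub_pdb and hand it to init_rsa_pub_desc(). The helper name
 * is hypothetical; the dma_addr_t arguments are assumed to be bus addresses
 * the caller has already DMA-mapped; @sgf is assumed to be pre-packed with
 * the key and exponent sizes per the PDB layout (not reproduced here); and
 * the descriptor buffer size mirrors the "64-word Descriptor h/w Buffer"
 * limit noted elsewhere in this driver.
 */
#include "caampkc.h"
#include "desc_constr.h"

static void example_build_rsa_pub_desc(u32 *desc,	/* >= 64 u32 words */
				       u32 sgf,		/* pre-packed sizes */
				       dma_addr_t f_dma,	/* input f */
				       dma_addr_t g_dma,	/* output g */
				       dma_addr_t n_dma,	/* modulus n */
				       dma_addr_t e_dma,	/* exponent e */
				       u32 f_len)	/* length of f, bytes */
{
	struct rsa_pub_pdb pdb = {
		.sgf   = sgf,
		.f_dma = f_dma,
		.g_dma = g_dma,
		.n_dma = n_dma,
		.e_dma = e_dma,
		.f_len = f_len,
	};

	/* job descriptor header + inline PDB + RSAENC_PUBKEY PROTOCOL
	 * operation, exactly what init_rsa_pub_desc() above emits */
	init_rsa_pub_desc(desc, &pdb);
}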
// SPDX-License-Identifier: GPL-2.0+ /* * Freescale FSL CAAM support for crypto API over QI backend. * Based on caamalg.c * * Copyright 2013-2016 Freescale Semiconductor, Inc. * Copyright 2016-2019 NXP */ #include "compat.h" #include "ctrl.h" #include "regs.h" #include "intern.h" #include "desc_constr.h" #include "error.h" #include "sg_sw_qm.h" #include "key_gen.h" #include "qi.h" #include "jr.h" #include "caamalg_desc.h" #include <crypto/xts.h> #include <asm/unaligned.h> #include <linux/device.h> #include <linux/err.h> #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/string.h> /* * crypto alg */ #define CAAM_CRA_PRIORITY 2000 /* max key is sum of AES_MAX_KEY_SIZE, max split key size */ #define CAAM_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + \ SHA512_DIGEST_SIZE * 2) #define DESC_MAX_USED_BYTES (DESC_QI_AEAD_GIVENC_LEN + \ CAAM_MAX_KEY_SIZE) #define DESC_MAX_USED_LEN (DESC_MAX_USED_BYTES / CAAM_CMD_SZ) struct caam_alg_entry { int class1_alg_type; int class2_alg_type; bool rfc3686; bool geniv; bool nodkp; }; struct caam_aead_alg { struct aead_alg aead; struct caam_alg_entry caam; bool registered; }; struct caam_skcipher_alg { struct skcipher_alg skcipher; struct caam_alg_entry caam; bool registered; }; /* * per-session context */ struct caam_ctx { struct device *jrdev; u32 sh_desc_enc[DESC_MAX_USED_LEN]; u32 sh_desc_dec[DESC_MAX_USED_LEN]; u8 key[CAAM_MAX_KEY_SIZE]; dma_addr_t key_dma; enum dma_data_direction dir; struct alginfo adata; struct alginfo cdata; unsigned int authsize; struct device *qidev; spinlock_t lock; /* Protects multiple init of driver context */ struct caam_drv_ctx *drv_ctx[NUM_OP]; bool xts_key_fallback; struct crypto_skcipher *fallback; }; struct caam_skcipher_req_ctx { struct skcipher_request fallback_req; }; static int aead_set_sh_desc(struct crypto_aead *aead) { struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), typeof(*alg), aead); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); u32 ctx1_iv_off = 0; u32 *nonce = NULL; unsigned int data_len[2]; u32 inl_mask; const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == OP_ALG_AAI_CTR_MOD128); const bool is_rfc3686 = alg->caam.rfc3686; struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); if (!ctx->cdata.keylen || !ctx->authsize) return 0; /* * AES-CTR needs to load IV in CONTEXT1 reg * at an offset of 128bits (16bytes) * CONTEXT1[255:128] = IV */ if (ctr_mode) ctx1_iv_off = 16; /* * RFC3686 specific: * CONTEXT1[255:128] = {NONCE, IV, COUNTER} */ if (is_rfc3686) { ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad + ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE); } /* * In case |user key| > |derived key|, using DKP<imm,imm> would result * in invalid opcodes (last bytes of user key) in the resulting * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key * addresses are needed. */ ctx->adata.key_virt = ctx->key; ctx->adata.key_dma = ctx->key_dma; ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad; ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad; data_len[0] = ctx->adata.keylen_pad; data_len[1] = ctx->cdata.keylen; if (alg->caam.geniv) goto skip_enc; /* aead_encrypt shared descriptor */ if (desc_inline_query(DESC_QI_AEAD_ENC_LEN + (is_rfc3686 ? 
DESC_AEAD_CTR_RFC3686_LEN : 0), DESC_JOB_IO_LEN, data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0) return -EINVAL; ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); cnstr_shdsc_aead_encap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, true, ctrlpriv->era); skip_enc: /* aead_decrypt shared descriptor */ if (desc_inline_query(DESC_QI_AEAD_DEC_LEN + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), DESC_JOB_IO_LEN, data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0) return -EINVAL; ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); cnstr_shdsc_aead_decap(ctx->sh_desc_dec, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, alg->caam.geniv, is_rfc3686, nonce, ctx1_iv_off, true, ctrlpriv->era); if (!alg->caam.geniv) goto skip_givenc; /* aead_givencrypt shared descriptor */ if (desc_inline_query(DESC_QI_AEAD_GIVENC_LEN + (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0), DESC_JOB_IO_LEN, data_len, &inl_mask, ARRAY_SIZE(data_len)) < 0) return -EINVAL; ctx->adata.key_inline = !!(inl_mask & 1); ctx->cdata.key_inline = !!(inl_mask & 2); cnstr_shdsc_aead_givencap(ctx->sh_desc_enc, &ctx->cdata, &ctx->adata, ivsize, ctx->authsize, is_rfc3686, nonce, ctx1_iv_off, true, ctrlpriv->era); skip_givenc: return 0; } static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); ctx->authsize = authsize; aead_set_sh_desc(authenc); return 0; } static int aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); struct crypto_authenc_keys keys; int ret = 0; if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) goto badkey; dev_dbg(jrdev, "keylen %d enckeylen %d authkeylen %d\n", keys.authkeylen + keys.enckeylen, keys.enckeylen, keys.authkeylen); print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); /* * If DKP is supported, use it in the shared descriptor to generate * the split key. 
*/ if (ctrlpriv->era >= 6) { ctx->adata.keylen = keys.authkeylen; ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & OP_ALG_ALGSEL_MASK); if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE) goto badkey; memcpy(ctx->key, keys.authkey, keys.authkeylen); memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); dma_sync_single_for_device(jrdev->parent, ctx->key_dma, ctx->adata.keylen_pad + keys.enckeylen, ctx->dir); goto skip_split_key; } ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, keys.authkeylen, CAAM_MAX_KEY_SIZE - keys.enckeylen); if (ret) goto badkey; /* postpend encryption key to auth split key */ memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); dma_sync_single_for_device(jrdev->parent, ctx->key_dma, ctx->adata.keylen_pad + keys.enckeylen, ctx->dir); print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ctx->adata.keylen_pad + keys.enckeylen, 1); skip_split_key: ctx->cdata.keylen = keys.enckeylen; ret = aead_set_sh_desc(aead); if (ret) goto badkey; /* Now update the driver contexts with the new shared descriptor */ if (ctx->drv_ctx[ENCRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], ctx->sh_desc_enc); if (ret) { dev_err(jrdev, "driver enc context update failed\n"); goto badkey; } } if (ctx->drv_ctx[DECRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], ctx->sh_desc_dec); if (ret) { dev_err(jrdev, "driver dec context update failed\n"); goto badkey; } } memzero_explicit(&keys, sizeof(keys)); return ret; badkey: memzero_explicit(&keys, sizeof(keys)); return -EINVAL; } static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_authenc_keys keys; int err; err = crypto_authenc_extractkeys(&keys, key, keylen); if (unlikely(err)) return err; err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: aead_setkey(aead, key, keylen); memzero_explicit(&keys, sizeof(keys)); return err; } static int gcm_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ctx->cdata.keylen; if (!ctx->cdata.keylen || !ctx->authsize) return 0; /* * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_GCM_ENC_LEN) { ctx->cdata.key_inline = true; ctx->cdata.key_virt = ctx->key; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } cnstr_shdsc_gcm_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, ctx->authsize, true); /* * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_GCM_DEC_LEN) { ctx->cdata.key_inline = true; ctx->cdata.key_virt = ctx->key; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, ctx->authsize, true); return 0; } static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_gcm_check_authsize(authsize); if (err) return err; ctx->authsize = authsize; gcm_set_sh_desc(authenc); return 0; } static int gcm_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int ret; ret = aes_check_keylen(keylen); if (ret) return ret; print_hex_dump_debug("key in @" 
__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); dma_sync_single_for_device(jrdev->parent, ctx->key_dma, keylen, ctx->dir); ctx->cdata.keylen = keylen; ret = gcm_set_sh_desc(aead); if (ret) return ret; /* Now update the driver contexts with the new shared descriptor */ if (ctx->drv_ctx[ENCRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], ctx->sh_desc_enc); if (ret) { dev_err(jrdev, "driver enc context update failed\n"); return ret; } } if (ctx->drv_ctx[DECRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], ctx->sh_desc_dec); if (ret) { dev_err(jrdev, "driver dec context update failed\n"); return ret; } } return 0; } static int rfc4106_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ctx->cdata.keylen; if (!ctx->cdata.keylen || !ctx->authsize) return 0; ctx->cdata.key_virt = ctx->key; /* * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) { ctx->cdata.key_inline = true; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, ctx->authsize, true); /* * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) { ctx->cdata.key_inline = true; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, ctx->authsize, true); return 0; } static int rfc4106_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); int err; err = crypto_rfc4106_check_authsize(authsize); if (err) return err; ctx->authsize = authsize; rfc4106_set_sh_desc(authenc); return 0; } static int rfc4106_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int ret; ret = aes_check_keylen(keylen - 4); if (ret) return ret; print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); /* * The last four bytes of the key material are used as the salt value * in the nonce. Update the AES key length. 
*/ ctx->cdata.keylen = keylen - 4; dma_sync_single_for_device(jrdev->parent, ctx->key_dma, ctx->cdata.keylen, ctx->dir); ret = rfc4106_set_sh_desc(aead); if (ret) return ret; /* Now update the driver contexts with the new shared descriptor */ if (ctx->drv_ctx[ENCRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], ctx->sh_desc_enc); if (ret) { dev_err(jrdev, "driver enc context update failed\n"); return ret; } } if (ctx->drv_ctx[DECRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], ctx->sh_desc_dec); if (ret) { dev_err(jrdev, "driver dec context update failed\n"); return ret; } } return 0; } static int rfc4543_set_sh_desc(struct crypto_aead *aead) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); unsigned int ivsize = crypto_aead_ivsize(aead); int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - ctx->cdata.keylen; if (!ctx->cdata.keylen || !ctx->authsize) return 0; ctx->cdata.key_virt = ctx->key; /* * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) { ctx->cdata.key_inline = true; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } cnstr_shdsc_rfc4543_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, ctx->authsize, true); /* * Job Descriptor and Shared Descriptor * must fit into the 64-word Descriptor h/w Buffer */ if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) { ctx->cdata.key_inline = true; } else { ctx->cdata.key_inline = false; ctx->cdata.key_dma = ctx->key_dma; } cnstr_shdsc_rfc4543_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, ctx->authsize, true); return 0; } static int rfc4543_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc); if (authsize != 16) return -EINVAL; ctx->authsize = authsize; rfc4543_set_sh_desc(authenc); return 0; } static int rfc4543_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct device *jrdev = ctx->jrdev; int ret; ret = aes_check_keylen(keylen - 4); if (ret) return ret; print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); memcpy(ctx->key, key, keylen); /* * The last four bytes of the key material are used as the salt value * in the nonce. Update the AES key length. 
*/ ctx->cdata.keylen = keylen - 4; dma_sync_single_for_device(jrdev->parent, ctx->key_dma, ctx->cdata.keylen, ctx->dir); ret = rfc4543_set_sh_desc(aead); if (ret) return ret; /* Now update the driver contexts with the new shared descriptor */ if (ctx->drv_ctx[ENCRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], ctx->sh_desc_enc); if (ret) { dev_err(jrdev, "driver enc context update failed\n"); return ret; } } if (ctx->drv_ctx[DECRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], ctx->sh_desc_dec); if (ret) { dev_err(jrdev, "driver dec context update failed\n"); return ret; } } return 0; } static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen, const u32 ctx1_iv_off) { struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_skcipher_alg *alg = container_of(crypto_skcipher_alg(skcipher), typeof(*alg), skcipher); struct device *jrdev = ctx->jrdev; unsigned int ivsize = crypto_skcipher_ivsize(skcipher); const bool is_rfc3686 = alg->caam.rfc3686; int ret = 0; print_hex_dump_debug("key in @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; /* skcipher encrypt, decrypt shared descriptors */ cnstr_shdsc_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, is_rfc3686, ctx1_iv_off); cnstr_shdsc_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, is_rfc3686, ctx1_iv_off); /* Now update the driver contexts with the new shared descriptor */ if (ctx->drv_ctx[ENCRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], ctx->sh_desc_enc); if (ret) { dev_err(jrdev, "driver enc context update failed\n"); return -EINVAL; } } if (ctx->drv_ctx[DECRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], ctx->sh_desc_dec); if (ret) { dev_err(jrdev, "driver dec context update failed\n"); return -EINVAL; } } return ret; } static int aes_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { int err; err = aes_check_keylen(keylen); if (err) return err; return skcipher_setkey(skcipher, key, keylen, 0); } static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { u32 ctx1_iv_off; int err; /* * RFC3686 specific: * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} * | *key = {KEY, NONCE} */ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; keylen -= CTR_RFC3686_NONCE_SIZE; err = aes_check_keylen(keylen); if (err) return err; return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { u32 ctx1_iv_off; int err; /* * AES-CTR needs to load IV in CONTEXT1 reg * at an offset of 128bits (16bytes) * CONTEXT1[255:128] = IV */ ctx1_iv_off = 16; err = aes_check_keylen(keylen); if (err) return err; return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off); } static int des3_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { return verify_skcipher_des3_key(skcipher, key) ?: skcipher_setkey(skcipher, key, keylen, 0); } static int des_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { return verify_skcipher_des_key(skcipher, key) ?: skcipher_setkey(skcipher, key, keylen, 0); } static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key, unsigned int keylen) { struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *jrdev = ctx->jrdev; struct caam_drv_private *ctrlpriv = 
dev_get_drvdata(jrdev->parent); int ret = 0; int err; err = xts_verify_key(skcipher, key, keylen); if (err) { dev_dbg(jrdev, "key size mismatch\n"); return err; } if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256) ctx->xts_key_fallback = true; if (ctrlpriv->era <= 8 || ctx->xts_key_fallback) { err = crypto_skcipher_setkey(ctx->fallback, key, keylen); if (err) return err; } ctx->cdata.keylen = keylen; ctx->cdata.key_virt = key; ctx->cdata.key_inline = true; /* xts skcipher encrypt, decrypt shared descriptors */ cnstr_shdsc_xts_skcipher_encap(ctx->sh_desc_enc, &ctx->cdata); cnstr_shdsc_xts_skcipher_decap(ctx->sh_desc_dec, &ctx->cdata); /* Now update the driver contexts with the new shared descriptor */ if (ctx->drv_ctx[ENCRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], ctx->sh_desc_enc); if (ret) { dev_err(jrdev, "driver enc context update failed\n"); return -EINVAL; } } if (ctx->drv_ctx[DECRYPT]) { ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], ctx->sh_desc_dec); if (ret) { dev_err(jrdev, "driver dec context update failed\n"); return -EINVAL; } } return ret; } /* * aead_edesc - s/w-extended aead descriptor * @src_nents: number of segments in input scatterlist * @dst_nents: number of segments in output scatterlist * @iv_dma: dma address of iv for checking continuity and link table * @qm_sg_bytes: length of dma mapped h/w link table * @qm_sg_dma: bus physical mapped address of h/w link table * @assoclen: associated data length, in CAAM endianness * @assoclen_dma: bus physical mapped address of req->assoclen * @drv_req: driver-specific request structure * @sgt: the h/w link table, followed by IV */ struct aead_edesc { int src_nents; int dst_nents; dma_addr_t iv_dma; int qm_sg_bytes; dma_addr_t qm_sg_dma; unsigned int assoclen; dma_addr_t assoclen_dma; struct caam_drv_req drv_req; struct qm_sg_entry sgt[]; }; /* * skcipher_edesc - s/w-extended skcipher descriptor * @src_nents: number of segments in input scatterlist * @dst_nents: number of segments in output scatterlist * @iv_dma: dma address of iv for checking continuity and link table * @qm_sg_bytes: length of dma mapped h/w link table * @qm_sg_dma: bus physical mapped address of h/w link table * @drv_req: driver-specific request structure * @sgt: the h/w link table, followed by IV */ struct skcipher_edesc { int src_nents; int dst_nents; dma_addr_t iv_dma; int qm_sg_bytes; dma_addr_t qm_sg_dma; struct caam_drv_req drv_req; struct qm_sg_entry sgt[]; }; static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, enum optype type) { /* * This function is called on the fast path with values of 'type' * known at compile time. Invalid arguments are not expected and * thus no checks are made. 
*/ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type]; u32 *desc; if (unlikely(!drv_ctx)) { spin_lock(&ctx->lock); /* Read again to check if some other core init drv_ctx */ drv_ctx = ctx->drv_ctx[type]; if (!drv_ctx) { int cpu; if (type == ENCRYPT) desc = ctx->sh_desc_enc; else /* (type == DECRYPT) */ desc = ctx->sh_desc_dec; cpu = smp_processor_id(); drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); if (!IS_ERR(drv_ctx)) drv_ctx->op_type = type; ctx->drv_ctx[type] = drv_ctx; } spin_unlock(&ctx->lock); } return drv_ctx; } static void caam_unmap(struct device *dev, struct scatterlist *src, struct scatterlist *dst, int src_nents, int dst_nents, dma_addr_t iv_dma, int ivsize, enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma, int qm_sg_bytes) { if (dst != src) { if (src_nents) dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); if (dst_nents) dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); } else { dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); } if (iv_dma) dma_unmap_single(dev, iv_dma, ivsize, iv_dir); if (qm_sg_bytes) dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); } static void aead_unmap(struct device *dev, struct aead_edesc *edesc, struct aead_request *req) { struct crypto_aead *aead = crypto_aead_reqtfm(req); int ivsize = crypto_aead_ivsize(aead); caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma, edesc->qm_sg_bytes); dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); } static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc, struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); int ivsize = crypto_skcipher_ivsize(skcipher); caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma, edesc->qm_sg_bytes); } static void aead_done(struct caam_drv_req *drv_req, u32 status) { struct device *qidev; struct aead_edesc *edesc; struct aead_request *aead_req = drv_req->app_ctx; struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); struct caam_ctx *caam_ctx = crypto_aead_ctx_dma(aead); int ecode = 0; qidev = caam_ctx->qidev; if (unlikely(status)) ecode = caam_jr_strstatus(qidev, status); edesc = container_of(drv_req, typeof(*edesc), drv_req); aead_unmap(qidev, edesc, aead_req); aead_request_complete(aead_req, ecode); qi_cache_free(edesc); } /* * allocate and map the aead extended descriptor */ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, bool encrypt) { struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), typeof(*alg), aead); struct device *qidev = ctx->qidev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; int src_len, dst_len = 0; struct aead_edesc *edesc; dma_addr_t qm_sg_dma, iv_dma = 0; int ivsize = 0; unsigned int authsize = ctx->authsize; int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes; int in_len, out_len; struct qm_sg_entry *sg_table, *fd_sgt; struct caam_drv_ctx *drv_ctx; drv_ctx = get_drv_ctx(ctx, encrypt ? 
ENCRYPT : DECRYPT); if (IS_ERR(drv_ctx)) return (struct aead_edesc *)drv_ctx; /* allocate space for base edesc and hw desc commands, link tables */ edesc = qi_cache_alloc(flags); if (unlikely(!edesc)) { dev_err(qidev, "could not allocate extended descriptor\n"); return ERR_PTR(-ENOMEM); } if (likely(req->src == req->dst)) { src_len = req->assoclen + req->cryptlen + (encrypt ? authsize : 0); src_nents = sg_nents_for_len(req->src, src_len); if (unlikely(src_nents < 0)) { dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", src_len); qi_cache_free(edesc); return ERR_PTR(src_nents); } mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, DMA_BIDIRECTIONAL); if (unlikely(!mapped_src_nents)) { dev_err(qidev, "unable to map source\n"); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } } else { src_len = req->assoclen + req->cryptlen; dst_len = src_len + (encrypt ? authsize : (-authsize)); src_nents = sg_nents_for_len(req->src, src_len); if (unlikely(src_nents < 0)) { dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", src_len); qi_cache_free(edesc); return ERR_PTR(src_nents); } dst_nents = sg_nents_for_len(req->dst, dst_len); if (unlikely(dst_nents < 0)) { dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", dst_len); qi_cache_free(edesc); return ERR_PTR(dst_nents); } if (src_nents) { mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); if (unlikely(!mapped_src_nents)) { dev_err(qidev, "unable to map source\n"); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } } else { mapped_src_nents = 0; } if (dst_nents) { mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, DMA_FROM_DEVICE); if (unlikely(!mapped_dst_nents)) { dev_err(qidev, "unable to map destination\n"); dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } } else { mapped_dst_nents = 0; } } if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) ivsize = crypto_aead_ivsize(aead); /* * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. * Input is not contiguous. * HW reads 4 S/G entries at a time; make sure the reads don't go beyond * the end of the table by allocating more S/G entries. Logic: * if (src != dst && output S/G) * pad output S/G, if needed * else if (src == dst && S/G) * overlapping S/Gs; pad one of them * else if (input S/G) ... 
* pad input S/G, if needed */ qm_sg_ents = 1 + !!ivsize + mapped_src_nents; if (mapped_dst_nents > 1) qm_sg_ents += pad_sg_nents(mapped_dst_nents); else if ((req->src == req->dst) && (mapped_src_nents > 1)) qm_sg_ents = max(pad_sg_nents(qm_sg_ents), 1 + !!ivsize + pad_sg_nents(mapped_src_nents)); else qm_sg_ents = pad_sg_nents(qm_sg_ents); sg_table = &edesc->sgt[0]; qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > CAAM_QI_MEMCACHE_SIZE)) { dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", qm_sg_ents, ivsize); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 0, DMA_NONE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } if (ivsize) { u8 *iv = (u8 *)(sg_table + qm_sg_ents); /* Make sure IV is located in a DMAable area */ memcpy(iv, req->iv, ivsize); iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); if (dma_mapping_error(qidev, iv_dma)) { dev_err(qidev, "unable to map IV\n"); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 0, DMA_NONE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->iv_dma = iv_dma; edesc->drv_req.app_ctx = req; edesc->drv_req.cbk = aead_done; edesc->drv_req.drv_ctx = drv_ctx; edesc->assoclen = cpu_to_caam32(req->assoclen); edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, DMA_TO_DEVICE); if (dma_mapping_error(qidev, edesc->assoclen_dma)) { dev_err(qidev, "unable to map assoclen\n"); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); qm_sg_index++; if (ivsize) { dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); qm_sg_index++; } sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0); qm_sg_index += mapped_src_nents; if (mapped_dst_nents > 1) sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0); qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(qidev, qm_sg_dma)) { dev_err(qidev, "unable to map S/G table\n"); dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, iv_dma, ivsize, DMA_TO_DEVICE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } edesc->qm_sg_dma = qm_sg_dma; edesc->qm_sg_bytes = qm_sg_bytes; out_len = req->assoclen + req->cryptlen + (encrypt ? 
ctx->authsize : (-ctx->authsize)); in_len = 4 + ivsize + req->assoclen + req->cryptlen; fd_sgt = &edesc->drv_req.fd_sgt[0]; dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0); if (req->dst == req->src) { if (mapped_src_nents == 1) dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), out_len, 0); else dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + (1 + !!ivsize) * sizeof(*sg_table), out_len, 0); } else if (mapped_dst_nents <= 1) { dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, 0); } else { dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) * qm_sg_index, out_len, 0); } return edesc; } static inline int aead_crypt(struct aead_request *req, bool encrypt) { struct aead_edesc *edesc; struct crypto_aead *aead = crypto_aead_reqtfm(req); struct caam_ctx *ctx = crypto_aead_ctx_dma(aead); int ret; if (unlikely(caam_congested)) return -EAGAIN; /* allocate extended descriptor */ edesc = aead_edesc_alloc(req, encrypt); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Create and submit job descriptor */ ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); if (!ret) { ret = -EINPROGRESS; } else { aead_unmap(ctx->qidev, edesc, req); qi_cache_free(edesc); } return ret; } static int aead_encrypt(struct aead_request *req) { return aead_crypt(req, true); } static int aead_decrypt(struct aead_request *req) { return aead_crypt(req, false); } static int ipsec_gcm_encrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req, true); } static int ipsec_gcm_decrypt(struct aead_request *req) { return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_crypt(req, false); } static inline u8 *skcipher_edesc_iv(struct skcipher_edesc *edesc) { return PTR_ALIGN((u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes, dma_get_cache_alignment()); } static void skcipher_done(struct caam_drv_req *drv_req, u32 status) { struct skcipher_edesc *edesc; struct skcipher_request *req = drv_req->app_ctx; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct caam_ctx *caam_ctx = crypto_skcipher_ctx_dma(skcipher); struct device *qidev = caam_ctx->qidev; int ivsize = crypto_skcipher_ivsize(skcipher); int ecode = 0; dev_dbg(qidev, "%s %d: status 0x%x\n", __func__, __LINE__, status); edesc = container_of(drv_req, typeof(*edesc), drv_req); if (status) ecode = caam_jr_strstatus(qidev, status); print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->iv, edesc->src_nents > 1 ? 100 : ivsize, 1); caam_dump_sg("dst @" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, req->dst, edesc->dst_nents > 1 ? 100 : req->cryptlen, 1); skcipher_unmap(qidev, edesc, req); /* * The crypto API expects us to set the IV (req->iv) to the last * ciphertext block (CBC mode) or last counter (CTR mode). * This is used e.g. by the CTS mode. */ if (!ecode) memcpy(req->iv, skcipher_edesc_iv(edesc), ivsize); qi_cache_free(edesc); skcipher_request_complete(req, ecode); } static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req, bool encrypt) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct device *qidev = ctx->qidev; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; struct skcipher_edesc *edesc; dma_addr_t iv_dma; u8 *iv; int ivsize = crypto_skcipher_ivsize(skcipher); int dst_sg_idx, qm_sg_ents, qm_sg_bytes; struct qm_sg_entry *sg_table, *fd_sgt; struct caam_drv_ctx *drv_ctx; unsigned int len; drv_ctx = get_drv_ctx(ctx, encrypt ? ENCRYPT : DECRYPT); if (IS_ERR(drv_ctx)) return (struct skcipher_edesc *)drv_ctx; src_nents = sg_nents_for_len(req->src, req->cryptlen); if (unlikely(src_nents < 0)) { dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", req->cryptlen); return ERR_PTR(src_nents); } if (unlikely(req->src != req->dst)) { dst_nents = sg_nents_for_len(req->dst, req->cryptlen); if (unlikely(dst_nents < 0)) { dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", req->cryptlen); return ERR_PTR(dst_nents); } mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); if (unlikely(!mapped_src_nents)) { dev_err(qidev, "unable to map source\n"); return ERR_PTR(-ENOMEM); } mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, DMA_FROM_DEVICE); if (unlikely(!mapped_dst_nents)) { dev_err(qidev, "unable to map destination\n"); dma_unmap_sg(qidev, req->src, src_nents, DMA_TO_DEVICE); return ERR_PTR(-ENOMEM); } } else { mapped_src_nents = dma_map_sg(qidev, req->src, src_nents, DMA_BIDIRECTIONAL); if (unlikely(!mapped_src_nents)) { dev_err(qidev, "unable to map source\n"); return ERR_PTR(-ENOMEM); } } qm_sg_ents = 1 + mapped_src_nents; dst_sg_idx = qm_sg_ents; /* * Input, output HW S/G tables: [IV, src][dst, IV] * IV entries point to the same buffer * If src == dst, S/G entries are reused (S/G tables overlap) * * HW reads 4 S/G entries at a time; make sure the reads don't go beyond * the end of the table by allocating more S/G entries. 
*/ if (req->src != req->dst) qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1); else qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents); qm_sg_bytes = qm_sg_ents * sizeof(struct qm_sg_entry); len = offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes; len = ALIGN(len, dma_get_cache_alignment()); len += ivsize; if (unlikely(len > CAAM_QI_MEMCACHE_SIZE)) { dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", qm_sg_ents, ivsize); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } /* allocate space for base edesc, link tables and IV */ edesc = qi_cache_alloc(flags); if (unlikely(!edesc)) { dev_err(qidev, "could not allocate extended descriptor\n"); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 0, DMA_NONE, 0, 0); return ERR_PTR(-ENOMEM); } edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; edesc->qm_sg_bytes = qm_sg_bytes; edesc->drv_req.app_ctx = req; edesc->drv_req.cbk = skcipher_done; edesc->drv_req.drv_ctx = drv_ctx; /* Make sure IV is located in a DMAable area */ sg_table = &edesc->sgt[0]; iv = skcipher_edesc_iv(edesc); memcpy(iv, req->iv, ivsize); iv_dma = dma_map_single(qidev, iv, ivsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(qidev, iv_dma)) { dev_err(qidev, "unable to map IV\n"); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, 0, DMA_NONE, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } edesc->iv_dma = iv_dma; dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0); if (req->src != req->dst) sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0); dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma, ivsize, 0); edesc->qm_sg_dma = dma_map_single(qidev, sg_table, edesc->qm_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(qidev, edesc->qm_sg_dma)) { dev_err(qidev, "unable to map S/G table\n"); caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0); qi_cache_free(edesc); return ERR_PTR(-ENOMEM); } fd_sgt = &edesc->drv_req.fd_sgt[0]; dma_to_qm_sg_one_last_ext(&fd_sgt[1], edesc->qm_sg_dma, ivsize + req->cryptlen, 0); if (req->src == req->dst) dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + sizeof(*sg_table), req->cryptlen + ivsize, 0); else dma_to_qm_sg_one_ext(&fd_sgt[0], edesc->qm_sg_dma + dst_sg_idx * sizeof(*sg_table), req->cryptlen + ivsize, 0); return edesc; } static inline bool xts_skcipher_ivsize(struct skcipher_request *req) { struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); unsigned int ivsize = crypto_skcipher_ivsize(skcipher); return !!get_unaligned((u64 *)(req->iv + (ivsize / 2))); } static inline int skcipher_crypt(struct skcipher_request *req, bool encrypt) { struct skcipher_edesc *edesc; struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher); struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctx->jrdev->parent); int ret; /* * XTS is expected to return an error even for input length = 0 * Note that the case input length < block size will be caught during * HW offloading and return an error. 
*/ if (!req->cryptlen && !ctx->fallback) return 0; if (ctx->fallback && ((ctrlpriv->era <= 8 && xts_skcipher_ivsize(req)) || ctx->xts_key_fallback)) { struct caam_skcipher_req_ctx *rctx = skcipher_request_ctx(req); skcipher_request_set_tfm(&rctx->fallback_req, ctx->fallback); skcipher_request_set_callback(&rctx->fallback_req, req->base.flags, req->base.complete, req->base.data); skcipher_request_set_crypt(&rctx->fallback_req, req->src, req->dst, req->cryptlen, req->iv); return encrypt ? crypto_skcipher_encrypt(&rctx->fallback_req) : crypto_skcipher_decrypt(&rctx->fallback_req); } if (unlikely(caam_congested)) return -EAGAIN; /* allocate extended descriptor */ edesc = skcipher_edesc_alloc(req, encrypt); if (IS_ERR(edesc)) return PTR_ERR(edesc); ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); if (!ret) { ret = -EINPROGRESS; } else { skcipher_unmap(ctx->qidev, edesc, req); qi_cache_free(edesc); } return ret; } static int skcipher_encrypt(struct skcipher_request *req) { return skcipher_crypt(req, true); } static int skcipher_decrypt(struct skcipher_request *req) { return skcipher_crypt(req, false); } static struct caam_skcipher_alg driver_algs[] = { { .skcipher = { .base = { .cra_name = "cbc(aes)", .cra_driver_name = "cbc-aes-caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aes_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, }, { .skcipher = { .base = { .cra_name = "cbc(des3_ede)", .cra_driver_name = "cbc-3des-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, }, { .skcipher = { .base = { .cra_name = "cbc(des)", .cra_driver_name = "cbc-des-caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = des_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, }, { .skcipher = { .base = { .cra_name = "ctr(aes)", .cra_driver_name = "ctr-aes-caam-qi", .cra_blocksize = 1, }, .setkey = ctr_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, .chunksize = AES_BLOCK_SIZE, }, .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, }, { .skcipher = { .base = { .cra_name = "rfc3686(ctr(aes))", .cra_driver_name = "rfc3686-ctr-aes-caam-qi", .cra_blocksize = 1, }, .setkey = rfc3686_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE, .ivsize = CTR_RFC3686_IV_SIZE, .chunksize = AES_BLOCK_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CTR_MOD128, .rfc3686 = true, }, }, { .skcipher = { .base = { .cra_name = "xts(aes)", .cra_driver_name = "xts-aes-caam-qi", .cra_flags = CRYPTO_ALG_NEED_FALLBACK, .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = xts_skcipher_setkey, .encrypt = skcipher_encrypt, .decrypt = skcipher_decrypt, .min_keysize = 2 * AES_MIN_KEY_SIZE, .max_keysize = 2 * AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, 
.caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS, }, }; static struct caam_aead_alg driver_aeads[] = { { .aead = { .base = { .cra_name = "rfc4106(gcm(aes))", .cra_driver_name = "rfc4106-gcm-aes-caam-qi", .cra_blocksize = 1, }, .setkey = rfc4106_setkey, .setauthsize = rfc4106_setauthsize, .encrypt = ipsec_gcm_encrypt, .decrypt = ipsec_gcm_decrypt, .ivsize = 8, .maxauthsize = AES_BLOCK_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, .nodkp = true, }, }, { .aead = { .base = { .cra_name = "rfc4543(gcm(aes))", .cra_driver_name = "rfc4543-gcm-aes-caam-qi", .cra_blocksize = 1, }, .setkey = rfc4543_setkey, .setauthsize = rfc4543_setauthsize, .encrypt = ipsec_gcm_encrypt, .decrypt = ipsec_gcm_decrypt, .ivsize = 8, .maxauthsize = AES_BLOCK_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, .nodkp = true, }, }, /* Galois Counter Mode */ { .aead = { .base = { .cra_name = "gcm(aes)", .cra_driver_name = "gcm-aes-caam-qi", .cra_blocksize = 1, }, .setkey = gcm_setkey, .setauthsize = gcm_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = 12, .maxauthsize = AES_BLOCK_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM, .nodkp = true, } }, /* single-pass ipsec_esp descriptor */ { .aead = { .base = { .cra_name = "authenc(hmac(md5),cbc(aes))", .cra_driver_name = "authenc-hmac-md5-" "cbc-aes-caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-hmac-md5-" "cbc-aes-caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha1),cbc(aes))", .cra_driver_name = "authenc-hmac-sha1-" "cbc-aes-caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha1-cbc-aes-caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead = { .base = { .cra_name = "authenc(hmac(sha224),cbc(aes))", .cra_driver_name = "authenc-hmac-sha224-" "cbc-aes-caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { 
.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha224-cbc-aes-caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha256),cbc(aes))", .cra_driver_name = "authenc-hmac-sha256-" "cbc-aes-caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha256-cbc-aes-" "caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha384),cbc(aes))", .cra_driver_name = "authenc-hmac-sha384-" "cbc-aes-caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha384)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha384-cbc-aes-" "caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha512),cbc(aes))", .cra_driver_name = "authenc-hmac-sha512-" "cbc-aes-caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(aes)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha512-cbc-aes-" "caam-qi", .cra_blocksize = AES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | 
OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(md5),cbc(des3_ede))", .cra_driver_name = "authenc-hmac-md5-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-hmac-md5-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha1)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha1-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha1-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha224)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha224-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha224-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha256)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha256-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, 
.class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha256-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha384)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha384-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha384)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha384-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha512)," "cbc(des3_ede))", .cra_driver_name = "authenc-hmac-sha512-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(des3_ede)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha512-" "cbc-des3_ede-caam-qi", .cra_blocksize = DES3_EDE_BLOCK_SIZE, }, .setkey = des3_aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(md5),cbc(des))", .cra_driver_name = "authenc-hmac-md5-" "cbc-des-caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(md5)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-hmac-md5-" "cbc-des-caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = MD5_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | 
OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha1),cbc(des))", .cra_driver_name = "authenc-hmac-sha1-" "cbc-des-caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha1)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha1-cbc-des-caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA1 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha224),cbc(des))", .cra_driver_name = "authenc-hmac-sha224-" "cbc-des-caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha224)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha224-cbc-des-" "caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA224_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA224 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha256),cbc(des))", .cra_driver_name = "authenc-hmac-sha256-" "cbc-des-caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha256)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha256-cbc-des-" "caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA256 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, }, }, { .aead = { .base = { .cra_name = "authenc(hmac(sha384),cbc(des))", .cra_driver_name = "authenc-hmac-sha384-" "cbc-des-caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, }, }, { .aead = { .base = { .cra_name = 
"echainiv(authenc(hmac(sha384)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha384-cbc-des-" "caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA384_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA384 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, { .aead = { .base = { .cra_name = "authenc(hmac(sha512),cbc(des))", .cra_driver_name = "authenc-hmac-sha512-" "cbc-des-caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, } }, { .aead = { .base = { .cra_name = "echainiv(authenc(hmac(sha512)," "cbc(des)))", .cra_driver_name = "echainiv-authenc-" "hmac-sha512-cbc-des-" "caam-qi", .cra_blocksize = DES_BLOCK_SIZE, }, .setkey = aead_setkey, .setauthsize = aead_setauthsize, .encrypt = aead_encrypt, .decrypt = aead_decrypt, .ivsize = DES_BLOCK_SIZE, .maxauthsize = SHA512_DIGEST_SIZE, }, .caam = { .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC, .class2_alg_type = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC_PRECOMP, .geniv = true, } }, }; static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam, bool uses_dkp) { struct caam_drv_private *priv; struct device *dev; /* * distribute tfms across job rings to ensure in-order * crypto request processing per tfm */ ctx->jrdev = caam_jr_alloc(); if (IS_ERR(ctx->jrdev)) { pr_err("Job Ring Device allocation for transform failed\n"); return PTR_ERR(ctx->jrdev); } dev = ctx->jrdev->parent; priv = dev_get_drvdata(dev); if (priv->era >= 6 && uses_dkp) ctx->dir = DMA_BIDIRECTIONAL; else ctx->dir = DMA_TO_DEVICE; ctx->key_dma = dma_map_single(dev, ctx->key, sizeof(ctx->key), ctx->dir); if (dma_mapping_error(dev, ctx->key_dma)) { dev_err(dev, "unable to map key\n"); caam_jr_free(ctx->jrdev); return -ENOMEM; } /* copy descriptor header template value */ ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type; ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type; ctx->qidev = dev; spin_lock_init(&ctx->lock); ctx->drv_ctx[ENCRYPT] = NULL; ctx->drv_ctx[DECRYPT] = NULL; return 0; } static int caam_cra_init(struct crypto_skcipher *tfm) { struct skcipher_alg *alg = crypto_skcipher_alg(tfm); struct caam_skcipher_alg *caam_alg = container_of(alg, typeof(*caam_alg), skcipher); struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; int ret = 0; if (alg_aai == OP_ALG_AAI_XTS) { const char *tfm_name = crypto_tfm_alg_name(&tfm->base); struct crypto_skcipher *fallback; fallback = crypto_alloc_skcipher(tfm_name, 0, CRYPTO_ALG_NEED_FALLBACK); if (IS_ERR(fallback)) { pr_err("Failed to allocate %s fallback: %ld\n", tfm_name, PTR_ERR(fallback)); return PTR_ERR(fallback); } ctx->fallback = fallback; crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) + crypto_skcipher_reqsize(fallback)); } ret = caam_init_common(ctx, &caam_alg->caam, false); if (ret && ctx->fallback) crypto_free_skcipher(ctx->fallback); return ret; } static int caam_aead_init(struct crypto_aead *tfm) { struct aead_alg *alg = crypto_aead_alg(tfm); struct caam_aead_alg *caam_alg = 
container_of(alg, typeof(*caam_alg), aead); struct caam_ctx *ctx = crypto_aead_ctx_dma(tfm); return caam_init_common(ctx, &caam_alg->caam, !caam_alg->caam.nodkp); } static void caam_exit_common(struct caam_ctx *ctx) { caam_drv_ctx_rel(ctx->drv_ctx[ENCRYPT]); caam_drv_ctx_rel(ctx->drv_ctx[DECRYPT]); dma_unmap_single(ctx->jrdev->parent, ctx->key_dma, sizeof(ctx->key), ctx->dir); caam_jr_free(ctx->jrdev); } static void caam_cra_exit(struct crypto_skcipher *tfm) { struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm); if (ctx->fallback) crypto_free_skcipher(ctx->fallback); caam_exit_common(ctx); } static void caam_aead_exit(struct crypto_aead *tfm) { caam_exit_common(crypto_aead_ctx_dma(tfm)); } void caam_qi_algapi_exit(void) { int i; for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { struct caam_aead_alg *t_alg = driver_aeads + i; if (t_alg->registered) crypto_unregister_aead(&t_alg->aead); } for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { struct caam_skcipher_alg *t_alg = driver_algs + i; if (t_alg->registered) crypto_unregister_skcipher(&t_alg->skcipher); } } static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg) { struct skcipher_alg *alg = &t_alg->skcipher; alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY); alg->init = caam_cra_init; alg->exit = caam_cra_exit; } static void caam_aead_alg_init(struct caam_aead_alg *t_alg) { struct aead_alg *alg = &t_alg->aead; alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CAAM_CRA_PRIORITY; alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding(); alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->init = caam_aead_init; alg->exit = caam_aead_exit; } int caam_qi_algapi_init(struct device *ctrldev) { struct caam_drv_private *priv = dev_get_drvdata(ctrldev); int i = 0, err = 0; u32 aes_vid, aes_inst, des_inst, md_vid, md_inst; unsigned int md_limit = SHA512_DIGEST_SIZE; bool registered = false; /* Make sure this runs only on (DPAA 1.x) QI */ if (!priv->qi_present || caam_dpaa2) return 0; /* * Register crypto algorithms the device supports. * First, detect presence and attributes of DES, AES, and MD blocks. 
*/ if (priv->era < 10) { u32 cha_vid, cha_inst; cha_vid = rd_reg32(&priv->ctrl->perfmon.cha_id_ls); aes_vid = cha_vid & CHA_ID_LS_AES_MASK; md_vid = (cha_vid & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; cha_inst = rd_reg32(&priv->ctrl->perfmon.cha_num_ls); des_inst = (cha_inst & CHA_ID_LS_DES_MASK) >> CHA_ID_LS_DES_SHIFT; aes_inst = cha_inst & CHA_ID_LS_AES_MASK; md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT; } else { u32 aesa, mdha; aesa = rd_reg32(&priv->ctrl->vreg.aesa); mdha = rd_reg32(&priv->ctrl->vreg.mdha); aes_vid = (aesa & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; md_vid = (mdha & CHA_VER_VID_MASK) >> CHA_VER_VID_SHIFT; des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK; aes_inst = aesa & CHA_VER_NUM_MASK; md_inst = mdha & CHA_VER_NUM_MASK; } /* If MD is present, limit digest size based on LP256 */ if (md_inst && md_vid == CHA_VER_VID_MD_LP256) md_limit = SHA256_DIGEST_SIZE; for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { struct caam_skcipher_alg *t_alg = driver_algs + i; u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; /* Skip DES algorithms if not supported by device */ if (!des_inst && ((alg_sel == OP_ALG_ALGSEL_3DES) || (alg_sel == OP_ALG_ALGSEL_DES))) continue; /* Skip AES algorithms if not supported by device */ if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) continue; caam_skcipher_alg_init(t_alg); err = crypto_register_skcipher(&t_alg->skcipher); if (err) { dev_warn(ctrldev, "%s alg registration failed\n", t_alg->skcipher.base.cra_driver_name); continue; } t_alg->registered = true; registered = true; } for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) { struct caam_aead_alg *t_alg = driver_aeads + i; u32 c1_alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK; u32 c2_alg_sel = t_alg->caam.class2_alg_type & OP_ALG_ALGSEL_MASK; u32 alg_aai = t_alg->caam.class1_alg_type & OP_ALG_AAI_MASK; /* Skip DES algorithms if not supported by device */ if (!des_inst && ((c1_alg_sel == OP_ALG_ALGSEL_3DES) || (c1_alg_sel == OP_ALG_ALGSEL_DES))) continue; /* Skip AES algorithms if not supported by device */ if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES)) continue; /* * Check support for AES algorithms not available * on LP devices. */ if (aes_vid == CHA_VER_VID_AES_LP && alg_aai == OP_ALG_AAI_GCM) continue; /* * Skip algorithms requiring message digests * if MD or MD size is not supported by device. */ if (c2_alg_sel && (!md_inst || (t_alg->aead.maxauthsize > md_limit))) continue; caam_aead_alg_init(t_alg); err = crypto_register_aead(&t_alg->aead); if (err) { pr_warn("%s alg registration failed\n", t_alg->aead.base.cra_driver_name); continue; } t_alg->registered = true; registered = true; } if (registered) dev_info(ctrldev, "algorithms registered in /proc/crypto\n"); return err; }
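The aead_edesc_alloc() and skcipher_edesc_alloc() comments above note that the CAAM hardware reads S/G entries four at a time, so the link table is padded to keep those reads in bounds. The standalone sketch below illustrates that padding rule; pad_sg_nents_example() is a hypothetical stand-in that simply rounds the entry count up to a multiple of 4, an assumption made here for illustration rather than a copy of the driver's helper.

#include <stdio.h>

/* Hypothetical stand-in for the driver's pad_sg_nents() helper (assumption):
 * round the hardware S/G entry count up to a multiple of 4 so a 4-entry DMA
 * burst never reads past the end of the table.
 */
static int pad_sg_nents_example(int nents)
{
	return (nents + 3) & ~3;
}

int main(void)
{
	/* e.g. 1 entry for assoclen + 1 for the IV + 5 mapped source segments */
	int qm_sg_ents = 1 + 1 + 5;

	printf("unpadded entries: %d, padded entries: %d\n",
	       qm_sg_ents, pad_sg_nents_example(qm_sg_ents));
	/* prints: unpadded entries: 7, padded entries: 8 */
	return 0;
}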
linux-master
drivers/crypto/caam/caamalg_qi.c
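A standalone sketch of the rfc3686(ctr(aes)) key layout handled by rfc3686_skcipher_setkey() in caamalg_qi.c above: the crypto API passes KEY followed by a 4-byte nonce, the nonce is stripped from the AES key length, and the per-request IV lands in the upper half of CONTEXT1 right after that nonce. The CTR_RFC3686_* values match the kernel's definitions; everything else is plain C for illustration only.

#include <stdio.h>

#define CTR_RFC3686_NONCE_SIZE 4	/* 32-bit per-key nonce (RFC 3686) */
#define CTR_RFC3686_IV_SIZE    8	/* 64-bit per-request IV */

int main(void)
{
	/* Key material as delivered to setkey(): AES-128 key || nonce */
	unsigned char key_material[16 + CTR_RFC3686_NONCE_SIZE] = {
		/* AES key */
		0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
		0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c,
		/* nonce */
		0xde, 0xad, 0xbe, 0xef,
	};
	unsigned int keylen = sizeof(key_material);
	unsigned int ctx1_iv_off;

	/* Strip the trailing nonce from the AES key length, as the driver does */
	keylen -= CTR_RFC3686_NONCE_SIZE;

	/*
	 * CONTEXT1[255:128] = {NONCE, IV, COUNTER}: the upper 128-bit half of
	 * CONTEXT1 starts at byte 16, so the IV is written at
	 * 16 + CTR_RFC3686_NONCE_SIZE = 20, right after the nonce.
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;

	printf("AES keylen: %u bytes, IV offset in CONTEXT1: %u\n",
	       keylen, ctx1_iv_off);
	printf("nonce bytes: %02x %02x %02x %02x\n",
	       key_material[keylen], key_material[keylen + 1],
	       key_material[keylen + 2], key_material[keylen + 3]);
	return 0;
}

The plain ctr(aes) template uses the same mechanism without a nonce, which is why ctr_skcipher_setkey() passes a fixed offset of 16.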
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* * Copyright 2013-2016 Freescale Semiconductor Inc. * Copyright 2017-2018 NXP */ #include <linux/fsl/mc.h> #include "dpseci.h" #include "dpseci_cmd.h" /** * dpseci_open() - Open a control session for the specified object * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @dpseci_id: DPSECI unique ID * @token: Returned token; use in subsequent API calls * * This function can be used to open a control session for an already created * object; an object may have been declared statically in the DPL * or created dynamically. * This function returns a unique authentication token, associated with the * specific object ID and the specific MC portal; this token must be used in all * subsequent commands for this specific object. * * Return: '0' on success, error code otherwise */ int dpseci_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpseci_id, u16 *token) { struct fsl_mc_command cmd = { 0 }; struct dpseci_cmd_open *cmd_params; int err; cmd.header = mc_encode_cmd_header(DPSECI_CMDID_OPEN, cmd_flags, 0); cmd_params = (struct dpseci_cmd_open *)cmd.params; cmd_params->dpseci_id = cpu_to_le32(dpseci_id); err = mc_send_command(mc_io, &cmd); if (err) return err; *token = mc_cmd_hdr_read_token(&cmd); return 0; } /** * dpseci_close() - Close the control session of the object * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * * After this function is called, no further operations are allowed on the * object without opening a new control session. * * Return: '0' on success, error code otherwise */ int dpseci_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; cmd.header = mc_encode_cmd_header(DPSECI_CMDID_CLOSE, cmd_flags, token); return mc_send_command(mc_io, &cmd); } /** * dpseci_enable() - Enable the DPSECI, allow sending and receiving frames * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * * Return: '0' on success, error code otherwise */ int dpseci_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; cmd.header = mc_encode_cmd_header(DPSECI_CMDID_ENABLE, cmd_flags, token); return mc_send_command(mc_io, &cmd); } /** * dpseci_disable() - Disable the DPSECI, stop sending and receiving frames * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * * Return: '0' on success, error code otherwise */ int dpseci_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; cmd.header = mc_encode_cmd_header(DPSECI_CMDID_DISABLE, cmd_flags, token); return mc_send_command(mc_io, &cmd); } /** * dpseci_reset() - Reset the DPSECI, returns the object to initial state * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * * Return: '0' on success, error code otherwise */ int dpseci_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) { struct fsl_mc_command cmd = { 0 }; cmd.header = mc_encode_cmd_header(DPSECI_CMDID_RESET, cmd_flags, token); return mc_send_command(mc_io, &cmd); } /** * dpseci_is_enabled() - Check if the DPSECI is enabled. 
* @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * @en: Returns '1' if object is enabled; '0' otherwise * * Return: '0' on success, error code otherwise */ int dpseci_is_enabled(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, int *en) { struct fsl_mc_command cmd = { 0 }; struct dpseci_rsp_is_enabled *rsp_params; int err; cmd.header = mc_encode_cmd_header(DPSECI_CMDID_IS_ENABLED, cmd_flags, token); err = mc_send_command(mc_io, &cmd); if (err) return err; rsp_params = (struct dpseci_rsp_is_enabled *)cmd.params; *en = dpseci_get_field(rsp_params->is_enabled, ENABLE); return 0; } /** * dpseci_get_attributes() - Retrieve DPSECI attributes * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * @attr: Returned object's attributes * * Return: '0' on success, error code otherwise */ int dpseci_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, struct dpseci_attr *attr) { struct fsl_mc_command cmd = { 0 }; struct dpseci_rsp_get_attributes *rsp_params; int err; cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_ATTR, cmd_flags, token); err = mc_send_command(mc_io, &cmd); if (err) return err; rsp_params = (struct dpseci_rsp_get_attributes *)cmd.params; attr->id = le32_to_cpu(rsp_params->id); attr->num_tx_queues = rsp_params->num_tx_queues; attr->num_rx_queues = rsp_params->num_rx_queues; attr->options = le32_to_cpu(rsp_params->options); return 0; } /** * dpseci_set_rx_queue() - Set Rx queue configuration * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * @queue: Select the queue relative to number of priorities configured at * DPSECI creation; use DPSECI_ALL_QUEUES to configure all * Rx queues identically. 
* @cfg: Rx queue configuration * * Return: '0' on success, error code otherwise */ int dpseci_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue, const struct dpseci_rx_queue_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpseci_cmd_queue *cmd_params; cmd.header = mc_encode_cmd_header(DPSECI_CMDID_SET_RX_QUEUE, cmd_flags, token); cmd_params = (struct dpseci_cmd_queue *)cmd.params; cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); cmd_params->priority = cfg->dest_cfg.priority; cmd_params->queue = queue; dpseci_set_field(cmd_params->dest_type, DEST_TYPE, cfg->dest_cfg.dest_type); cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); cmd_params->options = cpu_to_le32(cfg->options); dpseci_set_field(cmd_params->order_preservation_en, ORDER_PRESERVATION, cfg->order_preservation_en); return mc_send_command(mc_io, &cmd); } /** * dpseci_get_rx_queue() - Retrieve Rx queue attributes * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * @queue: Select the queue relative to number of priorities configured at * DPSECI creation * @attr: Returned Rx queue attributes * * Return: '0' on success, error code otherwise */ int dpseci_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue, struct dpseci_rx_queue_attr *attr) { struct fsl_mc_command cmd = { 0 }; struct dpseci_cmd_queue *cmd_params; int err; cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_RX_QUEUE, cmd_flags, token); cmd_params = (struct dpseci_cmd_queue *)cmd.params; cmd_params->queue = queue; err = mc_send_command(mc_io, &cmd); if (err) return err; attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id); attr->dest_cfg.priority = cmd_params->priority; attr->dest_cfg.dest_type = dpseci_get_field(cmd_params->dest_type, DEST_TYPE); attr->user_ctx = le64_to_cpu(cmd_params->user_ctx); attr->fqid = le32_to_cpu(cmd_params->fqid); attr->order_preservation_en = dpseci_get_field(cmd_params->order_preservation_en, ORDER_PRESERVATION); return 0; } /** * dpseci_get_tx_queue() - Retrieve Tx queue attributes * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * @queue: Select the queue relative to number of priorities configured at * DPSECI creation * @attr: Returned Tx queue attributes * * Return: '0' on success, error code otherwise */ int dpseci_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue, struct dpseci_tx_queue_attr *attr) { struct fsl_mc_command cmd = { 0 }; struct dpseci_cmd_queue *cmd_params; struct dpseci_rsp_get_tx_queue *rsp_params; int err; cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_TX_QUEUE, cmd_flags, token); cmd_params = (struct dpseci_cmd_queue *)cmd.params; cmd_params->queue = queue; err = mc_send_command(mc_io, &cmd); if (err) return err; rsp_params = (struct dpseci_rsp_get_tx_queue *)cmd.params; attr->fqid = le32_to_cpu(rsp_params->fqid); attr->priority = rsp_params->priority; return 0; } /** * dpseci_get_sec_attr() - Retrieve SEC accelerator attributes * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * @attr: Returned SEC attributes * * Return: '0' on success, error code otherwise */ int dpseci_get_sec_attr(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, struct dpseci_sec_attr *attr) { struct fsl_mc_command cmd = { 0 }; struct dpseci_rsp_get_sec_attr *rsp_params; int err; cmd.header = 
mc_encode_cmd_header(DPSECI_CMDID_GET_SEC_ATTR, cmd_flags, token); err = mc_send_command(mc_io, &cmd); if (err) return err; rsp_params = (struct dpseci_rsp_get_sec_attr *)cmd.params; attr->ip_id = le16_to_cpu(rsp_params->ip_id); attr->major_rev = rsp_params->major_rev; attr->minor_rev = rsp_params->minor_rev; attr->era = rsp_params->era; attr->deco_num = rsp_params->deco_num; attr->zuc_auth_acc_num = rsp_params->zuc_auth_acc_num; attr->zuc_enc_acc_num = rsp_params->zuc_enc_acc_num; attr->snow_f8_acc_num = rsp_params->snow_f8_acc_num; attr->snow_f9_acc_num = rsp_params->snow_f9_acc_num; attr->crc_acc_num = rsp_params->crc_acc_num; attr->pk_acc_num = rsp_params->pk_acc_num; attr->kasumi_acc_num = rsp_params->kasumi_acc_num; attr->rng_acc_num = rsp_params->rng_acc_num; attr->md_acc_num = rsp_params->md_acc_num; attr->arc4_acc_num = rsp_params->arc4_acc_num; attr->des_acc_num = rsp_params->des_acc_num; attr->aes_acc_num = rsp_params->aes_acc_num; attr->ccha_acc_num = rsp_params->ccha_acc_num; attr->ptha_acc_num = rsp_params->ptha_acc_num; return 0; } /** * dpseci_get_api_version() - Get Data Path SEC Interface API version * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @major_ver: Major version of data path sec API * @minor_ver: Minor version of data path sec API * * Return: '0' on success, error code otherwise */ int dpseci_get_api_version(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 *major_ver, u16 *minor_ver) { struct fsl_mc_command cmd = { 0 }; struct dpseci_rsp_get_api_version *rsp_params; int err; cmd.header = mc_encode_cmd_header(DPSECI_CMDID_GET_API_VERSION, cmd_flags, 0); err = mc_send_command(mc_io, &cmd); if (err) return err; rsp_params = (struct dpseci_rsp_get_api_version *)cmd.params; *major_ver = le16_to_cpu(rsp_params->major); *minor_ver = le16_to_cpu(rsp_params->minor); return 0; } /** * dpseci_set_congestion_notification() - Set congestion group * notification configuration * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * @cfg: congestion notification configuration * * Return: '0' on success, error code otherwise */ int dpseci_set_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, const struct dpseci_congestion_notification_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpseci_cmd_congestion_notification *cmd_params; cmd.header = mc_encode_cmd_header( DPSECI_CMDID_SET_CONGESTION_NOTIFICATION, cmd_flags, token); cmd_params = (struct dpseci_cmd_congestion_notification *)cmd.params; cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode); cmd_params->priority = cfg->dest_cfg.priority; dpseci_set_field(cmd_params->options, CGN_DEST_TYPE, cfg->dest_cfg.dest_type); dpseci_set_field(cmd_params->options, CGN_UNITS, cfg->units); cmd_params->message_iova = cpu_to_le64(cfg->message_iova); cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx); cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry); cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit); return mc_send_command(mc_io, &cmd); } /** * dpseci_get_congestion_notification() - Get congestion group notification * configuration * @mc_io: Pointer to MC portal's I/O object * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @token: Token of DPSECI object * @cfg: congestion notification configuration * * Return: '0' on success, error code otherwise */ int 
dpseci_get_congestion_notification(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, struct dpseci_congestion_notification_cfg *cfg) { struct fsl_mc_command cmd = { 0 }; struct dpseci_cmd_congestion_notification *rsp_params; int err; cmd.header = mc_encode_cmd_header( DPSECI_CMDID_GET_CONGESTION_NOTIFICATION, cmd_flags, token); err = mc_send_command(mc_io, &cmd); if (err) return err; rsp_params = (struct dpseci_cmd_congestion_notification *)cmd.params; cfg->dest_cfg.dest_id = le32_to_cpu(rsp_params->dest_id); cfg->notification_mode = le16_to_cpu(rsp_params->notification_mode); cfg->dest_cfg.priority = rsp_params->priority; cfg->dest_cfg.dest_type = dpseci_get_field(rsp_params->options, CGN_DEST_TYPE); cfg->units = dpseci_get_field(rsp_params->options, CGN_UNITS); cfg->message_iova = le64_to_cpu(rsp_params->message_iova); cfg->message_ctx = le64_to_cpu(rsp_params->message_ctx); cfg->threshold_entry = le32_to_cpu(rsp_params->threshold_entry); cfg->threshold_exit = le32_to_cpu(rsp_params->threshold_exit); return 0; }
linux-master
drivers/crypto/caam/dpseci.c
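A minimal usage sketch for the command wrappers above, assuming the declarations from dpseci.h, an MC I/O portal and object token already obtained elsewhere (for example via dpseci_open(), not shown here), and cmd_flags of 0; example_dpseci_query() is an illustrative name, not part of the driver.

/* Sketch only: query an already-opened DPSECI object with the wrappers above. */
static int example_dpseci_query(struct fsl_mc_io *mc_io, u16 token)
{
	struct dpseci_rx_queue_attr rx_attr;
	struct dpseci_attr attr;
	u16 major, minor;
	int enabled, i, err;

	/* API version is a token-less command */
	err = dpseci_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	err = dpseci_is_enabled(mc_io, 0, token, &enabled);
	if (err)
		return err;

	err = dpseci_get_attributes(mc_io, 0, token, &attr);
	if (err)
		return err;

	/* Read back the frame queue ID of every Rx queue the object exposes */
	for (i = 0; i < attr.num_rx_queues; i++) {
		err = dpseci_get_rx_queue(mc_io, 0, token, i, &rx_attr);
		if (err)
			return err;
		pr_info("dpseci.%d: API %u.%u, %s, rxq %d -> fqid %u\n",
			attr.id, major, minor,
			enabled ? "enabled" : "disabled", i, rx_attr.fqid);
	}

	return 0;
}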
// SPDX-License-Identifier: GPL-2.0 /* * CAAM Error Reporting * * Copyright 2009-2011 Freescale Semiconductor, Inc. */ #include "compat.h" #include "regs.h" #include "desc.h" #include "error.h" #ifdef DEBUG #include <linux/highmem.h> void caam_dump_sg(const char *prefix_str, int prefix_type, int rowsize, int groupsize, struct scatterlist *sg, size_t tlen, bool ascii) { struct scatterlist *it; void *it_page; size_t len; void *buf; for (it = sg; it && tlen > 0 ; it = sg_next(it)) { /* * make sure the scatterlist's page * has a valid virtual memory mapping */ it_page = kmap_atomic(sg_page(it)); if (unlikely(!it_page)) { pr_err("caam_dump_sg: kmap failed\n"); return; } buf = it_page + it->offset; len = min_t(size_t, tlen, it->length); print_hex_dump_debug(prefix_str, prefix_type, rowsize, groupsize, buf, len, ascii); tlen -= len; kunmap_atomic(it_page); } } #else void caam_dump_sg(const char *prefix_str, int prefix_type, int rowsize, int groupsize, struct scatterlist *sg, size_t tlen, bool ascii) {} #endif /* DEBUG */ EXPORT_SYMBOL(caam_dump_sg); bool caam_little_end; EXPORT_SYMBOL(caam_little_end); bool caam_imx; EXPORT_SYMBOL(caam_imx); size_t caam_ptr_sz; EXPORT_SYMBOL(caam_ptr_sz); static const struct { u8 value; const char *error_text; } desc_error_list[] = { { 0x00, "No error." }, { 0x01, "SGT Length Error. The descriptor is trying to read more data than is contained in the SGT table." }, { 0x02, "SGT Null Entry Error." }, { 0x03, "Job Ring Control Error. There is a bad value in the Job Ring Control register." }, { 0x04, "Invalid Descriptor Command. The Descriptor Command field is invalid." }, { 0x05, "Reserved." }, { 0x06, "Invalid KEY Command" }, { 0x07, "Invalid LOAD Command" }, { 0x08, "Invalid STORE Command" }, { 0x09, "Invalid OPERATION Command" }, { 0x0A, "Invalid FIFO LOAD Command" }, { 0x0B, "Invalid FIFO STORE Command" }, { 0x0C, "Invalid MOVE/MOVE_LEN Command" }, { 0x0D, "Invalid JUMP Command. A nonlocal JUMP Command is invalid because the target is not a Job Header Command, or the jump is from a Trusted Descriptor to a Job Descriptor, or because the target Descriptor contains a Shared Descriptor." }, { 0x0E, "Invalid MATH Command" }, { 0x0F, "Invalid SIGNATURE Command" }, { 0x10, "Invalid Sequence Command. A SEQ IN PTR OR SEQ OUT PTR Command is invalid or a SEQ KEY, SEQ LOAD, SEQ FIFO LOAD, or SEQ FIFO STORE decremented the input or output sequence length below 0. This error may result if a built-in PROTOCOL Command has encountered a malformed PDU." }, { 0x11, "Skip data type invalid. The type must be 0xE or 0xF."}, { 0x12, "Shared Descriptor Header Error" }, { 0x13, "Header Error. Invalid length or parity, or certain other problems." }, { 0x14, "Burster Error. Burster has gotten to an illegal state" }, { 0x15, "Context Register Length Error. The descriptor is trying to read or write past the end of the Context Register. A SEQ LOAD or SEQ STORE with the VLF bit set was executed with too large a length in the variable length register (VSOL for SEQ STORE or VSIL for SEQ LOAD)." }, { 0x16, "DMA Error" }, { 0x17, "Reserved." }, { 0x1A, "Job failed due to JR reset" }, { 0x1B, "Job failed due to Fail Mode" }, { 0x1C, "DECO Watchdog timer timeout error" }, { 0x1D, "DECO tried to copy a key from another DECO but the other DECO's Key Registers were locked" }, { 0x1E, "DECO attempted to copy data from a DECO that had an unmasked Descriptor error" }, { 0x1F, "LIODN error. 
DECO was trying to share from itself or from another DECO but the two Non-SEQ LIODN values didn't match or the 'shared from' DECO's Descriptor required that the SEQ LIODNs be the same and they aren't." }, { 0x20, "DECO has completed a reset initiated via the DRR register" }, { 0x21, "Nonce error. When using EKT (CCM) key encryption option in the FIFO STORE Command, the Nonce counter reached its maximum value and this encryption mode can no longer be used." }, { 0x22, "Meta data is too large (> 511 bytes) for TLS decap (input frame; block ciphers) and IPsec decap (output frame, when doing the next header byte update) and DCRC (output frame)." }, { 0x23, "Read Input Frame error" }, { 0x24, "JDKEK, TDKEK or TDSK not loaded error" }, { 0x80, "DNR (do not run) error" }, { 0x81, "undefined protocol command" }, { 0x82, "invalid setting in PDB" }, { 0x83, "Anti-replay LATE error" }, { 0x84, "Anti-replay REPLAY error" }, { 0x85, "Sequence number overflow" }, { 0x86, "Sigver invalid signature" }, { 0x87, "DSA Sign Illegal test descriptor" }, { 0x88, "Protocol Format Error - A protocol has seen an error in the format of data received. When running RSA, this means that formatting with random padding was used, and did not follow the form: 0x00, 0x02, 8-to-N bytes of non-zero pad, 0x00, F data." }, { 0x89, "Protocol Size Error - A protocol has seen an error in size. When running RSA, pdb size N < (size of F) when no formatting is used; or pdb size N < (F + 11) when formatting is used." }, { 0xC1, "Blob Command error: Undefined mode" }, { 0xC2, "Blob Command error: Secure Memory Blob mode error" }, { 0xC4, "Blob Command error: Black Blob key or input size error" }, { 0xC5, "Blob Command error: Invalid key destination" }, { 0xC8, "Blob Command error: Trusted/Secure mode error" }, { 0xF0, "IPsec TTL or hop limit field either came in as 0, or was decremented to 0" }, { 0xF1, "3GPP HFN matches or exceeds the Threshold" }, }; static const struct { u8 value; const char *error_text; } qi_error_list[] = { { 0x00, "No error" }, { 0x1F, "Job terminated by FQ or ICID flush" }, { 0x20, "FD format error"}, { 0x21, "FD command format error"}, { 0x23, "FL format error"}, { 0x25, "CRJD specified in FD, but not enabled in FLC"}, { 0x30, "Max. buffer size too small"}, { 0x31, "DHR exceeds max. buffer size (allocate mode, S/G format)"}, { 0x32, "SGT exceeds max. buffer size (allocate mode, S/G format"}, { 0x33, "Size over/underflow (allocate mode)"}, { 0x34, "Size over/underflow (reuse mode)"}, { 0x35, "Length exceeds max. short length (allocate mode, S/G/ format)"}, { 0x36, "Memory footprint exceeds max. 
value (allocate mode, S/G/ format)"}, { 0x41, "SBC frame format not supported (allocate mode)"}, { 0x42, "Pool 0 invalid / pool 1 size < pool 0 size (allocate mode)"}, { 0x43, "Annotation output enabled but ASAR = 0 (allocate mode)"}, { 0x44, "Unsupported or reserved frame format or SGHR = 1 (reuse mode)"}, { 0x45, "DHR correction underflow (reuse mode, single buffer format)"}, { 0x46, "Annotation length exceeds offset (reuse mode)"}, { 0x48, "Annotation output enabled but ASA limited by ASAR (reuse mode)"}, { 0x49, "Data offset correction exceeds input frame data length (reuse mode)"}, { 0x4B, "Annotation output enabled but ASA cannot be expanded (frame list)"}, { 0x51, "Unsupported IF reuse mode"}, { 0x52, "Unsupported FL use mode"}, { 0x53, "Unsupported RJD use mode"}, { 0x54, "Unsupported inline descriptor use mode"}, { 0xC0, "Table buffer pool 0 depletion"}, { 0xC1, "Table buffer pool 1 depletion"}, { 0xC2, "Data buffer pool 0 depletion, no OF allocated"}, { 0xC3, "Data buffer pool 1 depletion, no OF allocated"}, { 0xC4, "Data buffer pool 0 depletion, partial OF allocated"}, { 0xC5, "Data buffer pool 1 depletion, partial OF allocated"}, { 0xD0, "FLC read error"}, { 0xD1, "FL read error"}, { 0xD2, "FL write error"}, { 0xD3, "OF SGT write error"}, { 0xD4, "PTA read error"}, { 0xD5, "PTA write error"}, { 0xD6, "OF SGT F-bit write error"}, { 0xD7, "ASA write error"}, { 0xE1, "FLC[ICR]=0 ICID error"}, { 0xE2, "FLC[ICR]=1 ICID error"}, { 0xE4, "source of ICID flush not trusted (BDI = 0)"}, }; static const char * const cha_id_list[] = { "", "AES", "DES", "ARC4", "MDHA", "RNG", "SNOW f8", "Kasumi f8/9", "PKHA", "CRCA", "SNOW f9", "ZUCE", "ZUCA", }; static const char * const err_id_list[] = { "No error.", "Mode error.", "Data size error.", "Key size error.", "PKHA A memory size error.", "PKHA B memory size error.", "Data arrived out of sequence error.", "PKHA divide-by-zero error.", "PKHA modulus even error.", "DES key parity error.", "ICV check failed.", "Hardware error.", "Unsupported CCM AAD size.", "Class 1 CHA is not reset", "Invalid CHA combination was selected", "Invalid CHA selected.", }; static const char * const rng_err_id_list[] = { "", "", "", "Instantiate", "Not instantiated", "Test instantiate", "Prediction resistance", "Prediction resistance and test request", "Uninstantiate", "Secure key generation", "", "Hardware error", "Continuous check" }; static int report_ccb_status(struct device *jrdev, const u32 status, const char *error) { u8 cha_id = (status & JRSTA_CCBERR_CHAID_MASK) >> JRSTA_CCBERR_CHAID_SHIFT; u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> JRSTA_DECOERR_INDEX_SHIFT; char *idx_str; const char *cha_str = "unidentified cha_id value 0x"; char cha_err_code[3] = { 0 }; const char *err_str = "unidentified err_id value 0x"; char err_err_code[3] = { 0 }; if (status & JRSTA_DECOERR_JUMP) idx_str = "jump tgt desc idx"; else idx_str = "desc idx"; if (cha_id < ARRAY_SIZE(cha_id_list)) cha_str = cha_id_list[cha_id]; else snprintf(cha_err_code, sizeof(cha_err_code), "%02x", cha_id); if ((cha_id << JRSTA_CCBERR_CHAID_SHIFT) == JRSTA_CCBERR_CHAID_RNG && err_id < ARRAY_SIZE(rng_err_id_list) && strlen(rng_err_id_list[err_id])) { /* RNG-only error */ err_str = rng_err_id_list[err_id]; } else { err_str = err_id_list[err_id]; } /* * CCB ICV check failures are part of normal operation life; * we leave the upper layers to do what they want with them. 
*/ if (err_id == JRSTA_CCBERR_ERRID_ICVCHK) return -EBADMSG; dev_err_ratelimited(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n", status, error, idx_str, idx, cha_str, cha_err_code, err_str, err_err_code); return -EINVAL; } static int report_jump_status(struct device *jrdev, const u32 status, const char *error) { dev_err(jrdev, "%08x: %s: %s() not implemented\n", status, error, __func__); return -EINVAL; } static int report_deco_status(struct device *jrdev, const u32 status, const char *error) { u8 err_id = status & JRSTA_DECOERR_ERROR_MASK; u8 idx = (status & JRSTA_DECOERR_INDEX_MASK) >> JRSTA_DECOERR_INDEX_SHIFT; char *idx_str; const char *err_str = "unidentified error value 0x"; char err_err_code[3] = { 0 }; int i; if (status & JRSTA_DECOERR_JUMP) idx_str = "jump tgt desc idx"; else idx_str = "desc idx"; for (i = 0; i < ARRAY_SIZE(desc_error_list); i++) if (desc_error_list[i].value == err_id) break; if (i != ARRAY_SIZE(desc_error_list) && desc_error_list[i].error_text) err_str = desc_error_list[i].error_text; else snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id); dev_err(jrdev, "%08x: %s: %s %d: %s%s\n", status, error, idx_str, idx, err_str, err_err_code); return -EINVAL; } static int report_qi_status(struct device *qidev, const u32 status, const char *error) { u8 err_id = status & JRSTA_QIERR_ERROR_MASK; const char *err_str = "unidentified error value 0x"; char err_err_code[3] = { 0 }; int i; for (i = 0; i < ARRAY_SIZE(qi_error_list); i++) if (qi_error_list[i].value == err_id) break; if (i != ARRAY_SIZE(qi_error_list) && qi_error_list[i].error_text) err_str = qi_error_list[i].error_text; else snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id); dev_err(qidev, "%08x: %s: %s%s\n", status, error, err_str, err_err_code); return -EINVAL; } static int report_jr_status(struct device *jrdev, const u32 status, const char *error) { dev_err(jrdev, "%08x: %s: %s() not implemented\n", status, error, __func__); return -EINVAL; } static int report_cond_code_status(struct device *jrdev, const u32 status, const char *error) { dev_err(jrdev, "%08x: %s: %s() not implemented\n", status, error, __func__); return -EINVAL; } int caam_strstatus(struct device *jrdev, u32 status, bool qi_v2) { static const struct stat_src { int (*report_ssed)(struct device *jrdev, const u32 status, const char *error); const char *error; } status_src[16] = { { NULL, "No error" }, { NULL, NULL }, { report_ccb_status, "CCB" }, { report_jump_status, "Jump" }, { report_deco_status, "DECO" }, { report_qi_status, "Queue Manager Interface" }, { report_jr_status, "Job Ring" }, { report_cond_code_status, "Condition Code" }, { NULL, NULL }, { NULL, NULL }, { NULL, NULL }, { NULL, NULL }, { NULL, NULL }, { NULL, NULL }, { NULL, NULL }, { NULL, NULL }, }; u32 ssrc = status >> JRSTA_SSRC_SHIFT; const char *error = status_src[ssrc].error; /* * If there is an error handling function, call it to report the error. * Otherwise print the error source name. */ if (status_src[ssrc].report_ssed) return status_src[ssrc].report_ssed(jrdev, status, error); if (error) dev_err(jrdev, "%d: %s\n", ssrc, error); else dev_err(jrdev, "%d: unknown error source\n", ssrc); return -EINVAL; } EXPORT_SYMBOL(caam_strstatus); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("FSL CAAM error reporting"); MODULE_AUTHOR("Freescale Semiconductor");
linux-master
drivers/crypto/caam/error.c
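The decoder above is normally reached through a convenience wrapper such as caam_jr_strstatus(), which the blob and PRNG drivers later in this directory use. Below is a minimal sketch of a job-ring completion callback built around it; struct example_result and example_job_done() are made-up names, and caam_jr_strstatus() is assumed to be the error.h wrapper around caam_strstatus().

/*
 * Sketch only: funnel the CAAM status word from a job-ring completion
 * through the decoder above.
 */
struct example_result {
	int err;
	struct completion done;
};

static void example_job_done(struct device *jrdev, u32 *desc, u32 status,
			     void *context)
{
	struct example_result *res = context;

	/*
	 * Zero means success. Anything else is printed in human-readable
	 * form and mapped to a negative errno (-EBADMSG for CCB ICV check
	 * failures, -EINVAL otherwise).
	 */
	res->err = status ? caam_jr_strstatus(jrdev, status) : 0;
	complete(&res->done);
}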
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* Copyright 2019 NXP */ #include <linux/module.h> #include <linux/device.h> #include <linux/debugfs.h> #include "dpseci-debugfs.h" static int dpseci_dbg_fqs_show(struct seq_file *file, void *offset) { struct dpaa2_caam_priv *priv = file->private; u32 fqid, fcnt, bcnt; int i, err; seq_printf(file, "FQ stats for %s:\n", dev_name(priv->dev)); seq_printf(file, "%s%16s%16s\n", "Rx-VFQID", "Pending frames", "Pending bytes"); for (i = 0; i < priv->num_pairs; i++) { fqid = priv->rx_queue_attr[i].fqid; err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt); if (err) continue; seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt); } seq_printf(file, "%s%16s%16s\n", "Tx-VFQID", "Pending frames", "Pending bytes"); for (i = 0; i < priv->num_pairs; i++) { fqid = priv->tx_queue_attr[i].fqid; err = dpaa2_io_query_fq_count(NULL, fqid, &fcnt, &bcnt); if (err) continue; seq_printf(file, "%5d%16u%16u\n", fqid, fcnt, bcnt); } return 0; } DEFINE_SHOW_ATTRIBUTE(dpseci_dbg_fqs); void dpaa2_dpseci_debugfs_init(struct dpaa2_caam_priv *priv) { priv->dfs_root = debugfs_create_dir(dev_name(priv->dev), NULL); debugfs_create_file("fq_stats", 0444, priv->dfs_root, priv, &dpseci_dbg_fqs_fops); } void dpaa2_dpseci_debugfs_exit(struct dpaa2_caam_priv *priv) { debugfs_remove_recursive(priv->dfs_root); }
linux-master
drivers/crypto/caam/dpseci-debugfs.c
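With debugfs mounted in the usual place, the file created above appears as /sys/kernel/debug/<device-name>/fq_stats and can be read with cat. A minimal sketch of pairing the two hooks with a driver's setup and teardown follows; example_probe()/example_remove() are illustrative only, the real caller sits elsewhere in the DPAA2 CAAM driver.

/* Sketch only: wire the debugfs hooks into a probe/remove pair. */
static int example_probe(struct dpaa2_caam_priv *priv)
{
	/* Creates <debugfs>/<dev_name>/fq_stats once the queues exist */
	dpaa2_dpseci_debugfs_init(priv);
	return 0;
}

static void example_remove(struct dpaa2_caam_priv *priv)
{
	/* Removes the whole per-device debugfs directory */
	dpaa2_dpseci_debugfs_exit(priv);
}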
// SPDX-License-Identifier: GPL-2.0 /* * CAAM/SEC 4.x QI transport/backend driver * Queue Interface backend functionality * * Copyright 2013-2016 Freescale Semiconductor, Inc. * Copyright 2016-2017, 2019-2020 NXP */ #include <linux/cpumask.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/string.h> #include <soc/fsl/qman.h> #include "debugfs.h" #include "regs.h" #include "qi.h" #include "desc.h" #include "intern.h" #include "desc_constr.h" #define PREHDR_RSLS_SHIFT 31 #define PREHDR_ABS BIT(25) /* * Use a reasonable backlog of frames (per CPU) as congestion threshold, * so that resources used by the in-flight buffers do not become a memory hog. */ #define MAX_RSP_FQ_BACKLOG_PER_CPU 256 #define CAAM_QI_ENQUEUE_RETRIES 10000 #define CAAM_NAPI_WEIGHT 63 /* * caam_napi - struct holding CAAM NAPI-related params * @irqtask: IRQ task for QI backend * @p: QMan portal */ struct caam_napi { struct napi_struct irqtask; struct qman_portal *p; }; /* * caam_qi_pcpu_priv - percpu private data structure to main list of pending * responses expected on each cpu. * @caam_napi: CAAM NAPI params * @net_dev: netdev used by NAPI * @rsp_fq: response FQ from CAAM */ struct caam_qi_pcpu_priv { struct caam_napi caam_napi; struct net_device net_dev; struct qman_fq *rsp_fq; } ____cacheline_aligned; static DEFINE_PER_CPU(struct caam_qi_pcpu_priv, pcpu_qipriv); static DEFINE_PER_CPU(int, last_cpu); /* * caam_qi_priv - CAAM QI backend private params * @cgr: QMan congestion group */ struct caam_qi_priv { struct qman_cgr cgr; }; static struct caam_qi_priv qipriv ____cacheline_aligned; /* * This is written by only one core - the one that initialized the CGR - and * read by multiple cores (all the others). */ bool caam_congested __read_mostly; EXPORT_SYMBOL(caam_congested); /* * This is a cache of buffers, from which the users of CAAM QI driver * can allocate short (CAAM_QI_MEMCACHE_SIZE) buffers. It's faster than * doing malloc on the hotpath. * NOTE: A more elegant solution would be to have some headroom in the frames * being processed. This could be added by the dpaa-ethernet driver. * This would pose a problem for userspace application processing which * cannot know of this limitation. So for now, this will work. * NOTE: The memcache is SMP-safe. No need to handle spinlocks in-here */ static struct kmem_cache *qi_cache; static void *caam_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova_addr) { phys_addr_t phys_addr; phys_addr = domain ? 
iommu_iova_to_phys(domain, iova_addr) : iova_addr; return phys_to_virt(phys_addr); } int caam_qi_enqueue(struct device *qidev, struct caam_drv_req *req) { struct qm_fd fd; dma_addr_t addr; int ret; int num_retries = 0; qm_fd_clear_fd(&fd); qm_fd_set_compound(&fd, qm_sg_entry_get_len(&req->fd_sgt[1])); addr = dma_map_single(qidev, req->fd_sgt, sizeof(req->fd_sgt), DMA_BIDIRECTIONAL); if (dma_mapping_error(qidev, addr)) { dev_err(qidev, "DMA mapping error for QI enqueue request\n"); return -EIO; } qm_fd_addr_set64(&fd, addr); do { ret = qman_enqueue(req->drv_ctx->req_fq, &fd); if (likely(!ret)) { refcount_inc(&req->drv_ctx->refcnt); return 0; } if (ret != -EBUSY) break; num_retries++; } while (num_retries < CAAM_QI_ENQUEUE_RETRIES); dev_err(qidev, "qman_enqueue failed: %d\n", ret); return ret; } EXPORT_SYMBOL(caam_qi_enqueue); static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, const union qm_mr_entry *msg) { const struct qm_fd *fd; struct caam_drv_req *drv_req; struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); struct caam_drv_private *priv = dev_get_drvdata(qidev); fd = &msg->ern.fd; drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); if (!drv_req) { dev_err(qidev, "Can't find original request for CAAM response\n"); return; } refcount_dec(&drv_req->drv_ctx->refcnt); if (qm_fd_get_format(fd) != qm_fd_compound) { dev_err(qidev, "Non-compound FD from CAAM\n"); return; } dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); if (fd->status) drv_req->cbk(drv_req, be32_to_cpu(fd->status)); else drv_req->cbk(drv_req, JRSTA_SSRC_QI); } static struct qman_fq *create_caam_req_fq(struct device *qidev, struct qman_fq *rsp_fq, dma_addr_t hwdesc, int fq_sched_flag) { int ret; struct qman_fq *req_fq; struct qm_mcc_initfq opts; req_fq = kzalloc(sizeof(*req_fq), GFP_ATOMIC); if (!req_fq) return ERR_PTR(-ENOMEM); req_fq->cb.ern = caam_fq_ern_cb; req_fq->cb.fqs = NULL; ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID | QMAN_FQ_FLAG_TO_DCPORTAL, req_fq); if (ret) { dev_err(qidev, "Failed to create session req FQ\n"); goto create_req_fq_fail; } memset(&opts, 0, sizeof(opts)); opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID); opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE); qm_fqd_set_destwq(&opts.fqd, qm_channel_caam, 2); opts.fqd.context_b = cpu_to_be32(qman_fq_fqid(rsp_fq)); qm_fqd_context_a_set64(&opts.fqd, hwdesc); opts.fqd.cgid = qipriv.cgr.cgrid; ret = qman_init_fq(req_fq, fq_sched_flag, &opts); if (ret) { dev_err(qidev, "Failed to init session req FQ\n"); goto init_req_fq_fail; } dev_dbg(qidev, "Allocated request FQ %u for CPU %u\n", req_fq->fqid, smp_processor_id()); return req_fq; init_req_fq_fail: qman_destroy_fq(req_fq); create_req_fq_fail: kfree(req_fq); return ERR_PTR(ret); } static int empty_retired_fq(struct device *qidev, struct qman_fq *fq) { int ret; ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT | QMAN_VOLATILE_FLAG_FINISH, QM_VDQCR_PRECEDENCE_VDQCR | QM_VDQCR_NUMFRAMES_TILLEMPTY); if (ret) { dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid); return ret; } do { struct qman_portal *p; p = qman_get_affine_portal(smp_processor_id()); qman_p_poll_dqrr(p, 16); } while (fq->flags & QMAN_FQ_STATE_NE); return 0; } static int kill_fq(struct device *qidev, struct qman_fq *fq) { u32 flags; int ret; ret = qman_retire_fq(fq, &flags); if (ret < 0) { dev_err(qidev, "qman_retire_fq 
failed: %d\n", ret); return ret; } if (!ret) goto empty_fq; /* Async FQ retirement condition */ if (ret == 1) { /* Retry till FQ gets in retired state */ do { msleep(20); } while (fq->state != qman_fq_state_retired); WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS); WARN_ON(fq->flags & QMAN_FQ_STATE_ORL); } empty_fq: if (fq->flags & QMAN_FQ_STATE_NE) { ret = empty_retired_fq(qidev, fq); if (ret) { dev_err(qidev, "empty_retired_fq fail for FQ: %u\n", fq->fqid); return ret; } } ret = qman_oos_fq(fq); if (ret) dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid); qman_destroy_fq(fq); kfree(fq); return ret; } static int empty_caam_fq(struct qman_fq *fq, struct caam_drv_ctx *drv_ctx) { int ret; int retries = 10; struct qm_mcr_queryfq_np np; /* Wait till the older CAAM FQ get empty */ do { ret = qman_query_fq_np(fq, &np); if (ret) return ret; if (!qm_mcr_np_get(&np, frm_cnt)) break; msleep(20); } while (1); /* Wait until pending jobs from this FQ are processed by CAAM */ do { if (refcount_read(&drv_ctx->refcnt) == 1) break; msleep(20); } while (--retries); if (!retries) dev_warn_once(drv_ctx->qidev, "%d frames from FQID %u still pending in CAAM\n", refcount_read(&drv_ctx->refcnt), fq->fqid); return 0; } int caam_drv_ctx_update(struct caam_drv_ctx *drv_ctx, u32 *sh_desc) { int ret; u32 num_words; struct qman_fq *new_fq, *old_fq; struct device *qidev = drv_ctx->qidev; num_words = desc_len(sh_desc); if (num_words > MAX_SDLEN) { dev_err(qidev, "Invalid descriptor len: %d words\n", num_words); return -EINVAL; } /* Note down older req FQ */ old_fq = drv_ctx->req_fq; /* Create a new req FQ in parked state */ new_fq = create_caam_req_fq(drv_ctx->qidev, drv_ctx->rsp_fq, drv_ctx->context_a, 0); if (IS_ERR(new_fq)) { dev_err(qidev, "FQ allocation for shdesc update failed\n"); return PTR_ERR(new_fq); } /* Hook up new FQ to context so that new requests keep queuing */ drv_ctx->req_fq = new_fq; /* Empty and remove the older FQ */ ret = empty_caam_fq(old_fq, drv_ctx); if (ret) { dev_err(qidev, "Old CAAM FQ empty failed: %d\n", ret); /* We can revert to older FQ */ drv_ctx->req_fq = old_fq; if (kill_fq(qidev, new_fq)) dev_warn(qidev, "New CAAM FQ kill failed\n"); return ret; } /* * Re-initialise pre-header. Set RSLS and SDLEN. * Update the shared descriptor for driver context. */ drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | num_words); drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); dma_sync_single_for_device(qidev, drv_ctx->context_a, sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr), DMA_BIDIRECTIONAL); /* Put the new FQ in scheduled state */ ret = qman_schedule_fq(new_fq); if (ret) { dev_err(qidev, "Fail to sched new CAAM FQ, ecode = %d\n", ret); /* * We can kill new FQ and revert to old FQ. 
* Since the desc is already modified, it is success case */ drv_ctx->req_fq = old_fq; if (kill_fq(qidev, new_fq)) dev_warn(qidev, "New CAAM FQ kill failed\n"); } else if (kill_fq(qidev, old_fq)) { dev_warn(qidev, "Old CAAM FQ kill failed\n"); } return 0; } EXPORT_SYMBOL(caam_drv_ctx_update); struct caam_drv_ctx *caam_drv_ctx_init(struct device *qidev, int *cpu, u32 *sh_desc) { size_t size; u32 num_words; dma_addr_t hwdesc; struct caam_drv_ctx *drv_ctx; const cpumask_t *cpus = qman_affine_cpus(); num_words = desc_len(sh_desc); if (num_words > MAX_SDLEN) { dev_err(qidev, "Invalid descriptor len: %d words\n", num_words); return ERR_PTR(-EINVAL); } drv_ctx = kzalloc(sizeof(*drv_ctx), GFP_ATOMIC); if (!drv_ctx) return ERR_PTR(-ENOMEM); /* * Initialise pre-header - set RSLS and SDLEN - and shared descriptor * and dma-map them. */ drv_ctx->prehdr[0] = cpu_to_caam32((1 << PREHDR_RSLS_SHIFT) | num_words); drv_ctx->prehdr[1] = cpu_to_caam32(PREHDR_ABS); memcpy(drv_ctx->sh_desc, sh_desc, desc_bytes(sh_desc)); size = sizeof(drv_ctx->prehdr) + sizeof(drv_ctx->sh_desc); hwdesc = dma_map_single(qidev, drv_ctx->prehdr, size, DMA_BIDIRECTIONAL); if (dma_mapping_error(qidev, hwdesc)) { dev_err(qidev, "DMA map error for preheader + shdesc\n"); kfree(drv_ctx); return ERR_PTR(-ENOMEM); } drv_ctx->context_a = hwdesc; /* If given CPU does not own the portal, choose another one that does */ if (!cpumask_test_cpu(*cpu, cpus)) { int *pcpu = &get_cpu_var(last_cpu); *pcpu = cpumask_next(*pcpu, cpus); if (*pcpu >= nr_cpu_ids) *pcpu = cpumask_first(cpus); *cpu = *pcpu; put_cpu_var(last_cpu); } drv_ctx->cpu = *cpu; /* Find response FQ hooked with this CPU */ drv_ctx->rsp_fq = per_cpu(pcpu_qipriv.rsp_fq, drv_ctx->cpu); /* Attach request FQ */ drv_ctx->req_fq = create_caam_req_fq(qidev, drv_ctx->rsp_fq, hwdesc, QMAN_INITFQ_FLAG_SCHED); if (IS_ERR(drv_ctx->req_fq)) { dev_err(qidev, "create_caam_req_fq failed\n"); dma_unmap_single(qidev, hwdesc, size, DMA_BIDIRECTIONAL); kfree(drv_ctx); return ERR_PTR(-ENOMEM); } /* init reference counter used to track references to request FQ */ refcount_set(&drv_ctx->refcnt, 1); drv_ctx->qidev = qidev; return drv_ctx; } EXPORT_SYMBOL(caam_drv_ctx_init); void *qi_cache_alloc(gfp_t flags) { return kmem_cache_alloc(qi_cache, flags); } EXPORT_SYMBOL(qi_cache_alloc); void qi_cache_free(void *obj) { kmem_cache_free(qi_cache, obj); } EXPORT_SYMBOL(qi_cache_free); static int caam_qi_poll(struct napi_struct *napi, int budget) { struct caam_napi *np = container_of(napi, struct caam_napi, irqtask); int cleaned = qman_p_poll_dqrr(np->p, budget); if (cleaned < budget) { napi_complete(napi); qman_p_irqsource_add(np->p, QM_PIRQ_DQRI); } return cleaned; } void caam_drv_ctx_rel(struct caam_drv_ctx *drv_ctx) { if (IS_ERR_OR_NULL(drv_ctx)) return; /* Remove request FQ */ if (kill_fq(drv_ctx->qidev, drv_ctx->req_fq)) dev_err(drv_ctx->qidev, "Crypto session req FQ kill failed\n"); dma_unmap_single(drv_ctx->qidev, drv_ctx->context_a, sizeof(drv_ctx->sh_desc) + sizeof(drv_ctx->prehdr), DMA_BIDIRECTIONAL); kfree(drv_ctx); } EXPORT_SYMBOL(caam_drv_ctx_rel); static void caam_qi_shutdown(void *data) { int i; struct device *qidev = data; struct caam_qi_priv *priv = &qipriv; const cpumask_t *cpus = qman_affine_cpus(); for_each_cpu(i, cpus) { struct napi_struct *irqtask; irqtask = &per_cpu_ptr(&pcpu_qipriv.caam_napi, i)->irqtask; napi_disable(irqtask); netif_napi_del(irqtask); if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i))) dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i); } qman_delete_cgr_safe(&priv->cgr); 
qman_release_cgrid(priv->cgr.cgrid); kmem_cache_destroy(qi_cache); } static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested) { caam_congested = congested; if (congested) { caam_debugfs_qi_congested(); pr_debug_ratelimited("CAAM entered congestion\n"); } else { pr_debug_ratelimited("CAAM exited congestion\n"); } } static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np, bool sched_napi) { if (sched_napi) { /* Disable QMan IRQ source and invoke NAPI */ qman_p_irqsource_remove(p, QM_PIRQ_DQRI); np->p = p; napi_schedule(&np->irqtask); return 1; } return 0; } static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p, struct qman_fq *rsp_fq, const struct qm_dqrr_entry *dqrr, bool sched_napi) { struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi); struct caam_drv_req *drv_req; const struct qm_fd *fd; struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); struct caam_drv_private *priv = dev_get_drvdata(qidev); u32 status; if (caam_qi_napi_schedule(p, caam_napi, sched_napi)) return qman_cb_dqrr_stop; fd = &dqrr->fd; drv_req = caam_iova_to_virt(priv->domain, qm_fd_addr_get64(fd)); if (unlikely(!drv_req)) { dev_err(qidev, "Can't find original request for caam response\n"); return qman_cb_dqrr_consume; } refcount_dec(&drv_req->drv_ctx->refcnt); status = be32_to_cpu(fd->status); if (unlikely(status)) { u32 ssrc = status & JRSTA_SSRC_MASK; u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; if (ssrc != JRSTA_SSRC_CCB_ERROR || err_id != JRSTA_CCBERR_ERRID_ICVCHK) dev_err_ratelimited(qidev, "Error: %#x in CAAM response FD\n", status); } if (unlikely(qm_fd_get_format(fd) != qm_fd_compound)) { dev_err(qidev, "Non-compound FD from CAAM\n"); return qman_cb_dqrr_consume; } dma_unmap_single(drv_req->drv_ctx->qidev, qm_fd_addr(fd), sizeof(drv_req->fd_sgt), DMA_BIDIRECTIONAL); drv_req->cbk(drv_req, status); return qman_cb_dqrr_consume; } static int alloc_rsp_fq_cpu(struct device *qidev, unsigned int cpu) { struct qm_mcc_initfq opts; struct qman_fq *fq; int ret; fq = kzalloc(sizeof(*fq), GFP_KERNEL); if (!fq) return -ENOMEM; fq->cb.dqrr = caam_rsp_fq_dqrr_cb; ret = qman_create_fq(0, QMAN_FQ_FLAG_NO_ENQUEUE | QMAN_FQ_FLAG_DYNAMIC_FQID, fq); if (ret) { dev_err(qidev, "Rsp FQ create failed\n"); kfree(fq); return -ENODEV; } memset(&opts, 0, sizeof(opts)); opts.we_mask = cpu_to_be16(QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_CONTEXTB | QM_INITFQ_WE_CONTEXTA | QM_INITFQ_WE_CGID); opts.fqd.fq_ctrl = cpu_to_be16(QM_FQCTRL_CTXASTASHING | QM_FQCTRL_CPCSTASH | QM_FQCTRL_CGE); qm_fqd_set_destwq(&opts.fqd, qman_affine_channel(cpu), 3); opts.fqd.cgid = qipriv.cgr.cgrid; opts.fqd.context_a.stashing.exclusive = QM_STASHING_EXCL_CTX | QM_STASHING_EXCL_DATA; qm_fqd_set_stashing(&opts.fqd, 0, 1, 1); ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts); if (ret) { dev_err(qidev, "Rsp FQ init failed\n"); kfree(fq); return -ENODEV; } per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq; dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu); return 0; } static int init_cgr(struct device *qidev) { int ret; struct qm_mcc_initcgr opts; const u64 val = (u64)cpumask_weight(qman_affine_cpus()) * MAX_RSP_FQ_BACKLOG_PER_CPU; ret = qman_alloc_cgrid(&qipriv.cgr.cgrid); if (ret) { dev_err(qidev, "CGR alloc failed for rsp FQs: %d\n", ret); return ret; } qipriv.cgr.cb = cgr_cb; memset(&opts, 0, sizeof(opts)); opts.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES | QM_CGR_WE_MODE); opts.cgr.cscn_en = QM_CGR_EN; opts.cgr.mode = 
QMAN_CGR_MODE_FRAME; qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, val, 1); ret = qman_create_cgr(&qipriv.cgr, QMAN_CGR_FLAG_USE_INIT, &opts); if (ret) { dev_err(qidev, "Error %d creating CAAM CGRID: %u\n", ret, qipriv.cgr.cgrid); return ret; } dev_dbg(qidev, "Congestion threshold set to %llu\n", val); return 0; } static int alloc_rsp_fqs(struct device *qidev) { int ret, i; const cpumask_t *cpus = qman_affine_cpus(); /*Now create response FQs*/ for_each_cpu(i, cpus) { ret = alloc_rsp_fq_cpu(qidev, i); if (ret) { dev_err(qidev, "CAAM rsp FQ alloc failed, cpu: %u", i); return ret; } } return 0; } static void free_rsp_fqs(void) { int i; const cpumask_t *cpus = qman_affine_cpus(); for_each_cpu(i, cpus) kfree(per_cpu(pcpu_qipriv.rsp_fq, i)); } int caam_qi_init(struct platform_device *caam_pdev) { int err, i; struct device *ctrldev = &caam_pdev->dev, *qidev; struct caam_drv_private *ctrlpriv; const cpumask_t *cpus = qman_affine_cpus(); ctrlpriv = dev_get_drvdata(ctrldev); qidev = ctrldev; /* Initialize the congestion detection */ err = init_cgr(qidev); if (err) { dev_err(qidev, "CGR initialization failed: %d\n", err); return err; } /* Initialise response FQs */ err = alloc_rsp_fqs(qidev); if (err) { dev_err(qidev, "Can't allocate CAAM response FQs: %d\n", err); free_rsp_fqs(); return err; } /* * Enable the NAPI contexts on each of the core which has an affine * portal. */ for_each_cpu(i, cpus) { struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i); struct caam_napi *caam_napi = &priv->caam_napi; struct napi_struct *irqtask = &caam_napi->irqtask; struct net_device *net_dev = &priv->net_dev; net_dev->dev = *qidev; INIT_LIST_HEAD(&net_dev->napi_list); netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll, CAAM_NAPI_WEIGHT); napi_enable(irqtask); } qi_cache = kmem_cache_create("caamqicache", CAAM_QI_MEMCACHE_SIZE, dma_get_cache_alignment(), 0, NULL); if (!qi_cache) { dev_err(qidev, "Can't allocate CAAM cache\n"); free_rsp_fqs(); return -ENOMEM; } caam_debugfs_qi_init(ctrlpriv); err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv); if (err) return err; dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); return 0; }
linux-master
drivers/crypto/caam/qi.c
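A minimal sketch of the lifecycle a qi.c consumer (in the style of caamalg_qi) follows with the exports above: bind a shared descriptor to a request FQ, submit compound-frame requests, and retire the FQ at teardown. Building the shared descriptor and the fd_sgt[] entries is driver-specific and only hinted at in comments; the example_* names are made up, and treating fd_sgt[0] as output and fd_sgt[1] as input is the compound-frame convention assumed by caam_qi_enqueue() above.

/* Sketch only: minimal consumer flow for the CAAM QI backend. */
static void example_done(struct caam_drv_req *req, u32 status)
{
	/* Runs from the response-FQ handler; a zero status means success */
	if (status)
		pr_err_ratelimited("caam qi request failed: 0x%x\n", status);
}

static struct caam_drv_ctx *example_session_create(struct device *qidev,
						   u32 *sh_desc)
{
	int cpu = 0;

	/*
	 * Binds prehdr + shared descriptor to a new request FQ. If the
	 * requested CPU has no affine QMan portal, another one is picked
	 * and written back through &cpu.
	 */
	return caam_drv_ctx_init(qidev, &cpu, sh_desc);
}

static int example_submit(struct device *qidev, struct caam_drv_ctx *drv_ctx,
			  struct caam_drv_req *req)
{
	/*
	 * req->fd_sgt[0] (output) and req->fd_sgt[1] (input) must already
	 * describe the compound frame; that setup is omitted here.
	 */
	req->drv_ctx = drv_ctx;
	req->cbk = example_done;

	/* Retries internally on -EBUSY, up to CAAM_QI_ENQUEUE_RETRIES times */
	return caam_qi_enqueue(qidev, req);
}

/*
 * On rekey, caam_drv_ctx_update(drv_ctx, new_sh_desc) swaps in a fresh
 * request FQ behind the scenes; at session teardown caam_drv_ctx_rel()
 * retires and frees it.
 */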
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Pengutronix, Steffen Trumtrar <[email protected]> * Copyright (C) 2021 Pengutronix, Ahmad Fatoum <[email protected]> */ #define pr_fmt(fmt) "caam blob_gen: " fmt #include <linux/bitfield.h> #include <linux/device.h> #include <soc/fsl/caam-blob.h> #include "compat.h" #include "desc_constr.h" #include "desc.h" #include "error.h" #include "intern.h" #include "jr.h" #include "regs.h" #define CAAM_BLOB_DESC_BYTES_MAX \ /* Command to initialize & stating length of descriptor */ \ (CAAM_CMD_SZ + \ /* Command to append the key-modifier + key-modifier data */ \ CAAM_CMD_SZ + CAAM_BLOB_KEYMOD_LENGTH + \ /* Command to include input key + pointer to the input key */ \ CAAM_CMD_SZ + CAAM_PTR_SZ_MAX + \ /* Command to include output key + pointer to the output key */ \ CAAM_CMD_SZ + CAAM_PTR_SZ_MAX + \ /* Command describing the operation to perform */ \ CAAM_CMD_SZ) struct caam_blob_priv { struct device jrdev; }; struct caam_blob_job_result { int err; struct completion completion; }; static void caam_blob_job_done(struct device *dev, u32 *desc, u32 err, void *context) { struct caam_blob_job_result *res = context; int ecode = 0; dev_dbg(dev, "%s %d: err 0x%x\n", __func__, __LINE__, err); if (err) ecode = caam_jr_strstatus(dev, err); res->err = ecode; /* * Upon completion, desc points to a buffer containing a CAAM job * descriptor which encapsulates data into an externally-storable * blob. */ complete(&res->completion); } int caam_process_blob(struct caam_blob_priv *priv, struct caam_blob_info *info, bool encap) { const struct caam_drv_private *ctrlpriv; struct caam_blob_job_result testres; struct device *jrdev = &priv->jrdev; dma_addr_t dma_in, dma_out; int op = OP_PCLID_BLOB; size_t output_len; u32 *desc; u32 moo; int ret; if (info->key_mod_len > CAAM_BLOB_KEYMOD_LENGTH) return -EINVAL; if (encap) { op |= OP_TYPE_ENCAP_PROTOCOL; output_len = info->input_len + CAAM_BLOB_OVERHEAD; } else { op |= OP_TYPE_DECAP_PROTOCOL; output_len = info->input_len - CAAM_BLOB_OVERHEAD; } desc = kzalloc(CAAM_BLOB_DESC_BYTES_MAX, GFP_KERNEL); if (!desc) return -ENOMEM; dma_in = dma_map_single(jrdev, info->input, info->input_len, DMA_TO_DEVICE); if (dma_mapping_error(jrdev, dma_in)) { dev_err(jrdev, "unable to map input DMA buffer\n"); ret = -ENOMEM; goto out_free; } dma_out = dma_map_single(jrdev, info->output, output_len, DMA_FROM_DEVICE); if (dma_mapping_error(jrdev, dma_out)) { dev_err(jrdev, "unable to map output DMA buffer\n"); ret = -ENOMEM; goto out_unmap_in; } ctrlpriv = dev_get_drvdata(jrdev->parent); moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status)); if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED) dev_warn(jrdev, "using insecure test key, enable HAB to use unique device key!\n"); /* * A data blob is encrypted using a blob key (BK); a random number. * The BK is used as an AES-CCM key. The initial block (B0) and the * initial counter (Ctr0) are generated automatically and stored in * Class 1 Context DWords 0+1+2+3. The random BK is stored in the * Class 1 Key Register. Operation Mode is set to AES-CCM. 
*/ init_job_desc(desc, 0); append_key_as_imm(desc, info->key_mod, info->key_mod_len, info->key_mod_len, CLASS_2 | KEY_DEST_CLASS_REG); append_seq_in_ptr_intlen(desc, dma_in, info->input_len, 0); append_seq_out_ptr_intlen(desc, dma_out, output_len, 0); append_operation(desc, op); print_hex_dump_debug("data@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 1, info->input, info->input_len, false); print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 1, desc, desc_bytes(desc), false); testres.err = 0; init_completion(&testres.completion); ret = caam_jr_enqueue(jrdev, desc, caam_blob_job_done, &testres); if (ret == -EINPROGRESS) { wait_for_completion(&testres.completion); ret = testres.err; print_hex_dump_debug("output@"__stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 1, info->output, output_len, false); } if (ret == 0) info->output_len = output_len; dma_unmap_single(jrdev, dma_out, output_len, DMA_FROM_DEVICE); out_unmap_in: dma_unmap_single(jrdev, dma_in, info->input_len, DMA_TO_DEVICE); out_free: kfree(desc); return ret; } EXPORT_SYMBOL(caam_process_blob); struct caam_blob_priv *caam_blob_gen_init(void) { struct caam_drv_private *ctrlpriv; struct device *jrdev; /* * caam_blob_gen_init() may expectedly fail with -ENODEV, e.g. when * CAAM driver didn't probe or when SoC lacks BLOB support. An * error would be harsh in this case, so we stick to info level. */ jrdev = caam_jr_alloc(); if (IS_ERR(jrdev)) { pr_info("job ring requested, but none currently available\n"); return ERR_PTR(-ENODEV); } ctrlpriv = dev_get_drvdata(jrdev->parent); if (!ctrlpriv->blob_present) { dev_info(jrdev, "no hardware blob generation support\n"); caam_jr_free(jrdev); return ERR_PTR(-ENODEV); } return container_of(jrdev, struct caam_blob_priv, jrdev); } EXPORT_SYMBOL(caam_blob_gen_init); void caam_blob_gen_exit(struct caam_blob_priv *priv) { caam_jr_free(&priv->jrdev); } EXPORT_SYMBOL(caam_blob_gen_exit);
linux-master
drivers/crypto/caam/blob_gen.c
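A minimal sketch of sealing a bare key into a blob with the API above, roughly what a trusted-key style user would do. It relies only on the struct caam_blob_info fields referenced by caam_process_blob() and the constants from <soc/fsl/caam-blob.h>; example_seal_key() and the key-modifier string are illustrative, and a real user would normally keep the caam_blob_priv handle around instead of allocating a job ring per call.

/* Sketch only: encapsulate key[0..key_len) into blob[]. */
static int example_seal_key(u8 *key, size_t key_len, u8 *blob,
			    size_t blob_size, size_t *blob_len)
{
	u8 kmod[] = "example-kmod";	/* must fit CAAM_BLOB_KEYMOD_LENGTH */
	struct caam_blob_priv *priv;
	struct caam_blob_info info = {
		.input		= key,
		.input_len	= key_len,
		.output		= blob,
		.key_mod	= kmod,
		.key_mod_len	= sizeof(kmod) - 1,
	};
	int ret;

	/* The blob is CAAM_BLOB_OVERHEAD bytes larger than the input */
	if (blob_size < key_len + CAAM_BLOB_OVERHEAD)
		return -EINVAL;

	priv = caam_blob_gen_init();		/* allocates a job ring */
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	ret = caam_process_blob(priv, &info, true);	/* true = encapsulate */
	if (!ret)
		*blob_len = info.output_len;

	caam_blob_gen_exit(priv);
	return ret;
}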
// SPDX-License-Identifier: GPL-2.0+ /* * Driver to expose SEC4 PRNG via crypto RNG API * * Copyright 2022 NXP * */ #include <linux/completion.h> #include <crypto/internal/rng.h> #include <linux/dma-mapping.h> #include <linux/kernel.h> #include "compat.h" #include "regs.h" #include "intern.h" #include "desc_constr.h" #include "jr.h" #include "error.h" /* * Length of used descriptors, see caam_init_desc() */ #define CAAM_PRNG_MAX_DESC_LEN (CAAM_CMD_SZ + \ CAAM_CMD_SZ + \ CAAM_CMD_SZ + CAAM_PTR_SZ_MAX) /* prng per-device context */ struct caam_prng_ctx { int err; struct completion done; }; struct caam_prng_alg { struct rng_alg rng; bool registered; }; static void caam_prng_done(struct device *jrdev, u32 *desc, u32 err, void *context) { struct caam_prng_ctx *jctx = context; jctx->err = err ? caam_jr_strstatus(jrdev, err) : 0; complete(&jctx->done); } static u32 *caam_init_reseed_desc(u32 *desc) { init_job_desc(desc, 0); /* + 1 cmd_sz */ /* Generate random bytes: + 1 cmd_sz */ append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG | OP_ALG_AS_FINALIZE); print_hex_dump_debug("prng reseed desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return desc; } static u32 *caam_init_prng_desc(u32 *desc, dma_addr_t dst_dma, u32 len) { init_job_desc(desc, 0); /* + 1 cmd_sz */ /* Generate random bytes: + 1 cmd_sz */ append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG); /* Store bytes: + 1 cmd_sz + caam_ptr_sz */ append_fifo_store(desc, dst_dma, len, FIFOST_TYPE_RNGSTORE); print_hex_dump_debug("prng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return desc; } static int caam_prng_generate(struct crypto_rng *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int dlen) { unsigned int aligned_dlen = ALIGN(dlen, dma_get_cache_alignment()); struct caam_prng_ctx ctx; struct device *jrdev; dma_addr_t dst_dma; u32 *desc; u8 *buf; int ret; if (aligned_dlen < dlen) return -EOVERFLOW; buf = kzalloc(aligned_dlen, GFP_KERNEL); if (!buf) return -ENOMEM; jrdev = caam_jr_alloc(); ret = PTR_ERR_OR_ZERO(jrdev); if (ret) { pr_err("Job Ring Device allocation failed\n"); kfree(buf); return ret; } desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL); if (!desc) { ret = -ENOMEM; goto out1; } dst_dma = dma_map_single(jrdev, buf, dlen, DMA_FROM_DEVICE); if (dma_mapping_error(jrdev, dst_dma)) { dev_err(jrdev, "Failed to map destination buffer memory\n"); ret = -ENOMEM; goto out; } init_completion(&ctx.done); ret = caam_jr_enqueue(jrdev, caam_init_prng_desc(desc, dst_dma, dlen), caam_prng_done, &ctx); if (ret == -EINPROGRESS) { wait_for_completion(&ctx.done); ret = ctx.err; } dma_unmap_single(jrdev, dst_dma, dlen, DMA_FROM_DEVICE); if (!ret) memcpy(dst, buf, dlen); out: kfree(desc); out1: caam_jr_free(jrdev); kfree(buf); return ret; } static void caam_prng_exit(struct crypto_tfm *tfm) {} static int caam_prng_init(struct crypto_tfm *tfm) { return 0; } static int caam_prng_seed(struct crypto_rng *tfm, const u8 *seed, unsigned int slen) { struct caam_prng_ctx ctx; struct device *jrdev; u32 *desc; int ret; if (slen) { pr_err("Seed length should be zero\n"); return -EINVAL; } jrdev = caam_jr_alloc(); ret = PTR_ERR_OR_ZERO(jrdev); if (ret) { pr_err("Job Ring Device allocation failed\n"); return ret; } desc = kzalloc(CAAM_PRNG_MAX_DESC_LEN, GFP_KERNEL); if (!desc) { caam_jr_free(jrdev); return -ENOMEM; } init_completion(&ctx.done); ret = caam_jr_enqueue(jrdev, caam_init_reseed_desc(desc), caam_prng_done, &ctx); if (ret == -EINPROGRESS) { wait_for_completion(&ctx.done); ret = 
ctx.err; } kfree(desc); caam_jr_free(jrdev); return ret; } static struct caam_prng_alg caam_prng_alg = { .rng = { .generate = caam_prng_generate, .seed = caam_prng_seed, .seedsize = 0, .base = { .cra_name = "stdrng", .cra_driver_name = "prng-caam", .cra_priority = 500, .cra_ctxsize = sizeof(struct caam_prng_ctx), .cra_module = THIS_MODULE, .cra_init = caam_prng_init, .cra_exit = caam_prng_exit, }, } }; void caam_prng_unregister(void *data) { if (caam_prng_alg.registered) crypto_unregister_rng(&caam_prng_alg.rng); } int caam_prng_register(struct device *ctrldev) { struct caam_drv_private *priv = dev_get_drvdata(ctrldev); u32 rng_inst; int ret = 0; /* Check for available RNG blocks before registration */ if (priv->era < 10) rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; else rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK; if (!rng_inst) { dev_dbg(ctrldev, "RNG block is not available... skipping registering algorithm\n"); return ret; } ret = crypto_register_rng(&caam_prng_alg.rng); if (ret) { dev_err(ctrldev, "couldn't register rng crypto alg: %d\n", ret); return ret; } caam_prng_alg.registered = true; dev_info(ctrldev, "rng crypto API alg registered %s\n", caam_prng_alg.rng.base.cra_driver_name); return 0; }
linux-master
drivers/crypto/caam/caamprng.c
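Once caam_prng_register() has run, the generator is reachable through the generic crypto RNG API under the "stdrng" name, or pinned to this implementation via its driver name "prng-caam". A minimal consumer sketch, assuming <crypto/rng.h>; example_get_random() is illustrative, and the explicit reseed is optional since the driver's seed size is zero.

#include <crypto/rng.h>

/* Sketch only: pull random bytes from the CAAM PRNG via the crypto API. */
static int example_get_random(u8 *buf, unsigned int len)
{
	struct crypto_rng *rng;
	int ret;

	rng = crypto_alloc_rng("prng-caam", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	/* seedsize is 0, so the seed op only accepts an empty seed */
	ret = crypto_rng_reset(rng, NULL, 0);
	if (!ret)
		ret = crypto_rng_get_bytes(rng, buf, len);

	crypto_free_rng(rng);
	return ret;
}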
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) /* * caam - Freescale FSL CAAM support for Public Key Cryptography * * Copyright 2016 Freescale Semiconductor, Inc. * Copyright 2018-2019, 2023 NXP * * There is no Shared Descriptor for PKC so that the Job Descriptor must carry * all the desired key parameters, input and output pointers. */ #include "compat.h" #include "regs.h" #include "intern.h" #include "jr.h" #include "error.h" #include "desc_constr.h" #include "sg_sw_sec4.h" #include "caampkc.h" #include <crypto/internal/engine.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #define DESC_RSA_PUB_LEN (2 * CAAM_CMD_SZ + SIZEOF_RSA_PUB_PDB) #define DESC_RSA_PRIV_F1_LEN (2 * CAAM_CMD_SZ + \ SIZEOF_RSA_PRIV_F1_PDB) #define DESC_RSA_PRIV_F2_LEN (2 * CAAM_CMD_SZ + \ SIZEOF_RSA_PRIV_F2_PDB) #define DESC_RSA_PRIV_F3_LEN (2 * CAAM_CMD_SZ + \ SIZEOF_RSA_PRIV_F3_PDB) #define CAAM_RSA_MAX_INPUT_SIZE 512 /* for a 4096-bit modulus */ /* buffer filled with zeros, used for padding */ static u8 *zero_buffer; /* * variable used to avoid double free of resources in case * algorithm registration was unsuccessful */ static bool init_done; struct caam_akcipher_alg { struct akcipher_engine_alg akcipher; bool registered; }; static void rsa_io_unmap(struct device *dev, struct rsa_edesc *edesc, struct akcipher_request *req) { struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); dma_unmap_sg(dev, req->dst, edesc->dst_nents, DMA_FROM_DEVICE); dma_unmap_sg(dev, req_ctx->fixup_src, edesc->src_nents, DMA_TO_DEVICE); if (edesc->sec4_sg_bytes) dma_unmap_single(dev, edesc->sec4_sg_dma, edesc->sec4_sg_bytes, DMA_TO_DEVICE); } static void rsa_pub_unmap(struct device *dev, struct rsa_edesc *edesc, struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct rsa_pub_pdb *pdb = &edesc->pdb.pub; dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->e_dma, key->e_sz, DMA_TO_DEVICE); } static void rsa_priv_f1_unmap(struct device *dev, struct rsa_edesc *edesc, struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); } static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc, struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; size_t p_sz = key->p_sz; size_t q_sz = key->q_sz; dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); } static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc, struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; size_t 
p_sz = key->p_sz; size_t q_sz = key->q_sz; dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL); } /* RSA Job Completion handler */ static void rsa_pub_done(struct device *dev, u32 *desc, u32 err, void *context) { struct akcipher_request *req = context; struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); struct rsa_edesc *edesc; int ecode = 0; bool has_bklog; if (err) ecode = caam_jr_strstatus(dev, err); edesc = req_ctx->edesc; has_bklog = edesc->bklog; rsa_pub_unmap(dev, edesc, req); rsa_io_unmap(dev, edesc, req); kfree(edesc); /* * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. */ if (!has_bklog) akcipher_request_complete(req, ecode); else crypto_finalize_akcipher_request(jrp->engine, req, ecode); } static void rsa_priv_f_done(struct device *dev, u32 *desc, u32 err, void *context) { struct akcipher_request *req = context; struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_drv_private_jr *jrp = dev_get_drvdata(dev); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct rsa_edesc *edesc; int ecode = 0; bool has_bklog; if (err) ecode = caam_jr_strstatus(dev, err); edesc = req_ctx->edesc; has_bklog = edesc->bklog; switch (key->priv_form) { case FORM1: rsa_priv_f1_unmap(dev, edesc, req); break; case FORM2: rsa_priv_f2_unmap(dev, edesc, req); break; case FORM3: rsa_priv_f3_unmap(dev, edesc, req); } rsa_io_unmap(dev, edesc, req); kfree(edesc); /* * If no backlog flag, the completion of the request is done * by CAAM, not crypto engine. */ if (!has_bklog) akcipher_request_complete(req, ecode); else crypto_finalize_akcipher_request(jrp->engine, req, ecode); } /** * caam_rsa_count_leading_zeros - Count leading zeros, need it to strip, * from a given scatterlist * * @sgl : scatterlist to count zeros from * @nbytes: number of zeros, in bytes, to strip * @flags : operation flags */ static int caam_rsa_count_leading_zeros(struct scatterlist *sgl, unsigned int nbytes, unsigned int flags) { struct sg_mapping_iter miter; int lzeros, ents; unsigned int len; unsigned int tbytes = nbytes; const u8 *buff; ents = sg_nents_for_len(sgl, nbytes); if (ents < 0) return ents; sg_miter_start(&miter, sgl, ents, SG_MITER_FROM_SG | flags); lzeros = 0; len = 0; while (nbytes > 0) { /* do not strip more than given bytes */ while (len && !*buff && lzeros < nbytes) { lzeros++; len--; buff++; } if (len && *buff) break; if (!sg_miter_next(&miter)) break; buff = miter.addr; len = miter.length; nbytes -= lzeros; lzeros = 0; } miter.consumed = lzeros; sg_miter_stop(&miter); nbytes -= lzeros; return tbytes - nbytes; } static struct rsa_edesc *rsa_edesc_alloc(struct akcipher_request *req, size_t desclen) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct device *dev = ctx->dev; struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct caam_rsa_key *key = &ctx->key; struct rsa_edesc *edesc; gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 
GFP_KERNEL : GFP_ATOMIC; int sg_flags = (flags == GFP_ATOMIC) ? SG_MITER_ATOMIC : 0; int sec4_sg_index, sec4_sg_len = 0, sec4_sg_bytes; int src_nents, dst_nents; int mapped_src_nents, mapped_dst_nents; unsigned int diff_size = 0; int lzeros; if (req->src_len > key->n_sz) { /* * strip leading zeros and * return the number of zeros to skip */ lzeros = caam_rsa_count_leading_zeros(req->src, req->src_len - key->n_sz, sg_flags); if (lzeros < 0) return ERR_PTR(lzeros); req_ctx->fixup_src = scatterwalk_ffwd(req_ctx->src, req->src, lzeros); req_ctx->fixup_src_len = req->src_len - lzeros; } else { /* * input src is less then n key modulus, * so there will be zero padding */ diff_size = key->n_sz - req->src_len; req_ctx->fixup_src = req->src; req_ctx->fixup_src_len = req->src_len; } src_nents = sg_nents_for_len(req_ctx->fixup_src, req_ctx->fixup_src_len); dst_nents = sg_nents_for_len(req->dst, req->dst_len); mapped_src_nents = dma_map_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); if (unlikely(!mapped_src_nents)) { dev_err(dev, "unable to map source\n"); return ERR_PTR(-ENOMEM); } mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); if (unlikely(!mapped_dst_nents)) { dev_err(dev, "unable to map destination\n"); goto src_fail; } if (!diff_size && mapped_src_nents == 1) sec4_sg_len = 0; /* no need for an input hw s/g table */ else sec4_sg_len = mapped_src_nents + !!diff_size; sec4_sg_index = sec4_sg_len; if (mapped_dst_nents > 1) sec4_sg_len += pad_sg_nents(mapped_dst_nents); else sec4_sg_len = pad_sg_nents(sec4_sg_len); sec4_sg_bytes = sec4_sg_len * sizeof(struct sec4_sg_entry); /* allocate space for base edesc, hw desc commands and link tables */ edesc = kzalloc(sizeof(*edesc) + desclen + sec4_sg_bytes, flags); if (!edesc) goto dst_fail; edesc->sec4_sg = (void *)edesc + sizeof(*edesc) + desclen; if (diff_size) dma_to_sec4_sg_one(edesc->sec4_sg, ctx->padding_dma, diff_size, 0); if (sec4_sg_index) sg_to_sec4_sg_last(req_ctx->fixup_src, req_ctx->fixup_src_len, edesc->sec4_sg + !!diff_size, 0); if (mapped_dst_nents > 1) sg_to_sec4_sg_last(req->dst, req->dst_len, edesc->sec4_sg + sec4_sg_index, 0); /* Save nents for later use in Job Descriptor */ edesc->src_nents = src_nents; edesc->dst_nents = dst_nents; req_ctx->edesc = edesc; if (!sec4_sg_bytes) return edesc; edesc->mapped_src_nents = mapped_src_nents; edesc->mapped_dst_nents = mapped_dst_nents; edesc->sec4_sg_dma = dma_map_single(dev, edesc->sec4_sg, sec4_sg_bytes, DMA_TO_DEVICE); if (dma_mapping_error(dev, edesc->sec4_sg_dma)) { dev_err(dev, "unable to map S/G table\n"); goto sec4_sg_fail; } edesc->sec4_sg_bytes = sec4_sg_bytes; print_hex_dump_debug("caampkc sec4_sg@" __stringify(__LINE__) ": ", DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg, edesc->sec4_sg_bytes, 1); return edesc; sec4_sg_fail: kfree(edesc); dst_fail: dma_unmap_sg(dev, req->dst, dst_nents, DMA_FROM_DEVICE); src_fail: dma_unmap_sg(dev, req_ctx->fixup_src, src_nents, DMA_TO_DEVICE); return ERR_PTR(-ENOMEM); } static int akcipher_do_one_req(struct crypto_engine *engine, void *areq) { struct akcipher_request *req = container_of(areq, struct akcipher_request, base); struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct device *jrdev = ctx->dev; u32 *desc = req_ctx->edesc->hw_desc; int ret; req_ctx->edesc->bklog = true; ret = caam_jr_enqueue(jrdev, desc, req_ctx->akcipher_op_done, req); if (ret == -ENOSPC && engine->retry_support) return 
ret; if (ret != -EINPROGRESS) { rsa_pub_unmap(jrdev, req_ctx->edesc, req); rsa_io_unmap(jrdev, req_ctx->edesc, req); kfree(req_ctx->edesc); } else { ret = 0; } return ret; } static int set_rsa_pub_pdb(struct akcipher_request *req, struct rsa_edesc *edesc) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct device *dev = ctx->dev; struct rsa_pub_pdb *pdb = &edesc->pdb.pub; int sec4_sg_index = 0; pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->n_dma)) { dev_err(dev, "Unable to map RSA modulus memory\n"); return -ENOMEM; } pdb->e_dma = dma_map_single(dev, key->e, key->e_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->e_dma)) { dev_err(dev, "Unable to map RSA public exponent memory\n"); dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); return -ENOMEM; } if (edesc->mapped_src_nents > 1) { pdb->sgf |= RSA_PDB_SGF_F; pdb->f_dma = edesc->sec4_sg_dma; sec4_sg_index += edesc->mapped_src_nents; } else { pdb->f_dma = sg_dma_address(req_ctx->fixup_src); } if (edesc->mapped_dst_nents > 1) { pdb->sgf |= RSA_PDB_SGF_G; pdb->g_dma = edesc->sec4_sg_dma + sec4_sg_index * sizeof(struct sec4_sg_entry); } else { pdb->g_dma = sg_dma_address(req->dst); } pdb->sgf |= (key->e_sz << RSA_PDB_E_SHIFT) | key->n_sz; pdb->f_len = req_ctx->fixup_src_len; return 0; } static int set_rsa_priv_f1_pdb(struct akcipher_request *req, struct rsa_edesc *edesc) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct device *dev = ctx->dev; struct rsa_priv_f1_pdb *pdb = &edesc->pdb.priv_f1; int sec4_sg_index = 0; pdb->n_dma = dma_map_single(dev, key->n, key->n_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->n_dma)) { dev_err(dev, "Unable to map modulus memory\n"); return -ENOMEM; } pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->d_dma)) { dev_err(dev, "Unable to map RSA private exponent memory\n"); dma_unmap_single(dev, pdb->n_dma, key->n_sz, DMA_TO_DEVICE); return -ENOMEM; } if (edesc->mapped_src_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_G; pdb->g_dma = edesc->sec4_sg_dma; sec4_sg_index += edesc->mapped_src_nents; } else { struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); pdb->g_dma = sg_dma_address(req_ctx->fixup_src); } if (edesc->mapped_dst_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_F; pdb->f_dma = edesc->sec4_sg_dma + sec4_sg_index * sizeof(struct sec4_sg_entry); } else { pdb->f_dma = sg_dma_address(req->dst); } pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz; return 0; } static int set_rsa_priv_f2_pdb(struct akcipher_request *req, struct rsa_edesc *edesc) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct device *dev = ctx->dev; struct rsa_priv_f2_pdb *pdb = &edesc->pdb.priv_f2; int sec4_sg_index = 0; size_t p_sz = key->p_sz; size_t q_sz = key->q_sz; pdb->d_dma = dma_map_single(dev, key->d, key->d_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->d_dma)) { dev_err(dev, "Unable to map RSA private exponent memory\n"); return -ENOMEM; } pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->p_dma)) { dev_err(dev, "Unable to map RSA prime factor p memory\n"); goto unmap_d; } 
pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->q_dma)) { dev_err(dev, "Unable to map RSA prime factor q memory\n"); goto unmap_p; } pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp1_dma)) { dev_err(dev, "Unable to map RSA tmp1 memory\n"); goto unmap_q; } pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp2_dma)) { dev_err(dev, "Unable to map RSA tmp2 memory\n"); goto unmap_tmp1; } if (edesc->mapped_src_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_G; pdb->g_dma = edesc->sec4_sg_dma; sec4_sg_index += edesc->mapped_src_nents; } else { struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); pdb->g_dma = sg_dma_address(req_ctx->fixup_src); } if (edesc->mapped_dst_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_F; pdb->f_dma = edesc->sec4_sg_dma + sec4_sg_index * sizeof(struct sec4_sg_entry); } else { pdb->f_dma = sg_dma_address(req->dst); } pdb->sgf |= (key->d_sz << RSA_PDB_D_SHIFT) | key->n_sz; pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz; return 0; unmap_tmp1: dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); unmap_q: dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); unmap_p: dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); unmap_d: dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE); return -ENOMEM; } static int set_rsa_priv_f3_pdb(struct akcipher_request *req, struct rsa_edesc *edesc) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct device *dev = ctx->dev; struct rsa_priv_f3_pdb *pdb = &edesc->pdb.priv_f3; int sec4_sg_index = 0; size_t p_sz = key->p_sz; size_t q_sz = key->q_sz; pdb->p_dma = dma_map_single(dev, key->p, p_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->p_dma)) { dev_err(dev, "Unable to map RSA prime factor p memory\n"); return -ENOMEM; } pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->q_dma)) { dev_err(dev, "Unable to map RSA prime factor q memory\n"); goto unmap_p; } pdb->dp_dma = dma_map_single(dev, key->dp, p_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->dp_dma)) { dev_err(dev, "Unable to map RSA exponent dp memory\n"); goto unmap_q; } pdb->dq_dma = dma_map_single(dev, key->dq, q_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->dq_dma)) { dev_err(dev, "Unable to map RSA exponent dq memory\n"); goto unmap_dp; } pdb->c_dma = dma_map_single(dev, key->qinv, p_sz, DMA_TO_DEVICE); if (dma_mapping_error(dev, pdb->c_dma)) { dev_err(dev, "Unable to map RSA CRT coefficient qinv memory\n"); goto unmap_dq; } pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp1_dma)) { dev_err(dev, "Unable to map RSA tmp1 memory\n"); goto unmap_qinv; } pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, pdb->tmp2_dma)) { dev_err(dev, "Unable to map RSA tmp2 memory\n"); goto unmap_tmp1; } if (edesc->mapped_src_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_G; pdb->g_dma = edesc->sec4_sg_dma; sec4_sg_index += edesc->mapped_src_nents; } else { struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); pdb->g_dma = sg_dma_address(req_ctx->fixup_src); } if (edesc->mapped_dst_nents > 1) { pdb->sgf |= RSA_PRIV_PDB_SGF_F; pdb->f_dma = edesc->sec4_sg_dma + sec4_sg_index * sizeof(struct sec4_sg_entry); } else { pdb->f_dma = 
sg_dma_address(req->dst); } pdb->sgf |= key->n_sz; pdb->p_q_len = (q_sz << RSA_PDB_Q_SHIFT) | p_sz; return 0; unmap_tmp1: dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL); unmap_qinv: dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE); unmap_dq: dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE); unmap_dp: dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE); unmap_q: dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE); unmap_p: dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE); return -ENOMEM; } static int akcipher_enqueue_req(struct device *jrdev, void (*cbk)(struct device *jrdev, u32 *desc, u32 err, void *context), struct akcipher_request *req) { struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev); struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct caam_rsa_req_ctx *req_ctx = akcipher_request_ctx(req); struct rsa_edesc *edesc = req_ctx->edesc; u32 *desc = edesc->hw_desc; int ret; req_ctx->akcipher_op_done = cbk; /* * Only the backlog request are sent to crypto-engine since the others * can be handled by CAAM, if free, especially since JR has up to 1024 * entries (more than the 10 entries from crypto-engine). */ if (req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG) ret = crypto_transfer_akcipher_request_to_engine(jrpriv->engine, req); else ret = caam_jr_enqueue(jrdev, desc, cbk, req); if ((ret != -EINPROGRESS) && (ret != -EBUSY)) { switch (key->priv_form) { case FORM1: rsa_priv_f1_unmap(jrdev, edesc, req); break; case FORM2: rsa_priv_f2_unmap(jrdev, edesc, req); break; case FORM3: rsa_priv_f3_unmap(jrdev, edesc, req); break; default: rsa_pub_unmap(jrdev, edesc, req); } rsa_io_unmap(jrdev, edesc, req); kfree(edesc); } return ret; } static int caam_rsa_enc(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; struct device *jrdev = ctx->dev; struct rsa_edesc *edesc; int ret; if (unlikely(!key->n || !key->e)) return -EINVAL; if (req->dst_len < key->n_sz) { req->dst_len = key->n_sz; dev_err(jrdev, "Output buffer length less than parameter n\n"); return -EOVERFLOW; } /* Allocate extended descriptor */ edesc = rsa_edesc_alloc(req, DESC_RSA_PUB_LEN); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Set RSA Encrypt Protocol Data Block */ ret = set_rsa_pub_pdb(req, edesc); if (ret) goto init_fail; /* Initialize Job Descriptor */ init_rsa_pub_desc(edesc->hw_desc, &edesc->pdb.pub); return akcipher_enqueue_req(jrdev, rsa_pub_done, req); init_fail: rsa_io_unmap(jrdev, edesc, req); kfree(edesc); return ret; } static int caam_rsa_dec_priv_f1(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct device *jrdev = ctx->dev; struct rsa_edesc *edesc; int ret; /* Allocate extended descriptor */ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F1_LEN); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Set RSA Decrypt Protocol Data Block - Private Key Form #1 */ ret = set_rsa_priv_f1_pdb(req, edesc); if (ret) goto init_fail; /* Initialize Job Descriptor */ init_rsa_priv_f1_desc(edesc->hw_desc, &edesc->pdb.priv_f1); return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); init_fail: rsa_io_unmap(jrdev, edesc, req); kfree(edesc); return ret; } static int caam_rsa_dec_priv_f2(struct akcipher_request *req) { struct crypto_akcipher *tfm = 
crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct device *jrdev = ctx->dev; struct rsa_edesc *edesc; int ret; /* Allocate extended descriptor */ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F2_LEN); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Set RSA Decrypt Protocol Data Block - Private Key Form #2 */ ret = set_rsa_priv_f2_pdb(req, edesc); if (ret) goto init_fail; /* Initialize Job Descriptor */ init_rsa_priv_f2_desc(edesc->hw_desc, &edesc->pdb.priv_f2); return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); init_fail: rsa_io_unmap(jrdev, edesc, req); kfree(edesc); return ret; } static int caam_rsa_dec_priv_f3(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct device *jrdev = ctx->dev; struct rsa_edesc *edesc; int ret; /* Allocate extended descriptor */ edesc = rsa_edesc_alloc(req, DESC_RSA_PRIV_F3_LEN); if (IS_ERR(edesc)) return PTR_ERR(edesc); /* Set RSA Decrypt Protocol Data Block - Private Key Form #3 */ ret = set_rsa_priv_f3_pdb(req, edesc); if (ret) goto init_fail; /* Initialize Job Descriptor */ init_rsa_priv_f3_desc(edesc->hw_desc, &edesc->pdb.priv_f3); return akcipher_enqueue_req(jrdev, rsa_priv_f_done, req); init_fail: rsa_io_unmap(jrdev, edesc, req); kfree(edesc); return ret; } static int caam_rsa_dec(struct akcipher_request *req) { struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; int ret; if (unlikely(!key->n || !key->d)) return -EINVAL; if (req->dst_len < key->n_sz) { req->dst_len = key->n_sz; dev_err(ctx->dev, "Output buffer length less than parameter n\n"); return -EOVERFLOW; } if (key->priv_form == FORM3) ret = caam_rsa_dec_priv_f3(req); else if (key->priv_form == FORM2) ret = caam_rsa_dec_priv_f2(req); else ret = caam_rsa_dec_priv_f1(req); return ret; } static void caam_rsa_free_key(struct caam_rsa_key *key) { kfree_sensitive(key->d); kfree_sensitive(key->p); kfree_sensitive(key->q); kfree_sensitive(key->dp); kfree_sensitive(key->dq); kfree_sensitive(key->qinv); kfree_sensitive(key->tmp1); kfree_sensitive(key->tmp2); kfree(key->e); kfree(key->n); memset(key, 0, sizeof(*key)); } static void caam_rsa_drop_leading_zeros(const u8 **ptr, size_t *nbytes) { while (!**ptr && *nbytes) { (*ptr)++; (*nbytes)--; } } /** * caam_read_rsa_crt - Used for reading dP, dQ, qInv CRT members. * dP, dQ and qInv could decode to less than corresponding p, q length, as the * BER-encoding requires that the minimum number of bytes be used to encode the * integer. dP, dQ, qInv decoded values have to be zero-padded to appropriate * length. * * @ptr : pointer to {dP, dQ, qInv} CRT member * @nbytes: length in bytes of {dP, dQ, qInv} CRT member * @dstlen: length in bytes of corresponding p or q prime factor */ static u8 *caam_read_rsa_crt(const u8 *ptr, size_t nbytes, size_t dstlen) { u8 *dst; caam_rsa_drop_leading_zeros(&ptr, &nbytes); if (!nbytes) return NULL; dst = kzalloc(dstlen, GFP_KERNEL); if (!dst) return NULL; memcpy(dst + (dstlen - nbytes), ptr, nbytes); return dst; } /** * caam_read_raw_data - Read a raw byte stream as a positive integer. * The function skips buffer's leading zeros, copies the remained data * to a buffer allocated in the GFP_KERNEL zone and returns * the address of the new buffer. 
* * @buf : The data to read * @nbytes: The amount of data to read */ static inline u8 *caam_read_raw_data(const u8 *buf, size_t *nbytes) { caam_rsa_drop_leading_zeros(&buf, nbytes); if (!*nbytes) return NULL; return kmemdup(buf, *nbytes, GFP_KERNEL); } static int caam_rsa_check_key_length(unsigned int len) { if (len > 4096) return -EINVAL; return 0; } static int caam_rsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct rsa_key raw_key = {NULL}; struct caam_rsa_key *rsa_key = &ctx->key; int ret; /* Free the old RSA key if any */ caam_rsa_free_key(rsa_key); ret = rsa_parse_pub_key(&raw_key, key, keylen); if (ret) return ret; /* Copy key in DMA zone */ rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL); if (!rsa_key->e) goto err; /* * Skip leading zeros and copy the positive integer to a buffer * allocated in the GFP_KERNEL zone. The decryption descriptor * expects a positive integer for the RSA modulus and uses its length as * decryption output length. */ rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz); if (!rsa_key->n) goto err; if (caam_rsa_check_key_length(raw_key.n_sz << 3)) { caam_rsa_free_key(rsa_key); return -EINVAL; } rsa_key->e_sz = raw_key.e_sz; rsa_key->n_sz = raw_key.n_sz; return 0; err: caam_rsa_free_key(rsa_key); return -ENOMEM; } static void caam_rsa_set_priv_key_form(struct caam_rsa_ctx *ctx, struct rsa_key *raw_key) { struct caam_rsa_key *rsa_key = &ctx->key; size_t p_sz = raw_key->p_sz; size_t q_sz = raw_key->q_sz; unsigned aligned_size; rsa_key->p = caam_read_raw_data(raw_key->p, &p_sz); if (!rsa_key->p) return; rsa_key->p_sz = p_sz; rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz); if (!rsa_key->q) goto free_p; rsa_key->q_sz = q_sz; aligned_size = ALIGN(raw_key->p_sz, dma_get_cache_alignment()); rsa_key->tmp1 = kzalloc(aligned_size, GFP_KERNEL); if (!rsa_key->tmp1) goto free_q; aligned_size = ALIGN(raw_key->q_sz, dma_get_cache_alignment()); rsa_key->tmp2 = kzalloc(aligned_size, GFP_KERNEL); if (!rsa_key->tmp2) goto free_tmp1; rsa_key->priv_form = FORM2; rsa_key->dp = caam_read_rsa_crt(raw_key->dp, raw_key->dp_sz, p_sz); if (!rsa_key->dp) goto free_tmp2; rsa_key->dq = caam_read_rsa_crt(raw_key->dq, raw_key->dq_sz, q_sz); if (!rsa_key->dq) goto free_dp; rsa_key->qinv = caam_read_rsa_crt(raw_key->qinv, raw_key->qinv_sz, q_sz); if (!rsa_key->qinv) goto free_dq; rsa_key->priv_form = FORM3; return; free_dq: kfree_sensitive(rsa_key->dq); free_dp: kfree_sensitive(rsa_key->dp); free_tmp2: kfree_sensitive(rsa_key->tmp2); free_tmp1: kfree_sensitive(rsa_key->tmp1); free_q: kfree_sensitive(rsa_key->q); free_p: kfree_sensitive(rsa_key->p); } static int caam_rsa_set_priv_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen) { struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct rsa_key raw_key = {NULL}; struct caam_rsa_key *rsa_key = &ctx->key; int ret; /* Free the old RSA key if any */ caam_rsa_free_key(rsa_key); ret = rsa_parse_priv_key(&raw_key, key, keylen); if (ret) return ret; /* Copy key in DMA zone */ rsa_key->d = kmemdup(raw_key.d, raw_key.d_sz, GFP_KERNEL); if (!rsa_key->d) goto err; rsa_key->e = kmemdup(raw_key.e, raw_key.e_sz, GFP_KERNEL); if (!rsa_key->e) goto err; /* * Skip leading zeros and copy the positive integer to a buffer * allocated in the GFP_KERNEL zone. The decryption descriptor * expects a positive integer for the RSA modulus and uses its length as * decryption output length. 
*/ rsa_key->n = caam_read_raw_data(raw_key.n, &raw_key.n_sz); if (!rsa_key->n) goto err; if (caam_rsa_check_key_length(raw_key.n_sz << 3)) { caam_rsa_free_key(rsa_key); return -EINVAL; } rsa_key->d_sz = raw_key.d_sz; rsa_key->e_sz = raw_key.e_sz; rsa_key->n_sz = raw_key.n_sz; caam_rsa_set_priv_key_form(ctx, &raw_key); return 0; err: caam_rsa_free_key(rsa_key); return -ENOMEM; } static unsigned int caam_rsa_max_size(struct crypto_akcipher *tfm) { struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); return ctx->key.n_sz; } /* Per session pkc's driver context creation function */ static int caam_rsa_init_tfm(struct crypto_akcipher *tfm) { struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); akcipher_set_reqsize(tfm, sizeof(struct caam_rsa_req_ctx)); ctx->dev = caam_jr_alloc(); if (IS_ERR(ctx->dev)) { pr_err("Job Ring Device allocation for transform failed\n"); return PTR_ERR(ctx->dev); } ctx->padding_dma = dma_map_single(ctx->dev, zero_buffer, CAAM_RSA_MAX_INPUT_SIZE - 1, DMA_TO_DEVICE); if (dma_mapping_error(ctx->dev, ctx->padding_dma)) { dev_err(ctx->dev, "unable to map padding\n"); caam_jr_free(ctx->dev); return -ENOMEM; } return 0; } /* Per session pkc's driver context cleanup function */ static void caam_rsa_exit_tfm(struct crypto_akcipher *tfm) { struct caam_rsa_ctx *ctx = akcipher_tfm_ctx_dma(tfm); struct caam_rsa_key *key = &ctx->key; dma_unmap_single(ctx->dev, ctx->padding_dma, CAAM_RSA_MAX_INPUT_SIZE - 1, DMA_TO_DEVICE); caam_rsa_free_key(key); caam_jr_free(ctx->dev); } static struct caam_akcipher_alg caam_rsa = { .akcipher.base = { .encrypt = caam_rsa_enc, .decrypt = caam_rsa_dec, .set_pub_key = caam_rsa_set_pub_key, .set_priv_key = caam_rsa_set_priv_key, .max_size = caam_rsa_max_size, .init = caam_rsa_init_tfm, .exit = caam_rsa_exit_tfm, .base = { .cra_name = "rsa", .cra_driver_name = "rsa-caam", .cra_priority = 3000, .cra_module = THIS_MODULE, .cra_ctxsize = sizeof(struct caam_rsa_ctx) + CRYPTO_DMA_PADDING, }, }, .akcipher.op = { .do_one_request = akcipher_do_one_req, }, }; /* Public Key Cryptography module initialization handler */ int caam_pkc_init(struct device *ctrldev) { struct caam_drv_private *priv = dev_get_drvdata(ctrldev); u32 pk_inst, pkha; int err; init_done = false; /* Determine public key hardware accelerator presence. */ if (priv->era < 10) { pk_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) & CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT; } else { pkha = rd_reg32(&priv->jr[0]->vreg.pkha); pk_inst = pkha & CHA_VER_NUM_MASK; /* * Newer CAAMs support partially disabled functionality. If this is the * case, the number is non-zero, but this bit is set to indicate that * no encryption or decryption is supported. Only signing and verifying * is supported. */ if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT) pk_inst = 0; } /* Do not register algorithms if PKHA is not present. */ if (!pk_inst) return 0; /* allocate zero buffer, used for padding input */ zero_buffer = kzalloc(CAAM_RSA_MAX_INPUT_SIZE - 1, GFP_KERNEL); if (!zero_buffer) return -ENOMEM; err = crypto_engine_register_akcipher(&caam_rsa.akcipher); if (err) { kfree(zero_buffer); dev_warn(ctrldev, "%s alg registration failed\n", caam_rsa.akcipher.base.base.cra_driver_name); } else { init_done = true; caam_rsa.registered = true; dev_info(ctrldev, "caam pkc algorithms registered in /proc/crypto\n"); } return err; } void caam_pkc_exit(void) { if (!init_done) return; if (caam_rsa.registered) crypto_engine_unregister_akcipher(&caam_rsa.akcipher); kfree(zero_buffer); }
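/*
 * Editor's illustrative sketch, not part of caampkc.c: how rsa_edesc_alloc()
 * above sizes the hardware SEC4 scatter/gather table. A single mapped source
 * segment with no zero-padding needs no input table; otherwise one extra
 * entry is reserved for the implicit padding segment, and the count is rounded
 * up by a pad helper (modelled here as alignment to 4 entries, which is an
 * assumption about pad_sg_nents()). Standalone C, no kernel headers required.
 */
#include <stddef.h>

static size_t pad_entries(size_t nents)
{
	return (nents + 3) & ~(size_t)3;	/* assumed ALIGN(nents, 4) */
}

static size_t rsa_sg_table_entries(size_t mapped_src_nents,
				   size_t mapped_dst_nents, size_t diff_size)
{
	size_t len;

	/* contiguous source with no leading-zero pad: no input table needed */
	if (!diff_size && mapped_src_nents == 1)
		len = 0;
	else
		len = mapped_src_nents + !!diff_size;

	/* a destination table is appended only when the output is scattered */
	if (mapped_dst_nents > 1)
		len += pad_entries(mapped_dst_nents);
	else
		len = pad_entries(len);

	return len;	/* multiply by sizeof(struct sec4_sg_entry) for bytes */
}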
linux-master
drivers/crypto/caam/caampkc.c
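A brief aside before the next file: caam_read_rsa_crt() above relies on the convention that the CRT members dP, dQ and qInv, which may BER-decode to fewer bytes than the matching prime, are left-padded with zeros to the prime's full length before being handed to the accelerator. The userspace sketch below is an editor's illustration of that fix-up, using plain calloc()/memcpy() instead of the kernel allocators and a hypothetical pad_crt_member() name; the explicit bound check is extra safety added for the standalone version.

#include <stdlib.h>
#include <string.h>

static unsigned char *pad_crt_member(const unsigned char *src, size_t nbytes,
				     size_t dstlen)
{
	unsigned char *dst;

	/* skip leading zeros, as caam_rsa_drop_leading_zeros() does */
	while (nbytes && !*src) {
		src++;
		nbytes--;
	}
	if (!nbytes || nbytes > dstlen)
		return NULL;

	dst = calloc(1, dstlen);
	if (!dst)
		return NULL;

	/* right-align the value so the buffer spans exactly dstlen bytes */
	memcpy(dst + (dstlen - nbytes), src, nbytes);
	return dst;
}

For example, a 31-byte dP belonging to a 32-byte prime p comes back as a 32-byte buffer whose first byte is zero, which is the fixed-width operand layout the descriptors above expect.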
// SPDX-License-Identifier: GPL-2.0+ /* * Shared descriptors for aead, skcipher algorithms * * Copyright 2016-2019 NXP */ #include "compat.h" #include "desc_constr.h" #include "caamalg_desc.h" /* * For aead functions, read payload and write payload, * both of which are specified in req->src and req->dst */ static inline void aead_append_src_dst(u32 *desc, u32 msg_type) { append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | msg_type | FIFOLD_TYPE_LASTBOTH); } /* Set DK bit in class 1 operation if shared */ static inline void append_dec_op1(u32 *desc, u32 type) { u32 *jump_cmd, *uncond_jump_cmd; /* DK bit is valid only for AES */ if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) { append_operation(desc, type | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); return; } jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT); uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); set_jump_tgt_here(desc, jump_cmd); append_operation(desc, type | OP_ALG_AS_INIT | OP_ALG_DECRYPT | OP_ALG_AAI_DK); set_jump_tgt_here(desc, uncond_jump_cmd); } /** * cnstr_shdsc_aead_null_encap - IPSec ESP encapsulation shared descriptor * (non-protocol) with no (null) encryption. * @desc: pointer to buffer used for descriptor construction * @adata: pointer to authentication transform definitions. * A split key is required for SEC Era < 6; the size of the split key * is specified in this case. Valid algorithm values - one of * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed * with OP_ALG_AAI_HMAC_PRECOMP. * @icvsize: integrity check value (ICV) size (truncated or full) * @era: SEC Era */ void cnstr_shdsc_aead_null_encap(u32 * const desc, struct alginfo *adata, unsigned int icvsize, int era) { u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; init_sh_desc(desc, HDR_SHARE_SERIAL); /* Skip if already shared */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); if (era < 6) { if (adata->key_inline) append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); else append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); } else { append_proto_dkp(desc, adata); } set_jump_tgt_here(desc, key_jump_cmd); /* assoclen + cryptlen = seqinlen */ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); /* Prepare to read and write cryptlen + assoclen bytes */ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); /* * MOVE_LEN opcode is not available in all SEC HW revisions, * thus need to do some magic, i.e. self-patch the descriptor * buffer. 
*/ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | (0x6 << MOVE_LEN_SHIFT)); write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | MOVE_WAITCOMP | (0x8 << MOVE_LEN_SHIFT)); /* Class 2 operation */ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); /* Read and write cryptlen bytes */ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); set_move_tgt_here(desc, read_move_cmd); set_move_tgt_here(desc, write_move_cmd); append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | MOVE_AUX_LS); /* Write ICV */ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); print_hex_dump_debug("aead null enc shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_aead_null_encap); /** * cnstr_shdsc_aead_null_decap - IPSec ESP decapsulation shared descriptor * (non-protocol) with no (null) decryption. * @desc: pointer to buffer used for descriptor construction * @adata: pointer to authentication transform definitions. * A split key is required for SEC Era < 6; the size of the split key * is specified in this case. Valid algorithm values - one of * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed * with OP_ALG_AAI_HMAC_PRECOMP. * @icvsize: integrity check value (ICV) size (truncated or full) * @era: SEC Era */ void cnstr_shdsc_aead_null_decap(u32 * const desc, struct alginfo *adata, unsigned int icvsize, int era) { u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd, *jump_cmd; init_sh_desc(desc, HDR_SHARE_SERIAL); /* Skip if already shared */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); if (era < 6) { if (adata->key_inline) append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); else append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); } else { append_proto_dkp(desc, adata); } set_jump_tgt_here(desc, key_jump_cmd); /* Class 2 operation */ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); /* assoclen + cryptlen = seqoutlen */ append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* Prepare to read and write cryptlen + assoclen bytes */ append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); /* * MOVE_LEN opcode is not available in all SEC HW revisions, * thus need to do some magic, i.e. self-patch the descriptor * buffer. */ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH2 | (0x6 << MOVE_LEN_SHIFT)); write_move_cmd = append_move(desc, MOVE_SRC_MATH2 | MOVE_DEST_DESCBUF | MOVE_WAITCOMP | (0x8 << MOVE_LEN_SHIFT)); /* Read and write cryptlen bytes */ aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); /* * Insert a NOP here, since we need at least 4 instructions between * code patching the descriptor buffer and the location being patched. 
*/ jump_cmd = append_jump(desc, JUMP_TEST_ALL); set_jump_tgt_here(desc, jump_cmd); set_move_tgt_here(desc, read_move_cmd); set_move_tgt_here(desc, write_move_cmd); append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | MOVE_AUX_LS); append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); /* Load ICV */ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); print_hex_dump_debug("aead null dec shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_aead_null_decap); static void init_sh_desc_key_aead(u32 * const desc, struct alginfo * const cdata, struct alginfo * const adata, const bool is_rfc3686, u32 *nonce, int era) { u32 *key_jump_cmd; unsigned int enckeylen = cdata->keylen; /* Note: Context registers are saved. */ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); /* Skip if already shared */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); /* * RFC3686 specific: * | key = {AUTH_KEY, ENC_KEY, NONCE} * | enckeylen = encryption key size + nonce size */ if (is_rfc3686) enckeylen -= CTR_RFC3686_NONCE_SIZE; if (era < 6) { if (adata->key_inline) append_key_as_imm(desc, adata->key_virt, adata->keylen_pad, adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); else append_key(desc, adata->key_dma, adata->keylen, CLASS_2 | KEY_DEST_MDHA_SPLIT | KEY_ENC); } else { append_proto_dkp(desc, adata); } if (cdata->key_inline) append_key_as_imm(desc, cdata->key_virt, enckeylen, enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); else append_key(desc, cdata->key_dma, enckeylen, CLASS_1 | KEY_DEST_CLASS_REG); /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) { append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, LDST_CLASS_IND_CCB | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); append_move(desc, MOVE_SRC_OUTFIFO | MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) | (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); } set_jump_tgt_here(desc, key_jump_cmd); } /** * cnstr_shdsc_aead_encap - IPSec ESP encapsulation shared descriptor * (non-protocol). * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. * @adata: pointer to authentication transform definitions. * A split key is required for SEC Era < 6; the size of the split key * is specified in this case. Valid algorithm values - one of * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed * with OP_ALG_AAI_HMAC_PRECOMP. * @ivsize: initialization vector size * @icvsize: integrity check value (ICV) size (truncated or full) * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @nonce: pointer to rfc3686 nonce * @ctx1_iv_off: IV offset in CONTEXT1 register * @is_qi: true when called from caam/qi * @era: SEC Era */ void cnstr_shdsc_aead_encap(u32 * const desc, struct alginfo *cdata, struct alginfo *adata, unsigned int ivsize, unsigned int icvsize, const bool is_rfc3686, u32 *nonce, const u32 ctx1_iv_off, const bool is_qi, int era) { /* Note: Context registers are saved. 
*/ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); /* Class 2 operation */ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); if (is_qi) { u32 *wait_load_cmd; /* REG3 = assoclen */ append_seq_load(desc, 4, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 | (4 << LDST_OFFSET_SHIFT)); wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_CALM | JUMP_COND_NCP | JUMP_COND_NOP | JUMP_COND_NIP | JUMP_COND_NIFP); set_jump_tgt_here(desc, wait_load_cmd); append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | (ctx1_iv_off << LDST_OFFSET_SHIFT)); } /* Read and write assoclen bytes */ if (is_qi || era < 3) { append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); } else { append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); } /* Skip assoc data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* read assoc before reading payload */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | FIFOLDST_VLF); /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << LDST_OFFSET_SHIFT)); /* Class 1 operation */ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); /* Read and write cryptlen bytes */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); /* Write ICV */ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); print_hex_dump_debug("aead enc shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_aead_encap); /** * cnstr_shdsc_aead_decap - IPSec ESP decapsulation shared descriptor * (non-protocol). * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. * @adata: pointer to authentication transform definitions. * A split key is required for SEC Era < 6; the size of the split key * is specified in this case. Valid algorithm values - one of * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed * with OP_ALG_AAI_HMAC_PRECOMP. * @ivsize: initialization vector size * @icvsize: integrity check value (ICV) size (truncated or full) * @geniv: whether to generate Encrypted Chain IV * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @nonce: pointer to rfc3686 nonce * @ctx1_iv_off: IV offset in CONTEXT1 register * @is_qi: true when called from caam/qi * @era: SEC Era */ void cnstr_shdsc_aead_decap(u32 * const desc, struct alginfo *cdata, struct alginfo *adata, unsigned int ivsize, unsigned int icvsize, const bool geniv, const bool is_rfc3686, u32 *nonce, const u32 ctx1_iv_off, const bool is_qi, int era) { /* Note: Context registers are saved. 
*/ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); /* Class 2 operation */ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); if (is_qi) { u32 *wait_load_cmd; /* REG3 = assoclen */ append_seq_load(desc, 4, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 | (4 << LDST_OFFSET_SHIFT)); wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_CALM | JUMP_COND_NCP | JUMP_COND_NOP | JUMP_COND_NIP | JUMP_COND_NIFP); set_jump_tgt_here(desc, wait_load_cmd); if (!geniv) append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | (ctx1_iv_off << LDST_OFFSET_SHIFT)); } /* Read and write assoclen bytes */ if (is_qi || era < 3) { append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); if (geniv) append_math_add_imm_u32(desc, VARSEQOUTLEN, REG3, IMM, ivsize); else append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); } else { append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); if (geniv) append_math_add_imm_u32(desc, VARSEQOUTLEN, DPOVRD, IMM, ivsize); else append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); } /* Skip assoc data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* read assoc before reading payload */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | KEY_VLF); if (geniv) { append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | (ctx1_iv_off << LDST_OFFSET_SHIFT)); append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | ivsize); } /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << LDST_OFFSET_SHIFT)); /* Choose operation */ if (ctx1_iv_off) append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); else append_dec_op1(desc, cdata->algtype); /* Read and write cryptlen bytes */ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); aead_append_src_dst(desc, FIFOLD_TYPE_MSG); /* Load ICV */ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); print_hex_dump_debug("aead dec shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_aead_decap); /** * cnstr_shdsc_aead_givencap - IPSec ESP encapsulation shared descriptor * (non-protocol) with HW-generated initialization * vector. * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128. * @adata: pointer to authentication transform definitions. * A split key is required for SEC Era < 6; the size of the split key * is specified in this case. Valid algorithm values - one of * OP_ALG_ALGSEL_{MD5, SHA1, SHA224, SHA256, SHA384, SHA512} ANDed * with OP_ALG_AAI_HMAC_PRECOMP. 
* @ivsize: initialization vector size * @icvsize: integrity check value (ICV) size (truncated or full) * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @nonce: pointer to rfc3686 nonce * @ctx1_iv_off: IV offset in CONTEXT1 register * @is_qi: true when called from caam/qi * @era: SEC Era */ void cnstr_shdsc_aead_givencap(u32 * const desc, struct alginfo *cdata, struct alginfo *adata, unsigned int ivsize, unsigned int icvsize, const bool is_rfc3686, u32 *nonce, const u32 ctx1_iv_off, const bool is_qi, int era) { u32 geniv, moveiv; u32 *wait_cmd; /* Note: Context registers are saved. */ init_sh_desc_key_aead(desc, cdata, adata, is_rfc3686, nonce, era); if (is_qi) { u32 *wait_load_cmd; /* REG3 = assoclen */ append_seq_load(desc, 4, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 | (4 << LDST_OFFSET_SHIFT)); wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_CALM | JUMP_COND_NCP | JUMP_COND_NOP | JUMP_COND_NIP | JUMP_COND_NIFP); set_jump_tgt_here(desc, wait_load_cmd); } if (is_rfc3686) { if (is_qi) append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | (ctx1_iv_off << LDST_OFFSET_SHIFT)); goto copy_iv; } /* Generate IV */ geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO | NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 | NFIFOENTRY_PTYPE_RND | (ivsize << NFIFOENTRY_DLEN_SHIFT); append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); append_move(desc, MOVE_WAITCOMP | MOVE_SRC_INFIFO | MOVE_DEST_CLASS1CTX | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | (ivsize << MOVE_LEN_SHIFT)); append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); copy_iv: /* Copy IV to class 1 context */ append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | (ctx1_iv_off << MOVE_OFFSET_SHIFT) | (ivsize << MOVE_LEN_SHIFT)); /* Return to encryption */ append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); /* Read and write assoclen bytes */ if (is_qi || era < 3) { append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); } else { append_math_add(desc, VARSEQINLEN, ZERO, DPOVRD, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, DPOVRD, CAAM_CMD_SZ); } /* Skip assoc data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* read assoc before reading payload */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | KEY_VLF); /* Copy iv from outfifo to class 2 fifo */ moveiv = NFIFOENTRY_STYPE_OFIFO | NFIFOENTRY_DEST_CLASS2 | NFIFOENTRY_DTYPE_MSG | (ivsize << NFIFOENTRY_DLEN_SHIFT); append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM); append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB | LDST_SRCDST_WORD_DATASZ_REG | LDST_IMM); /* Load Counter into CONTEXT1 reg */ if (is_rfc3686) append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << LDST_OFFSET_SHIFT)); /* Class 1 operation */ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); /* Will write ivsize + cryptlen */ append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* Not need to reload iv */ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); /* Will read cryptlen */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* * Wait for IV transfer (ofifo -> class2) to finish before starting * ciphertext transfer (ofifo -> external memory). 
*/ wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NIFP); set_jump_tgt_here(desc, wait_cmd); append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); /* Write ICV */ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); print_hex_dump_debug("aead givenc shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_aead_givencap); /** * cnstr_shdsc_gcm_encap - gcm encapsulation shared descriptor * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. * @ivsize: initialization vector size * @icvsize: integrity check value (ICV) size (truncated or full) * @is_qi: true when called from caam/qi */ void cnstr_shdsc_gcm_encap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, unsigned int icvsize, const bool is_qi) { u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1, *zero_assoc_jump_cmd2; init_sh_desc(desc, HDR_SHARE_SERIAL); /* skip key loading if they are loaded due to sharing */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); if (cdata->key_inline) append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); else append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); set_jump_tgt_here(desc, key_jump_cmd); /* class 1 operation */ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); if (is_qi) { u32 *wait_load_cmd; /* REG3 = assoclen */ append_seq_load(desc, 4, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 | (4 << LDST_OFFSET_SHIFT)); wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_CALM | JUMP_COND_NCP | JUMP_COND_NOP | JUMP_COND_NIP | JUMP_COND_NIFP); set_jump_tgt_here(desc, wait_load_cmd); append_math_sub_imm_u32(desc, VARSEQOUTLEN, SEQINLEN, IMM, ivsize); } else { append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); } /* if assoclen + cryptlen is ZERO, skip to ICV write */ zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_MATH_Z); if (is_qi) append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); /* if assoclen is ZERO, skip reading the assoc data */ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_MATH_Z); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); /* skip assoc data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* cryptlen = seqinlen - assoclen */ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); /* if cryptlen is ZERO jump to zero-payload commands */ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_MATH_Z); /* read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); set_jump_tgt_here(desc, zero_assoc_jump_cmd1); append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* write encrypted data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); /* read payload data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); /* jump to ICV writing */ if (is_qi) append_jump(desc, JUMP_TEST_ALL | 4); else 
append_jump(desc, JUMP_TEST_ALL | 2); /* zero-payload commands */ set_jump_tgt_here(desc, zero_payload_jump_cmd); /* read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); if (is_qi) /* jump to ICV writing */ append_jump(desc, JUMP_TEST_ALL | 2); /* There is no input data */ set_jump_tgt_here(desc, zero_assoc_jump_cmd2); if (is_qi) append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1 | FIFOLD_TYPE_LAST1); /* write ICV */ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); print_hex_dump_debug("gcm enc shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_gcm_encap); /** * cnstr_shdsc_gcm_decap - gcm decapsulation shared descriptor * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. * @ivsize: initialization vector size * @icvsize: integrity check value (ICV) size (truncated or full) * @is_qi: true when called from caam/qi */ void cnstr_shdsc_gcm_decap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, unsigned int icvsize, const bool is_qi) { u32 *key_jump_cmd, *zero_payload_jump_cmd, *zero_assoc_jump_cmd1; init_sh_desc(desc, HDR_SHARE_SERIAL); /* skip key loading if they are loaded due to sharing */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); if (cdata->key_inline) append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); else append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); set_jump_tgt_here(desc, key_jump_cmd); /* class 1 operation */ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); if (is_qi) { u32 *wait_load_cmd; /* REG3 = assoclen */ append_seq_load(desc, 4, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 | (4 << LDST_OFFSET_SHIFT)); wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_CALM | JUMP_COND_NCP | JUMP_COND_NOP | JUMP_COND_NIP | JUMP_COND_NIFP); set_jump_tgt_here(desc, wait_load_cmd); append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); } /* if assoclen is ZERO, skip reading the assoc data */ append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_MATH_Z); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); /* skip assoc data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); set_jump_tgt_here(desc, zero_assoc_jump_cmd1); /* cryptlen = seqoutlen - assoclen */ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* jump to zero-payload command if cryptlen is zero */ zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_MATH_Z); append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* store encrypted data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); /* read payload data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); /* zero-payload command */ set_jump_tgt_here(desc, zero_payload_jump_cmd); /* read ICV */ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | 
FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); print_hex_dump_debug("gcm dec shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_gcm_decap); /** * cnstr_shdsc_rfc4106_encap - IPSec ESP gcm encapsulation shared descriptor * (non-protocol). * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. * @ivsize: initialization vector size * @icvsize: integrity check value (ICV) size (truncated or full) * @is_qi: true when called from caam/qi * * Input sequence: AAD | PTXT * Output sequence: AAD | CTXT | ICV * AAD length (assoclen), which includes the IV length, is available in Math3. */ void cnstr_shdsc_rfc4106_encap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, unsigned int icvsize, const bool is_qi) { u32 *key_jump_cmd, *zero_cryptlen_jump_cmd, *skip_instructions; init_sh_desc(desc, HDR_SHARE_SERIAL); /* Skip key loading if it is loaded due to sharing */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); if (cdata->key_inline) append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); else append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); set_jump_tgt_here(desc, key_jump_cmd); /* Class 1 operation */ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); if (is_qi) { u32 *wait_load_cmd; /* REG3 = assoclen */ append_seq_load(desc, 4, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 | (4 << LDST_OFFSET_SHIFT)); wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_CALM | JUMP_COND_NCP | JUMP_COND_NOP | JUMP_COND_NIP | JUMP_COND_NIFP); set_jump_tgt_here(desc, wait_load_cmd); /* Read salt and IV */ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); } append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); /* Skip AAD */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* Read cryptlen and set this value into VARSEQOUTLEN */ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); /* If cryptlen is ZERO jump to AAD command */ zero_cryptlen_jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_MATH_Z); /* Read AAD data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA); /* Skip IV */ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); append_math_add(desc, VARSEQINLEN, VARSEQOUTLEN, REG0, CAAM_CMD_SZ); /* Write encrypted data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); /* Read payload data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); /* Jump instructions to avoid double reading of AAD */ skip_instructions = append_jump(desc, JUMP_TEST_ALL); /* There is no input data, cryptlen = 0 */ set_jump_tgt_here(desc, zero_cryptlen_jump_cmd); /* Read AAD */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST1); set_jump_tgt_here(desc, skip_instructions); /* Write ICV */ append_seq_store(desc, icvsize, 
LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); print_hex_dump_debug("rfc4106 enc shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_rfc4106_encap); /** * cnstr_shdsc_rfc4106_decap - IPSec ESP gcm decapsulation shared descriptor * (non-protocol). * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. * @ivsize: initialization vector size * @icvsize: integrity check value (ICV) size (truncated or full) * @is_qi: true when called from caam/qi */ void cnstr_shdsc_rfc4106_decap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, unsigned int icvsize, const bool is_qi) { u32 *key_jump_cmd; init_sh_desc(desc, HDR_SHARE_SERIAL); /* Skip key loading if it is loaded due to sharing */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); if (cdata->key_inline) append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); else append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); set_jump_tgt_here(desc, key_jump_cmd); /* Class 1 operation */ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); if (is_qi) { u32 *wait_load_cmd; /* REG3 = assoclen */ append_seq_load(desc, 4, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 | (4 << LDST_OFFSET_SHIFT)); wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_CALM | JUMP_COND_NCP | JUMP_COND_NOP | JUMP_COND_NIP | JUMP_COND_NIFP); set_jump_tgt_here(desc, wait_load_cmd); /* Read salt and IV */ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); } append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, ivsize); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); /* Read assoc data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_FLUSH1); /* Skip IV */ append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_SKIP); /* Will read cryptlen bytes */ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ); /* Workaround for erratum A-005473 (simultaneous SEQ FIFO skips) */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); /* Skip assoc data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); /* Will write cryptlen bytes */ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* Store payload data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); /* Read encrypted data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); /* Read ICV */ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); print_hex_dump_debug("rfc4106 dec shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_rfc4106_decap); /** * cnstr_shdsc_rfc4543_encap - IPSec ESP gmac encapsulation shared descriptor * (non-protocol). * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. 
* @ivsize: initialization vector size * @icvsize: integrity check value (ICV) size (truncated or full) * @is_qi: true when called from caam/qi */ void cnstr_shdsc_rfc4543_encap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, unsigned int icvsize, const bool is_qi) { u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; init_sh_desc(desc, HDR_SHARE_SERIAL); /* Skip key loading if it is loaded due to sharing */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); if (cdata->key_inline) append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); else append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); set_jump_tgt_here(desc, key_jump_cmd); /* Class 1 operation */ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); if (is_qi) { /* assoclen is not needed, skip it */ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP); /* Read salt and IV */ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); } /* assoclen + cryptlen = seqinlen */ append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); /* * MOVE_LEN opcode is not available in all SEC HW revisions, * thus need to do some magic, i.e. self-patch the descriptor * buffer. */ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | (0x6 << MOVE_LEN_SHIFT)); write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP); /* Will read assoclen + cryptlen bytes */ append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* Will write assoclen + cryptlen bytes */ append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); /* Read and write assoclen + cryptlen bytes */ aead_append_src_dst(desc, FIFOLD_TYPE_AAD); set_move_tgt_here(desc, read_move_cmd); set_move_tgt_here(desc, write_move_cmd); append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); /* Move payload data to OFIFO */ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); /* Write ICV */ append_seq_store(desc, icvsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT); print_hex_dump_debug("rfc4543 enc shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_rfc4543_encap); /** * cnstr_shdsc_rfc4543_decap - IPSec ESP gmac decapsulation shared descriptor * (non-protocol). * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_GCM. 
* @ivsize: initialization vector size * @icvsize: integrity check value (ICV) size (truncated or full) * @is_qi: true when called from caam/qi */ void cnstr_shdsc_rfc4543_decap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, unsigned int icvsize, const bool is_qi) { u32 *key_jump_cmd, *read_move_cmd, *write_move_cmd; init_sh_desc(desc, HDR_SHARE_SERIAL); /* Skip key loading if it is loaded due to sharing */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); if (cdata->key_inline) append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); else append_key(desc, cdata->key_dma, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); set_jump_tgt_here(desc, key_jump_cmd); /* Class 1 operation */ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); if (is_qi) { /* assoclen is not needed, skip it */ append_seq_fifo_load(desc, 4, FIFOLD_CLASS_SKIP); /* Read salt and IV */ append_fifo_load_as_imm(desc, (void *)(cdata->key_virt + cdata->keylen), 4, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV); append_seq_fifo_load(desc, ivsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | FIFOLD_TYPE_FLUSH1); } /* assoclen + cryptlen = seqoutlen */ append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* * MOVE_LEN opcode is not available in all SEC HW revisions, * thus need to do some magic, i.e. self-patch the descriptor * buffer. */ read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | (0x6 << MOVE_LEN_SHIFT)); write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | (0x8 << MOVE_LEN_SHIFT) | MOVE_WAITCOMP); /* Will read assoclen + cryptlen bytes */ append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* Will write assoclen + cryptlen bytes */ append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); /* Store payload data */ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); /* In-snoop assoclen + cryptlen data */ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | FIFOLD_TYPE_AAD | FIFOLD_TYPE_LAST2FLUSH1); set_move_tgt_here(desc, read_move_cmd); set_move_tgt_here(desc, write_move_cmd); append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); /* Move payload data to OFIFO */ append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); /* Read ICV */ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_ICV | FIFOLD_TYPE_LAST1); print_hex_dump_debug("rfc4543 dec shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_rfc4543_decap); /** * cnstr_shdsc_chachapoly - Chacha20 + Poly1305 generic AEAD (rfc7539) and * IPsec ESP (rfc7634, a.k.a. rfc7539esp) shared * descriptor (non-protocol). * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - OP_ALG_ALGSEL_CHACHA20 ANDed with * OP_ALG_AAI_AEAD. * @adata: pointer to authentication transform definitions * Valid algorithm values - OP_ALG_ALGSEL_POLY1305 ANDed with * OP_ALG_AAI_AEAD. 
* @ivsize: initialization vector size * @icvsize: integrity check value (ICV) size (truncated or full) * @encap: true if encapsulation, false if decapsulation * @is_qi: true when called from caam/qi */ void cnstr_shdsc_chachapoly(u32 * const desc, struct alginfo *cdata, struct alginfo *adata, unsigned int ivsize, unsigned int icvsize, const bool encap, const bool is_qi) { u32 *key_jump_cmd, *wait_cmd; u32 nfifo; const bool is_ipsec = (ivsize != CHACHAPOLY_IV_SIZE); /* Note: Context registers are saved. */ init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); /* skip key loading if they are loaded due to sharing */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); /* For IPsec load the salt from keymat in the context register */ if (is_ipsec) append_load_as_imm(desc, cdata->key_virt + cdata->keylen, 4, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | 4 << LDST_OFFSET_SHIFT); set_jump_tgt_here(desc, key_jump_cmd); /* Class 2 and 1 operations: Poly & ChaCha */ if (encap) { append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); } else { append_operation(desc, adata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT | OP_ALG_ICV_ON); append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_DECRYPT); } if (is_qi) { u32 *wait_load_cmd; u32 ctx1_iv_off = is_ipsec ? 8 : 4; /* REG3 = assoclen */ append_seq_load(desc, 4, LDST_CLASS_DECO | LDST_SRCDST_WORD_DECO_MATH3 | 4 << LDST_OFFSET_SHIFT); wait_load_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_CALM | JUMP_COND_NCP | JUMP_COND_NOP | JUMP_COND_NIP | JUMP_COND_NIFP); set_jump_tgt_here(desc, wait_load_cmd); append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | ctx1_iv_off << LDST_OFFSET_SHIFT); } /* * MAGIC with NFIFO * Read associated data from the input and send them to class1 and * class2 alignment blocks. From class1 send data to output fifo and * then write it to memory since we don't need to encrypt AD. 
*/ nfifo = NFIFOENTRY_DEST_BOTH | NFIFOENTRY_FC1 | NFIFOENTRY_FC2 | NFIFOENTRY_DTYPE_POLY | NFIFOENTRY_BND; append_load_imm_u32(desc, nfifo, LDST_CLASS_IND_CCB | LDST_SRCDST_WORD_INFO_FIFO_SM | LDLEN_MATH3); append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); append_seq_fifo_load(desc, 0, FIFOLD_TYPE_NOINFOFIFO | FIFOLD_CLASS_CLASS1 | LDST_VLF); append_move_len(desc, MOVE_AUX_LS | MOVE_SRC_AUX_ABLK | MOVE_DEST_OUTFIFO | MOVELEN_MRSEL_MATH3); append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | LDST_VLF); /* IPsec - copy IV at the output */ if (is_ipsec) append_seq_fifo_store(desc, ivsize, FIFOST_TYPE_METADATA | 0x2 << 25); wait_cmd = append_jump(desc, JUMP_JSL | JUMP_TYPE_LOCAL | JUMP_COND_NOP | JUMP_TEST_ALL); set_jump_tgt_here(desc, wait_cmd); if (encap) { /* Read and write cryptlen bytes */ append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); /* Write ICV */ append_seq_store(desc, icvsize, LDST_CLASS_2_CCB | LDST_SRCDST_BYTE_CONTEXT); } else { /* Read and write cryptlen bytes */ append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); aead_append_src_dst(desc, FIFOLD_TYPE_MSG); /* Load ICV for verification */ append_seq_fifo_load(desc, icvsize, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_ICV); } print_hex_dump_debug("chachapoly shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_chachapoly); /* For skcipher encrypt and decrypt, read from req->src and write to req->dst */ static inline void skcipher_append_src_dst(u32 *desc) { append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | KEY_VLF | FIFOLD_TYPE_MSG | FIFOLD_TYPE_LAST1); append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); } /** * cnstr_shdsc_skcipher_encap - skcipher encapsulation shared descriptor * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128 * - OP_ALG_ALGSEL_CHACHA20 * @ivsize: initialization vector size * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @ctx1_iv_off: IV offset in CONTEXT1 register */ void cnstr_shdsc_skcipher_encap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, const bool is_rfc3686, const u32 ctx1_iv_off) { u32 *key_jump_cmd; u32 options = cdata->algtype | OP_ALG_AS_INIT | OP_ALG_ENCRYPT; bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_CHACHA20); init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); /* Skip if already shared */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); /* Load class1 key only */ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); /* Load nonce into CONTEXT1 reg */ if (is_rfc3686) { const u8 *nonce = cdata->key_virt + cdata->keylen; append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, LDST_CLASS_IND_CCB | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) | (CTR_RFC3686_NONCE_SIZE << 
MOVE_LEN_SHIFT)); } set_jump_tgt_here(desc, key_jump_cmd); /* Load IV, if there is one */ if (ivsize) append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); /* Load counter into CONTEXT1 reg */ if (is_rfc3686) append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << LDST_OFFSET_SHIFT)); /* Load operation */ if (is_chacha20) options |= OP_ALG_AS_FINALIZE; append_operation(desc, options); /* Perform operation */ skcipher_append_src_dst(desc); /* Store IV */ if (!is_chacha20 && ivsize) append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); print_hex_dump_debug("skcipher enc shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_skcipher_encap); /** * cnstr_shdsc_skcipher_decap - skcipher decapsulation shared descriptor * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - one of OP_ALG_ALGSEL_{AES, DES, 3DES} ANDed * with OP_ALG_AAI_CBC or OP_ALG_AAI_CTR_MOD128 * - OP_ALG_ALGSEL_CHACHA20 * @ivsize: initialization vector size * @is_rfc3686: true when ctr(aes) is wrapped by rfc3686 template * @ctx1_iv_off: IV offset in CONTEXT1 register */ void cnstr_shdsc_skcipher_decap(u32 * const desc, struct alginfo *cdata, unsigned int ivsize, const bool is_rfc3686, const u32 ctx1_iv_off) { u32 *key_jump_cmd; bool is_chacha20 = ((cdata->algtype & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_CHACHA20); init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); /* Skip if already shared */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); /* Load class1 key only */ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); /* Load nonce into CONTEXT1 reg */ if (is_rfc3686) { const u8 *nonce = cdata->key_virt + cdata->keylen; append_load_as_imm(desc, nonce, CTR_RFC3686_NONCE_SIZE, LDST_CLASS_IND_CCB | LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM); append_move(desc, MOVE_WAITCOMP | MOVE_SRC_OUTFIFO | MOVE_DEST_CLASS1CTX | (16 << MOVE_OFFSET_SHIFT) | (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT)); } set_jump_tgt_here(desc, key_jump_cmd); /* Load IV, if there is one */ if (ivsize) append_seq_load(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); /* Load counter into CONTEXT1 reg */ if (is_rfc3686) append_load_imm_be32(desc, 1, LDST_IMM | LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) << LDST_OFFSET_SHIFT)); /* Choose operation */ if (ctx1_iv_off) append_operation(desc, cdata->algtype | OP_ALG_AS_INIT | OP_ALG_DECRYPT); else append_dec_op1(desc, cdata->algtype); /* Perform operation */ skcipher_append_src_dst(desc); /* Store IV */ if (!is_chacha20 && ivsize) append_seq_store(desc, ivsize, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (ctx1_iv_off << LDST_OFFSET_SHIFT)); print_hex_dump_debug("skcipher dec shdesc@" __stringify(__LINE__)": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_skcipher_decap); /** * cnstr_shdsc_xts_skcipher_encap - xts skcipher encapsulation shared descriptor * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS. 
*/ void cnstr_shdsc_xts_skcipher_encap(u32 * const desc, struct alginfo *cdata) { /* * Set sector size to a big value, practically disabling * sector size segmentation in xts implementation. We cannot * take full advantage of this HW feature with existing * crypto API / dm-crypt SW architecture. */ __be64 sector_size = cpu_to_be64(BIT(15)); u32 *key_jump_cmd; init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); /* Skip if already shared */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); /* Load class1 keys only */ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); /* Load sector size with index 40 bytes (0x28) */ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | (0x28 << LDST_OFFSET_SHIFT)); set_jump_tgt_here(desc, key_jump_cmd); /* * create sequence for loading the sector index / 16B tweak value * Lower 8B of IV - sector index / tweak lower half * Upper 8B of IV - upper half of 16B tweak */ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT)); append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (0x30 << LDST_OFFSET_SHIFT)); /* Load operation */ append_operation(desc, cdata->algtype | OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT); /* Perform operation */ skcipher_append_src_dst(desc); /* Store lower 8B and upper 8B of IV */ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT)); append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (0x30 << LDST_OFFSET_SHIFT)); print_hex_dump_debug("xts skcipher enc shdesc@" __stringify(__LINE__) ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_encap); /** * cnstr_shdsc_xts_skcipher_decap - xts skcipher decapsulation shared descriptor * @desc: pointer to buffer used for descriptor construction * @cdata: pointer to block cipher transform definitions * Valid algorithm values - OP_ALG_ALGSEL_AES ANDed with OP_ALG_AAI_XTS. */ void cnstr_shdsc_xts_skcipher_decap(u32 * const desc, struct alginfo *cdata) { /* * Set sector size to a big value, practically disabling * sector size segmentation in xts implementation. We cannot * take full advantage of this HW feature with existing * crypto API / dm-crypt SW architecture. 
*/ __be64 sector_size = cpu_to_be64(BIT(15)); u32 *key_jump_cmd; init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); /* Skip if already shared */ key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_SHRD); /* Load class1 key only */ append_key_as_imm(desc, cdata->key_virt, cdata->keylen, cdata->keylen, CLASS_1 | KEY_DEST_CLASS_REG); /* Load sector size with index 40 bytes (0x28) */ append_load_as_imm(desc, (void *)&sector_size, 8, LDST_CLASS_1_CCB | LDST_SRCDST_BYTE_CONTEXT | (0x28 << LDST_OFFSET_SHIFT)); set_jump_tgt_here(desc, key_jump_cmd); /* * create sequence for loading the sector index / 16B tweak value * Lower 8B of IV - sector index / tweak lower half * Upper 8B of IV - upper half of 16B tweak */ append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT)); append_seq_load(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (0x30 << LDST_OFFSET_SHIFT)); /* Load operation */ append_dec_op1(desc, cdata->algtype); /* Perform operation */ skcipher_append_src_dst(desc); /* Store lower 8B and upper 8B of IV */ append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (0x20 << LDST_OFFSET_SHIFT)); append_seq_store(desc, 8, LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB | (0x30 << LDST_OFFSET_SHIFT)); print_hex_dump_debug("xts skcipher dec shdesc@" __stringify(__LINE__) ": ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); } EXPORT_SYMBOL(cnstr_shdsc_xts_skcipher_decap); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("FSL CAAM descriptor support"); MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");
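A quick aside on the rfc3686 path of the two skcipher constructors above: the descriptor moves the 4-byte nonce stored right after the key into CONTEXT1, loads the per-request IV at ctx1_iv_off, and then writes a big-endian 32-bit counter of 1 immediately after the IV. The standalone sketch below is not part of the driver; names and test values are illustrative, and the 4/8/4 byte split follows RFC 3686. It shows the 16-byte AES-CTR counter block this layout produces.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* nonce || per-request IV || 32-bit big-endian counter starting at 1 */
static void rfc3686_counter_block(uint8_t out[16], const uint8_t nonce[4],
				  const uint8_t iv[8])
{
	memcpy(out, nonce, 4);		/* nonce kept after the key material */
	memcpy(out + 4, iv, 8);		/* per-request IV */
	out[12] = 0;			/* initial block counter = 1 */
	out[13] = 0;
	out[14] = 0;
	out[15] = 1;
}

int main(void)
{
	uint8_t nonce[4] = { 0x00, 0x01, 0x02, 0x03 };
	uint8_t iv[8] = { 0 };
	uint8_t blk[16];
	int i;

	rfc3686_counter_block(blk, nonce, iv);
	for (i = 0; i < 16; i++)
		printf("%02x", blk[i]);
	printf("\n");
	return 0;
}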
linux-master
drivers/crypto/caam/caamalg_desc.c
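A minimal sketch of how a caller in the style of caamalg.c might drive one of the constructors from this file. struct alginfo, the OP_ALG_* flags and cnstr_shdsc_skcipher_encap() come from the driver and its headers; the helper name, the caller-provided descriptor buffer, and the assumption that the key can be inlined are illustrative, not the driver's actual code.

#include <crypto/aes.h>		/* AES_BLOCK_SIZE */
#include "desc_constr.h"	/* struct alginfo and the OP_ALG_* flags */
#include "caamalg_desc.h"	/* cnstr_shdsc_skcipher_encap() */

/* Hypothetical helper: build an AES-CBC encrypt shared descriptor. */
static void example_cbc_aes_enc_shdesc(u32 *sh_desc, const u8 *key,
				       unsigned int keylen)
{
	struct alginfo cdata = { 0 };

	cdata.algtype = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC;
	cdata.keylen = keylen;
	cdata.key_virt = key;
	cdata.key_inline = true;	/* assume the key fits in the descriptor */

	/* CBC: 16-byte IV at offset 0 of CONTEXT1, no rfc3686 wrapping */
	cnstr_shdsc_skcipher_encap(sh_desc, &cdata, AES_BLOCK_SIZE, false, 0);
}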
// SPDX-License-Identifier: GPL-2.0+ /* * caam - Freescale FSL CAAM support for hw_random * * Copyright 2011 Freescale Semiconductor, Inc. * Copyright 2018-2019, 2023 NXP * * Based on caamalg.c crypto API driver. * */ #include <linux/hw_random.h> #include <linux/completion.h> #include <linux/atomic.h> #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/kfifo.h> #include "compat.h" #include "regs.h" #include "intern.h" #include "desc_constr.h" #include "jr.h" #include "error.h" #define CAAM_RNG_MAX_FIFO_STORE_SIZE 16 /* * Length of used descriptors, see caam_init_desc() */ #define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ + \ CAAM_CMD_SZ + \ CAAM_CMD_SZ + CAAM_PTR_SZ_MAX) /* rng per-device context */ struct caam_rng_ctx { struct hwrng rng; struct device *jrdev; struct device *ctrldev; void *desc_async; void *desc_sync; struct work_struct worker; struct kfifo fifo; }; struct caam_rng_job_ctx { struct completion *done; int *err; }; static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r) { return (struct caam_rng_ctx *)r->priv; } static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) { struct caam_rng_job_ctx *jctx = context; if (err) *jctx->err = caam_jr_strstatus(jrdev, err); complete(jctx->done); } static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma) { init_job_desc(desc, 0); /* + 1 cmd_sz */ /* Generate random bytes: + 1 cmd_sz */ append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG | OP_ALG_PR_ON); /* Store bytes: + 1 cmd_sz + caam_ptr_sz */ append_fifo_store(desc, dst_dma, CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE); print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); return desc; } static int caam_rng_read_one(struct device *jrdev, void *dst, int len, void *desc, struct completion *done) { dma_addr_t dst_dma; int err, ret = 0; struct caam_rng_job_ctx jctx = { .done = done, .err = &ret, }; len = CAAM_RNG_MAX_FIFO_STORE_SIZE; dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE); if (dma_mapping_error(jrdev, dst_dma)) { dev_err(jrdev, "unable to map destination memory\n"); return -ENOMEM; } init_completion(done); err = caam_jr_enqueue(jrdev, caam_init_desc(desc, dst_dma), caam_rng_done, &jctx); if (err == -EINPROGRESS) { wait_for_completion(done); err = 0; } dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE); return err ?: (ret ?: len); } static void caam_rng_fill_async(struct caam_rng_ctx *ctx) { struct scatterlist sg[1]; struct completion done; int len, nents; sg_init_table(sg, ARRAY_SIZE(sg)); nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg), CAAM_RNG_MAX_FIFO_STORE_SIZE); if (!nents) return; len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]), sg[0].length, ctx->desc_async, &done); if (len < 0) return; kfifo_dma_in_finish(&ctx->fifo, len); } static void caam_rng_worker(struct work_struct *work) { struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx, worker); caam_rng_fill_async(ctx); } static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait) { struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng); int out; if (wait) { struct completion done; return caam_rng_read_one(ctx->jrdev, dst, max, ctx->desc_sync, &done); } out = kfifo_out(&ctx->fifo, dst, max); if (kfifo_is_empty(&ctx->fifo)) schedule_work(&ctx->worker); return out; } static void caam_cleanup(struct hwrng *rng) { struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng); flush_work(&ctx->worker); caam_jr_free(ctx->jrdev); kfifo_free(&ctx->fifo); } #ifdef 
CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST static inline void test_len(struct hwrng *rng, size_t len, bool wait) { u8 *buf; int read_len; struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng); struct device *dev = ctx->ctrldev; buf = kcalloc(CAAM_RNG_MAX_FIFO_STORE_SIZE, sizeof(u8), GFP_KERNEL); while (len > 0) { read_len = rng->read(rng, buf, len, wait); if (read_len < 0 || (read_len == 0 && wait)) { dev_err(dev, "RNG Read FAILED received %d bytes\n", read_len); kfree(buf); return; } print_hex_dump_debug("random bytes@: ", DUMP_PREFIX_ADDRESS, 16, 4, buf, read_len, 1); len = len - read_len; } kfree(buf); } static inline void test_mode_once(struct hwrng *rng, bool wait) { test_len(rng, 32, wait); test_len(rng, 64, wait); test_len(rng, 128, wait); } static void self_test(struct hwrng *rng) { pr_info("Executing RNG SELF-TEST with wait\n"); test_mode_once(rng, true); } #endif static int caam_init(struct hwrng *rng) { struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng); int err; ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN, GFP_KERNEL); if (!ctx->desc_sync) return -ENOMEM; ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN, GFP_KERNEL); if (!ctx->desc_async) return -ENOMEM; if (kfifo_alloc(&ctx->fifo, ALIGN(CAAM_RNG_MAX_FIFO_STORE_SIZE, dma_get_cache_alignment()), GFP_KERNEL)) return -ENOMEM; INIT_WORK(&ctx->worker, caam_rng_worker); ctx->jrdev = caam_jr_alloc(); err = PTR_ERR_OR_ZERO(ctx->jrdev); if (err) { kfifo_free(&ctx->fifo); pr_err("Job Ring Device allocation for transform failed\n"); return err; } /* * Fill async buffer to have early randomness data for * hw_random */ caam_rng_fill_async(ctx); return 0; } int caam_rng_init(struct device *ctrldev); void caam_rng_exit(struct device *ctrldev) { devres_release_group(ctrldev, caam_rng_init); } int caam_rng_init(struct device *ctrldev) { struct caam_rng_ctx *ctx; u32 rng_inst; struct caam_drv_private *priv = dev_get_drvdata(ctrldev); int ret; /* Check for an instantiated RNG before registration */ if (priv->era < 10) rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT; else rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK; if (!rng_inst) return 0; if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL)) return -ENOMEM; ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->ctrldev = ctrldev; ctx->rng.name = "rng-caam"; ctx->rng.init = caam_init; ctx->rng.cleanup = caam_cleanup; ctx->rng.read = caam_read; ctx->rng.priv = (unsigned long)ctx; dev_info(ctrldev, "registering rng-caam\n"); ret = devm_hwrng_register(ctrldev, &ctx->rng); if (ret) { caam_rng_exit(ctrldev); return ret; } #ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_TEST self_test(&ctx->rng); #endif devres_close_group(ctrldev, caam_rng_init); return 0; }
linux-master
drivers/crypto/caam/caamrng.c
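CAAM_RNG_DESC_LEN in the file above budgets one command word for the job header, one for the RNG OPERATION command, and one command word plus a pointer for the FIFO STORE appended by caam_init_desc(). A standalone arithmetic check follows; the 4-byte command word and 8-byte pointer are assumptions standing in for CAAM_CMD_SZ and CAAM_PTR_SZ_MAX.

/* Not driver code - just the size bookkeeping spelled out. */
#define CMD_SZ		4	/* stands in for CAAM_CMD_SZ (one u32) */
#define PTR_SZ_MAX	8	/* stands in for CAAM_PTR_SZ_MAX (64-bit DMA) */

/* header + OPERATION + FIFO STORE command + FIFO STORE pointer */
#define RNG_DESC_LEN	(CMD_SZ + CMD_SZ + CMD_SZ + PTR_SZ_MAX)

_Static_assert(RNG_DESC_LEN == 20, "3 command words + 1 pointer = 20 bytes");

int main(void)
{
	return 0;
}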
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ #include <crypto/internal/aead.h> #include <crypto/authenc.h> #include <crypto/scatterwalk.h> #include <linux/dmapool.h> #include <linux/dma-mapping.h> #include "cc_buffer_mgr.h" #include "cc_lli_defs.h" #include "cc_cipher.h" #include "cc_hash.h" #include "cc_aead.h" union buffer_array_entry { struct scatterlist *sgl; dma_addr_t buffer_dma; }; struct buffer_array { unsigned int num_of_buffers; union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI]; unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI]; int nents[MAX_NUM_OF_BUFFERS_IN_MLLI]; int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI]; bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI]; u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI]; }; static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type) { switch (type) { case CC_DMA_BUF_NULL: return "BUF_NULL"; case CC_DMA_BUF_DLLI: return "BUF_DLLI"; case CC_DMA_BUF_MLLI: return "BUF_MLLI"; default: return "BUF_INVALID"; } } /** * cc_copy_mac() - Copy MAC to temporary location * * @dev: device object * @req: aead request object * @dir: [IN] copy from/to sgl */ static void cc_copy_mac(struct device *dev, struct aead_request *req, enum cc_sg_cpy_direct dir) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); u32 skip = req->assoclen + req->cryptlen; cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src, (skip - areq_ctx->req_authsize), skip, dir); } /** * cc_get_sgl_nents() - Get scatterlist number of entries. * * @dev: Device object * @sg_list: SG list * @nbytes: [IN] Total SGL data bytes. * @lbytes: [OUT] Returns the amount of bytes at the last entry * * Return: * Number of entries in the scatterlist */ static unsigned int cc_get_sgl_nents(struct device *dev, struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes) { unsigned int nents = 0; *lbytes = 0; while (nbytes && sg_list) { nents++; /* get the number of bytes in the last entry */ *lbytes = nbytes; nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length; sg_list = sg_next(sg_list); } dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes); return nents; } /** * cc_copy_sg_portion() - Copy scatter list data, * from to_skip to end, to dest and vice versa * * @dev: Device object * @dest: Buffer to copy to/from * @sg: SG list * @to_skip: Number of bytes to skip before copying * @end: Offset of last byte to copy * @direct: Transfer direction (true == from SG list to buffer, false == from * buffer to SG list) */ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, u32 to_skip, u32 end, enum cc_sg_cpy_direct direct) { u32 nents; nents = sg_nents_for_len(sg, end); sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip, (direct == CC_SG_TO_BUF)); } static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents, u32 **mlli_entry_pp) { u32 *mlli_entry_p = *mlli_entry_pp; u32 new_nents; /* Verify there is no memory overflow*/ new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1); if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) { dev_err(dev, "Too many mlli entries. 
current %d max %d\n", new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES); return -ENOMEM; } /*handle buffer longer than 64 kbytes */ while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) { cc_lli_set_addr(mlli_entry_p, buff_dma); cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE); dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET], mlli_entry_p[LLI_WORD1_OFFSET]); buff_dma += CC_MAX_MLLI_ENTRY_SIZE; buff_size -= CC_MAX_MLLI_ENTRY_SIZE; mlli_entry_p = mlli_entry_p + 2; (*curr_nents)++; } /*Last entry */ cc_lli_set_addr(mlli_entry_p, buff_dma); cc_lli_set_size(mlli_entry_p, buff_size); dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET], mlli_entry_p[LLI_WORD1_OFFSET]); mlli_entry_p = mlli_entry_p + 2; *mlli_entry_pp = mlli_entry_p; (*curr_nents)++; return 0; } static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl, u32 sgl_data_len, u32 sgl_offset, u32 *curr_nents, u32 **mlli_entry_pp) { struct scatterlist *curr_sgl = sgl; u32 *mlli_entry_p = *mlli_entry_pp; s32 rc = 0; for ( ; (curr_sgl && sgl_data_len); curr_sgl = sg_next(curr_sgl)) { u32 entry_data_len = (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ? sg_dma_len(curr_sgl) - sgl_offset : sgl_data_len; sgl_data_len -= entry_data_len; rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) + sgl_offset, entry_data_len, curr_nents, &mlli_entry_p); if (rc) return rc; sgl_offset = 0; } *mlli_entry_pp = mlli_entry_p; return 0; } static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data, struct mlli_params *mlli_params, gfp_t flags) { u32 *mlli_p; u32 total_nents = 0, prev_total_nents = 0; int rc = 0, i; dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers); /* Allocate memory from the pointed pool */ mlli_params->mlli_virt_addr = dma_pool_alloc(mlli_params->curr_pool, flags, &mlli_params->mlli_dma_addr); if (!mlli_params->mlli_virt_addr) { dev_err(dev, "dma_pool_alloc() failed\n"); rc = -ENOMEM; goto build_mlli_exit; } /* Point to start of MLLI */ mlli_p = mlli_params->mlli_virt_addr; /* go over all SG's and link it to one MLLI table */ for (i = 0; i < sg_data->num_of_buffers; i++) { union buffer_array_entry *entry = &sg_data->entry[i]; u32 tot_len = sg_data->total_data_len[i]; u32 offset = sg_data->offset[i]; rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset, &total_nents, &mlli_p); if (rc) return rc; /* set last bit in the current table */ if (sg_data->mlli_nents[i]) { /*Calculate the current MLLI table length for the *length field in the descriptor */ *sg_data->mlli_nents[i] += (total_nents - prev_total_nents); prev_total_nents = total_nents; } } /* Set MLLI size for the bypass operation */ mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE); dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n", mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr, mlli_params->mlli_len); build_mlli_exit: return rc; } static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data, unsigned int nents, struct scatterlist *sgl, unsigned int data_len, unsigned int data_offset, bool is_last_table, u32 *mlli_nents) { unsigned int index = sgl_data->num_of_buffers; dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n", index, nents, sgl, data_len, is_last_table); sgl_data->nents[index] = nents; sgl_data->entry[index].sgl = sgl; sgl_data->offset[index] = data_offset; sgl_data->total_data_len[index] = data_len; sgl_data->is_last[index] = is_last_table; 
sgl_data->mlli_nents[index] = mlli_nents; if (sgl_data->mlli_nents[index]) *sgl_data->mlli_nents[index] = 0; sgl_data->num_of_buffers++; } static int cc_map_sg(struct device *dev, struct scatterlist *sg, unsigned int nbytes, int direction, u32 *nents, u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents) { int ret = 0; if (!nbytes) { *mapped_nents = 0; *lbytes = 0; *nents = 0; return 0; } *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); if (*nents > max_sg_nents) { *nents = 0; dev_err(dev, "Too many fragments. current %d max %d\n", *nents, max_sg_nents); return -ENOMEM; } ret = dma_map_sg(dev, sg, *nents, direction); if (!ret) { *nents = 0; dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret); return -ENOMEM; } *mapped_nents = ret; return 0; } static int cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx, u8 *config_data, struct buffer_array *sg_data, unsigned int assoclen) { dev_dbg(dev, " handle additional data config set to DLLI\n"); /* create sg for the current buffer */ sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size); if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) { dev_err(dev, "dma_map_sg() config buffer failed\n"); return -ENOMEM; } dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n", &sg_dma_address(&areq_ctx->ccm_adata_sg), sg_page(&areq_ctx->ccm_adata_sg), sg_virt(&areq_ctx->ccm_adata_sg), areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length); /* prepare for case of MLLI */ if (assoclen > 0) { cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg, (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size), 0, false, NULL); } return 0; } static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx, u8 *curr_buff, u32 curr_buff_cnt, struct buffer_array *sg_data) { dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt); /* create sg for the current buffer */ sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt); if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) { dev_err(dev, "dma_map_sg() src buffer failed\n"); return -ENOMEM; } dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n", &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg), sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset, areq_ctx->buff_sg->length); areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; areq_ctx->curr_sg = areq_ctx->buff_sg; areq_ctx->in_nents = 0; /* prepare for case of MLLI */ cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0, false, NULL); return 0; } void cc_unmap_cipher_request(struct device *dev, void *ctx, unsigned int ivsize, struct scatterlist *src, struct scatterlist *dst) { struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx; if (req_ctx->gen_ctx.iv_dma_addr) { dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n", &req_ctx->gen_ctx.iv_dma_addr, ivsize); dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr, ivsize, DMA_BIDIRECTIONAL); } /* Release pool */ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI && req_ctx->mlli_params.mlli_virt_addr) { dma_pool_free(req_ctx->mlli_params.curr_pool, req_ctx->mlli_params.mlli_virt_addr, req_ctx->mlli_params.mlli_dma_addr); } if (src != dst) { dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE); dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE); dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst)); dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); } else { dma_unmap_sg(dev, src, req_ctx->in_nents, 
DMA_BIDIRECTIONAL); dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); } } int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, unsigned int ivsize, unsigned int nbytes, void *info, struct scatterlist *src, struct scatterlist *dst, gfp_t flags) { struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx; struct mlli_params *mlli_params = &req_ctx->mlli_params; struct device *dev = drvdata_to_dev(drvdata); struct buffer_array sg_data; u32 dummy = 0; int rc = 0; u32 mapped_nents = 0; int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); req_ctx->dma_buf_type = CC_DMA_BUF_DLLI; mlli_params->curr_pool = NULL; sg_data.num_of_buffers = 0; /* Map IV buffer */ if (ivsize) { dump_byte_array("iv", info, ivsize); req_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) { dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n", ivsize, info); return -ENOMEM; } dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n", ivsize, info, &req_ctx->gen_ctx.iv_dma_addr); } else { req_ctx->gen_ctx.iv_dma_addr = 0; } /* Map the src SGL */ rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) goto cipher_exit; if (mapped_nents > 1) req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; if (src == dst) { /* Handle inplace operation */ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { req_ctx->out_nents = 0; cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src, nbytes, 0, true, &req_ctx->in_mlli_nents); } } else { /* Map the dst sg */ rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE, &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) goto cipher_exit; if (mapped_nents > 1) req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src, nbytes, 0, true, &req_ctx->in_mlli_nents); cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst, nbytes, 0, true, &req_ctx->out_mlli_nents); } } if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { mlli_params->curr_pool = drvdata->mlli_buffs_pool; rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); if (rc) goto cipher_exit; } dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n", cc_dma_buf_type(req_ctx->dma_buf_type)); return 0; cipher_exit: cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); return rc; } void cc_unmap_aead_request(struct device *dev, struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); unsigned int hw_iv_size = areq_ctx->hw_iv_size; struct cc_drvdata *drvdata = dev_get_drvdata(dev); int src_direction = (req->src != req->dst ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL); if (areq_ctx->mac_buf_dma_addr) { dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, MAX_MAC_SIZE, DMA_BIDIRECTIONAL); } if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { if (areq_ctx->hkey_dma_addr) { dma_unmap_single(dev, areq_ctx->hkey_dma_addr, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL); } if (areq_ctx->gcm_block_len_dma_addr) { dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr, AES_BLOCK_SIZE, DMA_TO_DEVICE); } if (areq_ctx->gcm_iv_inc1_dma_addr) { dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr, AES_BLOCK_SIZE, DMA_TO_DEVICE); } if (areq_ctx->gcm_iv_inc2_dma_addr) { dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE, DMA_TO_DEVICE); } } if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { if (areq_ctx->ccm_iv0_dma_addr) { dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr, AES_BLOCK_SIZE, DMA_TO_DEVICE); } dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE); } if (areq_ctx->gen_ctx.iv_dma_addr) { dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr, hw_iv_size, DMA_BIDIRECTIONAL); kfree_sensitive(areq_ctx->gen_ctx.iv); } /* Release pool */ if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) && (areq_ctx->mlli_params.mlli_virt_addr)) { dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n", &areq_ctx->mlli_params.mlli_dma_addr, areq_ctx->mlli_params.mlli_virt_addr); dma_pool_free(areq_ctx->mlli_params.curr_pool, areq_ctx->mlli_params.mlli_virt_addr, areq_ctx->mlli_params.mlli_dma_addr); } dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, areq_ctx->assoclen, req->cryptlen); dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction); if (req->src != req->dst) { dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", sg_virt(req->dst)); dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE); } if (drvdata->coherent && areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && req->src == req->dst) { /* copy back mac from temporary location to deal with possible * data memory overriding that caused by cache coherence * problem. 
*/ cc_copy_mac(dev, req, CC_SG_FROM_BUF); } } static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize, u32 last_entry_data_size) { return ((sgl_nents > 1) && (last_entry_data_size < authsize)); } static int cc_aead_chain_iv(struct cc_drvdata *drvdata, struct aead_request *req, struct buffer_array *sg_data, bool is_last, bool do_chain) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); unsigned int hw_iv_size = areq_ctx->hw_iv_size; struct device *dev = drvdata_to_dev(drvdata); gfp_t flags = cc_gfp_flags(&req->base); int rc = 0; if (!req->iv) { areq_ctx->gen_ctx.iv_dma_addr = 0; areq_ctx->gen_ctx.iv = NULL; goto chain_iv_exit; } areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags); if (!areq_ctx->gen_ctx.iv) return -ENOMEM; areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) { dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n", hw_iv_size, req->iv); kfree_sensitive(areq_ctx->gen_ctx.iv); areq_ctx->gen_ctx.iv = NULL; rc = -ENOMEM; goto chain_iv_exit; } dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n", hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr); chain_iv_exit: return rc; } static int cc_aead_chain_assoc(struct cc_drvdata *drvdata, struct aead_request *req, struct buffer_array *sg_data, bool is_last, bool do_chain) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc = 0; int mapped_nents = 0; struct device *dev = drvdata_to_dev(drvdata); if (!sg_data) { rc = -EINVAL; goto chain_assoc_exit; } if (areq_ctx->assoclen == 0) { areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL; areq_ctx->assoc.nents = 0; areq_ctx->assoc.mlli_nents = 0; dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n", cc_dma_buf_type(areq_ctx->assoc_buff_type), areq_ctx->assoc.nents); goto chain_assoc_exit; } mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen); if (mapped_nents < 0) return mapped_nents; if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) { dev_err(dev, "Too many fragments. current %d max %d\n", mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES); return -ENOMEM; } areq_ctx->assoc.nents = mapped_nents; /* in CCM case we have additional entry for * ccm header configurations */ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) { dev_err(dev, "CCM case.Too many fragments. 
Current %d max %d\n", (areq_ctx->assoc.nents + 1), LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES); rc = -ENOMEM; goto chain_assoc_exit; } } if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null) areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI; else areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n", cc_dma_buf_type(areq_ctx->assoc_buff_type), areq_ctx->assoc.nents); cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src, areq_ctx->assoclen, 0, is_last, &areq_ctx->assoc.mlli_nents); areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; } chain_assoc_exit: return rc; } static void cc_prepare_aead_data_dlli(struct aead_request *req, u32 *src_last_bytes, u32 *dst_last_bytes) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; unsigned int authsize = areq_ctx->req_authsize; struct scatterlist *sg; ssize_t offset; areq_ctx->is_icv_fragmented = false; if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) { sg = areq_ctx->src_sgl; offset = *src_last_bytes - authsize; } else { sg = areq_ctx->dst_sgl; offset = *dst_last_bytes - authsize; } areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset; areq_ctx->icv_virt_addr = sg_virt(sg) + offset; } static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata, struct aead_request *req, struct buffer_array *sg_data, u32 *src_last_bytes, u32 *dst_last_bytes, bool is_last_table) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; unsigned int authsize = areq_ctx->req_authsize; struct device *dev = drvdata_to_dev(drvdata); struct scatterlist *sg; if (req->src == req->dst) { /*INPLACE*/ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, areq_ctx->src_sgl, areq_ctx->cryptlen, areq_ctx->src_offset, is_last_table, &areq_ctx->src.mlli_nents); areq_ctx->is_icv_fragmented = cc_is_icv_frag(areq_ctx->src.nents, authsize, *src_last_bytes); if (areq_ctx->is_icv_fragmented) { /* Backup happens only when ICV is fragmented, ICV * verification is made by CPU compare in order to * simplify MAC verification upon request completion */ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) { /* In coherent platforms (e.g. ACP) * already copying ICV for any * INPLACE-DECRYPT operation, hence * we must neglect this code. */ if (!drvdata->coherent) cc_copy_mac(dev, req, CC_SG_TO_BUF); areq_ctx->icv_virt_addr = areq_ctx->backup_mac; } else { areq_ctx->icv_virt_addr = areq_ctx->mac_buf; areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr; } } else { /* Contig. 
ICV */ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1]; /*Should hanlde if the sg is not contig.*/ areq_ctx->icv_dma_addr = sg_dma_address(sg) + (*src_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt(sg) + (*src_last_bytes - authsize); } } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) { /*NON-INPLACE and DECRYPT*/ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, areq_ctx->src_sgl, areq_ctx->cryptlen, areq_ctx->src_offset, is_last_table, &areq_ctx->src.mlli_nents); cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents, areq_ctx->dst_sgl, areq_ctx->cryptlen, areq_ctx->dst_offset, is_last_table, &areq_ctx->dst.mlli_nents); areq_ctx->is_icv_fragmented = cc_is_icv_frag(areq_ctx->src.nents, authsize, *src_last_bytes); /* Backup happens only when ICV is fragmented, ICV * verification is made by CPU compare in order to simplify * MAC verification upon request completion */ if (areq_ctx->is_icv_fragmented) { cc_copy_mac(dev, req, CC_SG_TO_BUF); areq_ctx->icv_virt_addr = areq_ctx->backup_mac; } else { /* Contig. ICV */ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1]; /*Should hanlde if the sg is not contig.*/ areq_ctx->icv_dma_addr = sg_dma_address(sg) + (*src_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt(sg) + (*src_last_bytes - authsize); } } else { /*NON-INPLACE and ENCRYPT*/ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents, areq_ctx->dst_sgl, areq_ctx->cryptlen, areq_ctx->dst_offset, is_last_table, &areq_ctx->dst.mlli_nents); cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, areq_ctx->src_sgl, areq_ctx->cryptlen, areq_ctx->src_offset, is_last_table, &areq_ctx->src.mlli_nents); areq_ctx->is_icv_fragmented = cc_is_icv_frag(areq_ctx->dst.nents, authsize, *dst_last_bytes); if (!areq_ctx->is_icv_fragmented) { sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]; /* Contig. ICV */ areq_ctx->icv_dma_addr = sg_dma_address(sg) + (*dst_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt(sg) + (*dst_last_bytes - authsize); } else { areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr; areq_ctx->icv_virt_addr = areq_ctx->mac_buf; } } } static int cc_aead_chain_data(struct cc_drvdata *drvdata, struct aead_request *req, struct buffer_array *sg_data, bool is_last_table, bool do_chain) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); struct device *dev = drvdata_to_dev(drvdata); enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; unsigned int authsize = areq_ctx->req_authsize; unsigned int src_last_bytes = 0, dst_last_bytes = 0; int rc = 0; u32 src_mapped_nents = 0, dst_mapped_nents = 0; u32 offset = 0; /* non-inplace mode */ unsigned int size_for_map = req->assoclen + req->cryptlen; u32 sg_index = 0; u32 size_to_skip = req->assoclen; struct scatterlist *sgl; offset = size_to_skip; if (!sg_data) return -EINVAL; areq_ctx->src_sgl = req->src; areq_ctx->dst_sgl = req->dst; size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0; src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map, &src_last_bytes); sg_index = areq_ctx->src_sgl->length; //check where the data starts while (src_mapped_nents && (sg_index <= size_to_skip)) { src_mapped_nents--; offset -= areq_ctx->src_sgl->length; sgl = sg_next(areq_ctx->src_sgl); if (!sgl) break; areq_ctx->src_sgl = sgl; sg_index += areq_ctx->src_sgl->length; } if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) { dev_err(dev, "Too many fragments. 
current %d max %d\n", src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES); return -ENOMEM; } areq_ctx->src.nents = src_mapped_nents; areq_ctx->src_offset = offset; if (req->src != req->dst) { size_for_map = req->assoclen + req->cryptlen; if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) size_for_map += authsize; else size_for_map -= authsize; rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE, &areq_ctx->dst.mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, &dst_mapped_nents); if (rc) goto chain_data_exit; } dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map, &dst_last_bytes); sg_index = areq_ctx->dst_sgl->length; offset = size_to_skip; //check where the data starts while (dst_mapped_nents && sg_index <= size_to_skip) { dst_mapped_nents--; offset -= areq_ctx->dst_sgl->length; sgl = sg_next(areq_ctx->dst_sgl); if (!sgl) break; areq_ctx->dst_sgl = sgl; sg_index += areq_ctx->dst_sgl->length; } if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) { dev_err(dev, "Too many fragments. current %d max %d\n", dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES); return -ENOMEM; } areq_ctx->dst.nents = dst_mapped_nents; areq_ctx->dst_offset = offset; if (src_mapped_nents > 1 || dst_mapped_nents > 1 || do_chain) { areq_ctx->data_buff_type = CC_DMA_BUF_MLLI; cc_prepare_aead_data_mlli(drvdata, req, sg_data, &src_last_bytes, &dst_last_bytes, is_last_table); } else { areq_ctx->data_buff_type = CC_DMA_BUF_DLLI; cc_prepare_aead_data_dlli(req, &src_last_bytes, &dst_last_bytes); } chain_data_exit: return rc; } static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata, struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); u32 curr_mlli_size = 0; if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr; curr_mlli_size = areq_ctx->assoc.mlli_nents * LLI_ENTRY_BYTE_SIZE; } if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { /*Inplace case dst nents equal to src nents*/ if (req->src == req->dst) { areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents; areq_ctx->src.sram_addr = drvdata->mlli_sram_addr + curr_mlli_size; areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr; if (!areq_ctx->is_single_pass) areq_ctx->assoc.mlli_nents += areq_ctx->src.mlli_nents; } else { if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { areq_ctx->src.sram_addr = drvdata->mlli_sram_addr + curr_mlli_size; areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr + areq_ctx->src.mlli_nents * LLI_ENTRY_BYTE_SIZE; if (!areq_ctx->is_single_pass) areq_ctx->assoc.mlli_nents += areq_ctx->src.mlli_nents; } else { areq_ctx->dst.sram_addr = drvdata->mlli_sram_addr + curr_mlli_size; areq_ctx->src.sram_addr = areq_ctx->dst.sram_addr + areq_ctx->dst.mlli_nents * LLI_ENTRY_BYTE_SIZE; if (!areq_ctx->is_single_pass) areq_ctx->assoc.mlli_nents += areq_ctx->dst.mlli_nents; } } } } int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); struct mlli_params *mlli_params = &areq_ctx->mlli_params; struct device *dev = drvdata_to_dev(drvdata); struct buffer_array sg_data; unsigned int authsize = areq_ctx->req_authsize; int rc = 0; dma_addr_t dma_addr; u32 mapped_nents = 0; u32 dummy = 0; /*used for the assoc data fragments */ u32 size_to_map; gfp_t flags = cc_gfp_flags(&req->base); mlli_params->curr_pool = NULL; sg_data.num_of_buffers = 0; /* copy mac to a temporary location to deal with possible * data memory overriding that caused by cache coherence problem. 
*/ if (drvdata->coherent && areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && req->src == req->dst) cc_copy_mac(dev, req, CC_SG_TO_BUF); /* cacluate the size for cipher remove ICV in decrypt*/ areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) ? req->cryptlen : (req->cryptlen - authsize); dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n", MAX_MAC_SIZE, areq_ctx->mac_buf); rc = -ENOMEM; goto aead_map_failure; } areq_ctx->mac_buf_dma_addr = dma_addr; if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET; dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n", AES_BLOCK_SIZE, addr); areq_ctx->ccm_iv0_dma_addr = 0; rc = -ENOMEM; goto aead_map_failure; } areq_ctx->ccm_iv0_dma_addr = dma_addr; rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, &sg_data, areq_ctx->assoclen); if (rc) goto aead_map_failure; } if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n", AES_BLOCK_SIZE, areq_ctx->hkey); rc = -ENOMEM; goto aead_map_failure; } areq_ctx->hkey_dma_addr = dma_addr; dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n", AES_BLOCK_SIZE, &areq_ctx->gcm_len_block); rc = -ENOMEM; goto aead_map_failure; } areq_ctx->gcm_block_len_dma_addr = dma_addr; dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n", AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1)); areq_ctx->gcm_iv_inc1_dma_addr = 0; rc = -ENOMEM; goto aead_map_failure; } areq_ctx->gcm_iv_inc1_dma_addr = dma_addr; dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n", AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2)); areq_ctx->gcm_iv_inc2_dma_addr = 0; rc = -ENOMEM; goto aead_map_failure; } areq_ctx->gcm_iv_inc2_dma_addr = dma_addr; } size_to_map = req->cryptlen + req->assoclen; /* If we do in-place encryption, we also need the auth tag */ if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) && (req->src == req->dst)) { size_to_map += authsize; } rc = cc_map_sg(dev, req->src, size_to_map, (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL), &areq_ctx->src.mapped_nents, (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES), &dummy, &mapped_nents); if (rc) goto aead_map_failure; if (areq_ctx->is_single_pass) { /* * Create MLLI table for: * (1) Assoc. data * (2) Src/Dst SGLs * Note: IV is contg. 
buffer (not an SGL) */ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false); if (rc) goto aead_map_failure; rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false); if (rc) goto aead_map_failure; rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false); if (rc) goto aead_map_failure; } else { /* DOUBLE-PASS flow */ /* * Prepare MLLI table(s) in this order: * * If ENCRYPT/DECRYPT (inplace): * (1) MLLI table for assoc * (2) IV entry (chained right after end of assoc) * (3) MLLI for src/dst (inplace operation) * * If ENCRYPT (non-inplace) * (1) MLLI table for assoc * (2) IV entry (chained right after end of assoc) * (3) MLLI for dst * (4) MLLI for src * * If DECRYPT (non-inplace) * (1) MLLI table for assoc * (2) IV entry (chained right after end of assoc) * (3) MLLI for src * (4) MLLI for dst */ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true); if (rc) goto aead_map_failure; rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true); if (rc) goto aead_map_failure; rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true); if (rc) goto aead_map_failure; } /* Mlli support -start building the MLLI according to the above * results */ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { mlli_params->curr_pool = drvdata->mlli_buffs_pool; rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); if (rc) goto aead_map_failure; cc_update_aead_mlli_nents(drvdata, req); dev_dbg(dev, "assoc params mn %d\n", areq_ctx->assoc.mlli_nents); dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents); dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents); } return 0; aead_map_failure: cc_unmap_aead_request(dev, req); return rc; } int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update, gfp_t flags) { struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; struct device *dev = drvdata_to_dev(drvdata); u8 *curr_buff = cc_hash_buf(areq_ctx); u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx); struct mlli_params *mlli_params = &areq_ctx->mlli_params; struct buffer_array sg_data; int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n", curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index); /* Init the type of the dma buffer */ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL; mlli_params->curr_pool = NULL; sg_data.num_of_buffers = 0; areq_ctx->in_nents = 0; if (nbytes == 0 && *curr_buff_cnt == 0) { /* nothing to do */ return 0; } /* map the previous buffer */ if (*curr_buff_cnt) { rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, &sg_data); if (rc) return rc; } if (src && nbytes > 0 && do_update) { rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE, &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) goto unmap_curr_buff; if (src && mapped_nents == 1 && areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { memcpy(areq_ctx->buff_sg, src, sizeof(struct scatterlist)); areq_ctx->buff_sg->length = nbytes; areq_ctx->curr_sg = areq_ctx->buff_sg; areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; } else { areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI; } } /*build mlli */ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { mlli_params->curr_pool = drvdata->mlli_buffs_pool; /* add the src data to the sg_data */ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes, 0, true, &areq_ctx->mlli_nents); rc = cc_generate_mlli(dev, 
&sg_data, mlli_params, flags); if (rc) goto fail_unmap_din; } /* change the buffer index for the unmap function */ areq_ctx->buff_index = (areq_ctx->buff_index ^ 1); dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n", cc_dma_buf_type(areq_ctx->data_dma_buf_type)); return 0; fail_unmap_din: dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); unmap_curr_buff: if (*curr_buff_cnt) dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); return rc; } int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size, gfp_t flags) { struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; struct device *dev = drvdata_to_dev(drvdata); u8 *curr_buff = cc_hash_buf(areq_ctx); u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx); u8 *next_buff = cc_next_buf(areq_ctx); u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx); struct mlli_params *mlli_params = &areq_ctx->mlli_params; unsigned int update_data_len; u32 total_in_len = nbytes + *curr_buff_cnt; struct buffer_array sg_data; unsigned int swap_index = 0; int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n", curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index); /* Init the type of the dma buffer */ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL; mlli_params->curr_pool = NULL; areq_ctx->curr_sg = NULL; sg_data.num_of_buffers = 0; areq_ctx->in_nents = 0; if (total_in_len < block_size) { dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n", curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]); areq_ctx->in_nents = sg_nents_for_len(src, nbytes); sg_copy_to_buffer(src, areq_ctx->in_nents, &curr_buff[*curr_buff_cnt], nbytes); *curr_buff_cnt += nbytes; return 1; } /* Calculate the residue size*/ *next_buff_cnt = total_in_len & (block_size - 1); /* update data len */ update_data_len = total_in_len - *next_buff_cnt; dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n", *next_buff_cnt, update_data_len); /* Copy the new residue to next buffer */ if (*next_buff_cnt) { dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n", next_buff, (update_data_len - *curr_buff_cnt), *next_buff_cnt); cc_copy_sg_portion(dev, next_buff, src, (update_data_len - *curr_buff_cnt), nbytes, CC_SG_TO_BUF); /* change the buffer index for next operation */ swap_index = 1; } if (*curr_buff_cnt) { rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, &sg_data); if (rc) return rc; /* change the buffer index for next operation */ swap_index = 1; } if (update_data_len > *curr_buff_cnt) { rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt), DMA_TO_DEVICE, &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) goto unmap_curr_buff; if (mapped_nents == 1 && areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { /* only one entry in the SG and no previous data */ memcpy(areq_ctx->buff_sg, src, sizeof(struct scatterlist)); areq_ctx->buff_sg->length = update_data_len; areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; areq_ctx->curr_sg = areq_ctx->buff_sg; } else { areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI; } } if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { mlli_params->curr_pool = drvdata->mlli_buffs_pool; /* add the src data to the sg_data */ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, (update_data_len - *curr_buff_cnt), 0, true, &areq_ctx->mlli_nents); rc = cc_generate_mlli(dev, &sg_data, 
mlli_params, flags); if (rc) goto fail_unmap_din; } areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index); return 0; fail_unmap_din: dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); unmap_curr_buff: if (*curr_buff_cnt) dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); return rc; } void cc_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert) { struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; u32 *prev_len = cc_next_buf_cnt(areq_ctx); /*In case a pool was set, a table was *allocated and should be released */ if (areq_ctx->mlli_params.curr_pool) { dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n", &areq_ctx->mlli_params.mlli_dma_addr, areq_ctx->mlli_params.mlli_virt_addr); dma_pool_free(areq_ctx->mlli_params.curr_pool, areq_ctx->mlli_params.mlli_virt_addr, areq_ctx->mlli_params.mlli_dma_addr); } if (src && areq_ctx->in_nents) { dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n", sg_virt(src), &sg_dma_address(src), sg_dma_len(src)); dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); } if (*prev_len) { dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n", sg_virt(areq_ctx->buff_sg), &sg_dma_address(areq_ctx->buff_sg), sg_dma_len(areq_ctx->buff_sg)); dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); if (!do_revert) { /* clean the previous data length for update * operation */ *prev_len = 0; } else { areq_ctx->buff_index ^= 1; } } } int cc_buffer_mgr_init(struct cc_drvdata *drvdata) { struct device *dev = drvdata_to_dev(drvdata); drvdata->mlli_buffs_pool = dma_pool_create("dx_single_mlli_tables", dev, MAX_NUM_OF_TOTAL_MLLI_ENTRIES * LLI_ENTRY_BYTE_SIZE, MLLI_TABLE_MIN_ALIGNMENT, 0); if (!drvdata->mlli_buffs_pool) return -ENOMEM; return 0; } int cc_buffer_mgr_fini(struct cc_drvdata *drvdata) { dma_pool_destroy(drvdata->mlli_buffs_pool); return 0; }
linux-master
drivers/crypto/ccree/cc_buffer_mgr.c
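A minimal userspace sketch of the block/residue split performed by cc_map_hash_request_update() in the buffer manager above: data already buffered plus new bytes is divided into a block-multiple portion that is hashed now and a residue that is carried over. The struct and function names below are illustrative only, not part of the driver, and block_size is assumed to be a power of two as it is for the SHA block sizes the driver handles.

#include <stdio.h>

struct split {
	unsigned int update_len;   /* bytes hashed in this update (block multiple) */
	unsigned int residue_len;  /* bytes carried over to the next buffer        */
};

static struct split split_update(unsigned int buffered, unsigned int nbytes,
				 unsigned int block_size)
{
	unsigned int total = buffered + nbytes;
	struct split s;

	if (total < block_size) {
		/* the driver copies everything into curr_buff and returns early */
		s.update_len = 0;
		s.residue_len = total;
		return s;
	}
	/* same arithmetic as *next_buff_cnt / update_data_len above */
	s.residue_len = total & (block_size - 1);
	s.update_len = total - s.residue_len;
	return s;
}

int main(void)
{
	struct split s = split_update(24, 100, 64);

	printf("update=%u residue=%u\n", s.update_len, s.residue_len); /* 64, 60 */
	return 0;
}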
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> #include "cc_driver.h" #include "cc_buffer_mgr.h" #include "cc_request_mgr.h" #include "cc_sram_mgr.h" #include "cc_hash.h" #include "cc_pm.h" #include "cc_fips.h" #define POWER_DOWN_ENABLE 0x01 #define POWER_DOWN_DISABLE 0x00 static int cc_pm_suspend(struct device *dev) { struct cc_drvdata *drvdata = dev_get_drvdata(dev); dev_dbg(dev, "set HOST_POWER_DOWN_EN\n"); fini_cc_regs(drvdata); cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_ENABLE); clk_disable_unprepare(drvdata->clk); return 0; } static int cc_pm_resume(struct device *dev) { int rc; struct cc_drvdata *drvdata = dev_get_drvdata(dev); dev_dbg(dev, "unset HOST_POWER_DOWN_EN\n"); /* Enables the device source clk */ rc = clk_prepare_enable(drvdata->clk); if (rc) { dev_err(dev, "failed getting clock back on. We're toast.\n"); return rc; } /* wait for Cryptocell reset completion */ if (!cc_wait_for_reset_completion(drvdata)) { dev_err(dev, "Cryptocell reset not completed"); clk_disable_unprepare(drvdata->clk); return -EBUSY; } cc_iowrite(drvdata, CC_REG(HOST_POWER_DOWN_EN), POWER_DOWN_DISABLE); rc = init_cc_regs(drvdata); if (rc) { dev_err(dev, "init_cc_regs (%x)\n", rc); clk_disable_unprepare(drvdata->clk); return rc; } /* check if tee fips error occurred during power down */ cc_tee_handle_fips_error(drvdata); cc_init_hash_sram(drvdata); return 0; } const struct dev_pm_ops ccree_pm = { SET_RUNTIME_PM_OPS(cc_pm_suspend, cc_pm_resume, NULL) }; int cc_pm_get(struct device *dev) { int rc = pm_runtime_get_sync(dev); if (rc < 0) { pm_runtime_put_noidle(dev); return rc; } return 0; } void cc_pm_put_suspend(struct device *dev) { pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); }
linux-master
drivers/crypto/ccree/cc_pm.c
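A hedged sketch of the runtime-PM bracketing pattern that cc_pm_get() and cc_pm_put_suspend() above implement, shown as a generic helper around one unit of work. The function name do_one_request() is hypothetical; only the pm_runtime_* calls are real kernel APIs.

#include <linux/pm_runtime.h>
#include <linux/device.h>

static int do_one_request(struct device *dev)
{
	int rc;

	rc = pm_runtime_get_sync(dev);	/* resume the device if it is suspended */
	if (rc < 0) {
		pm_runtime_put_noidle(dev);	/* balance the usage counter on failure */
		return rc;
	}

	/* ... submit work to the now-powered device ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop our reference; suspend later */
	return 0;
}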
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ #include "cc_driver.h" #include "cc_sram_mgr.h" /** * cc_sram_mgr_init() - Initializes SRAM pool. * The pool starts right at the beginning of SRAM. * Returns zero for success, negative value otherwise. * * @drvdata: Associated device driver context * * Return: * 0 for success, negative error code for failure. */ int cc_sram_mgr_init(struct cc_drvdata *drvdata) { u32 start = 0; struct device *dev = drvdata_to_dev(drvdata); if (drvdata->hw_rev < CC_HW_REV_712) { /* Pool starts after ROM bytes */ start = cc_ioread(drvdata, CC_REG(HOST_SEP_SRAM_THRESHOLD)); if ((start & 0x3) != 0) { dev_err(dev, "Invalid SRAM offset 0x%x\n", start); return -EINVAL; } } drvdata->sram_free_offset = start; return 0; } /** * cc_sram_alloc() - Allocate buffer from SRAM pool. * * @drvdata: Associated device driver context * @size: The requested numer of bytes to allocate * * Return: * Address offset in SRAM or NULL_SRAM_ADDR for failure. */ u32 cc_sram_alloc(struct cc_drvdata *drvdata, u32 size) { struct device *dev = drvdata_to_dev(drvdata); u32 p; if ((size & 0x3)) { dev_err(dev, "Requested buffer size (%u) is not multiple of 4", size); return NULL_SRAM_ADDR; } if (size > (CC_CC_SRAM_SIZE - drvdata->sram_free_offset)) { dev_err(dev, "Not enough space to allocate %u B (at offset %u)\n", size, drvdata->sram_free_offset); return NULL_SRAM_ADDR; } p = drvdata->sram_free_offset; drvdata->sram_free_offset += size; dev_dbg(dev, "Allocated %u B @ %u\n", size, p); return p; } /** * cc_set_sram_desc() - Create const descriptors sequence to * set values in given array into SRAM. * Note: each const value can't exceed word size. * * @src: A pointer to array of words to set as consts. * @dst: The target SRAM buffer to set into * @nelement: The number of words in "src" array * @seq: A pointer to the given IN/OUT descriptor sequence * @seq_len: A pointer to the given IN/OUT sequence length */ void cc_set_sram_desc(const u32 *src, u32 dst, unsigned int nelement, struct cc_hw_desc *seq, unsigned int *seq_len) { u32 i; unsigned int idx = *seq_len; for (i = 0; i < nelement; i++, idx++) { hw_desc_init(&seq[idx]); set_din_const(&seq[idx], src[i], sizeof(u32)); set_dout_sram(&seq[idx], dst + (i * sizeof(u32)), sizeof(u32)); set_flow_mode(&seq[idx], BYPASS); } *seq_len = idx; }
linux-master
drivers/crypto/ccree/cc_sram_mgr.c
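The SRAM manager above is a simple bump allocator: word-aligned sizes, offsets instead of pointers, and no free path. Below is a standalone sketch of that policy with made-up names (SRAM_SIZE, sram_pool, sram_alloc); it mirrors the checks in cc_sram_alloc() but is not the driver code.

#include <stdint.h>

#define SRAM_SIZE	4096u
#define SRAM_ALLOC_ERR	0xFFFFFFFFu	/* stands in for NULL_SRAM_ADDR */

struct sram_pool {
	uint32_t free_offset;	/* next free byte offset inside the pool */
};

static uint32_t sram_alloc(struct sram_pool *pool, uint32_t size)
{
	uint32_t p;

	if (size & 0x3)				/* must be a multiple of 4 */
		return SRAM_ALLOC_ERR;
	if (size > SRAM_SIZE - pool->free_offset)
		return SRAM_ALLOC_ERR;		/* pool exhausted */

	p = pool->free_offset;
	pool->free_offset += size;		/* bump; allocations are never freed */
	return p;
}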
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ #include <linux/kernel.h> #include <linux/fips.h> #include <linux/notifier.h> #include "cc_driver.h" #include "cc_fips.h" static void fips_dsr(unsigned long devarg); struct cc_fips_handle { struct tasklet_struct tasklet; struct notifier_block nb; struct cc_drvdata *drvdata; }; /* The function called once at driver entry point to check * whether TEE FIPS error occurred. */ static bool cc_get_tee_fips_status(struct cc_drvdata *drvdata) { u32 reg; reg = cc_ioread(drvdata, CC_REG(GPR_HOST)); /* Did the TEE report status? */ if (reg & CC_FIPS_SYNC_TEE_STATUS) /* Yes. Is it OK? */ return (reg & CC_FIPS_SYNC_MODULE_OK); /* No. It's either not in use or will be reported later */ return true; } /* * This function should push the FIPS REE library status towards the TEE library * by writing the error state to HOST_GPR0 register. */ void cc_set_ree_fips_status(struct cc_drvdata *drvdata, bool status) { int val = CC_FIPS_SYNC_REE_STATUS; if (drvdata->hw_rev < CC_HW_REV_712) return; val |= (status ? CC_FIPS_SYNC_MODULE_OK : CC_FIPS_SYNC_MODULE_ERROR); cc_iowrite(drvdata, CC_REG(HOST_GPR0), val); } /* Push REE side FIPS test failure to TEE side */ static int cc_ree_fips_failure(struct notifier_block *nb, unsigned long unused1, void *unused2) { struct cc_fips_handle *fips_h = container_of(nb, struct cc_fips_handle, nb); struct cc_drvdata *drvdata = fips_h->drvdata; struct device *dev = drvdata_to_dev(drvdata); cc_set_ree_fips_status(drvdata, false); dev_info(dev, "Notifying TEE of FIPS test failure...\n"); return NOTIFY_OK; } void cc_fips_fini(struct cc_drvdata *drvdata) { struct cc_fips_handle *fips_h = drvdata->fips_handle; if (drvdata->hw_rev < CC_HW_REV_712 || !fips_h) return; atomic_notifier_chain_unregister(&fips_fail_notif_chain, &fips_h->nb); /* Kill tasklet */ tasklet_kill(&fips_h->tasklet); drvdata->fips_handle = NULL; } void fips_handler(struct cc_drvdata *drvdata) { struct cc_fips_handle *fips_handle_ptr = drvdata->fips_handle; if (drvdata->hw_rev < CC_HW_REV_712) return; tasklet_schedule(&fips_handle_ptr->tasklet); } static inline void tee_fips_error(struct device *dev) { if (fips_enabled) panic("ccree: TEE reported cryptographic error in fips mode!\n"); else dev_err(dev, "TEE reported error!\n"); } /* * This function check if cryptocell tee fips error occurred * and in such case triggers system error */ void cc_tee_handle_fips_error(struct cc_drvdata *p_drvdata) { struct device *dev = drvdata_to_dev(p_drvdata); if (!cc_get_tee_fips_status(p_drvdata)) tee_fips_error(dev); } /* Deferred service handler, run as interrupt-fired tasklet */ static void fips_dsr(unsigned long devarg) { struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg; u32 irq, val; irq = (drvdata->irq & (CC_GPR0_IRQ_MASK)); if (irq) { cc_tee_handle_fips_error(drvdata); } /* after verifying that there is nothing to do, * unmask AXI completion interrupt. 
*/ val = (CC_REG(HOST_IMR) & ~irq); cc_iowrite(drvdata, CC_REG(HOST_IMR), val); } /* The function called once at driver entry point .*/ int cc_fips_init(struct cc_drvdata *p_drvdata) { struct cc_fips_handle *fips_h; struct device *dev = drvdata_to_dev(p_drvdata); if (p_drvdata->hw_rev < CC_HW_REV_712) return 0; fips_h = devm_kzalloc(dev, sizeof(*fips_h), GFP_KERNEL); if (!fips_h) return -ENOMEM; p_drvdata->fips_handle = fips_h; dev_dbg(dev, "Initializing fips tasklet\n"); tasklet_init(&fips_h->tasklet, fips_dsr, (unsigned long)p_drvdata); fips_h->drvdata = p_drvdata; fips_h->nb.notifier_call = cc_ree_fips_failure; atomic_notifier_chain_register(&fips_fail_notif_chain, &fips_h->nb); cc_tee_handle_fips_error(p_drvdata); return 0; }
linux-master
drivers/crypto/ccree/cc_fips.c
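A sketch of the atomic notifier-chain pattern that cc_fips_init() relies on when it hooks into fips_fail_notif_chain. The chain name demo_fail_chain and the callback demo_on_failure are invented for illustration; the notifier API calls themselves are standard kernel interfaces.

#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(demo_fail_chain);

static int demo_on_failure(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	/* react to the failure event; plays the role of cc_ree_fips_failure() */
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_on_failure,
};

static int demo_register(void)
{
	return atomic_notifier_chain_register(&demo_fail_chain, &demo_nb);
}

static void demo_report_failure(void)
{
	/* the chain runs in atomic context, so notifiers must not sleep */
	atomic_notifier_call_chain(&demo_fail_chain, 0, NULL);
}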
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ #include <linux/kernel.h> #include <linux/nospec.h> #include "cc_driver.h" #include "cc_buffer_mgr.h" #include "cc_request_mgr.h" #include "cc_pm.h" #define CC_MAX_POLL_ITER 10 /* The highest descriptor count in used */ #define CC_MAX_DESC_SEQ_LEN 23 struct cc_req_mgr_handle { /* Request manager resources */ unsigned int hw_queue_size; /* HW capability */ unsigned int min_free_hw_slots; unsigned int max_used_sw_slots; struct cc_crypto_req req_queue[MAX_REQUEST_QUEUE_SIZE]; u32 req_queue_head; u32 req_queue_tail; u32 axi_completed; u32 q_free_slots; /* This lock protects access to HW register * that must be single request at a time */ spinlock_t hw_lock; struct cc_hw_desc compl_desc; u8 *dummy_comp_buff; dma_addr_t dummy_comp_buff_dma; /* backlog queue */ struct list_head backlog; unsigned int bl_len; spinlock_t bl_lock; /* protect backlog queue */ #ifdef COMP_IN_WQ struct workqueue_struct *workq; struct delayed_work compwork; #else struct tasklet_struct comptask; #endif }; struct cc_bl_item { struct cc_crypto_req creq; struct cc_hw_desc desc[CC_MAX_DESC_SEQ_LEN]; unsigned int len; struct list_head list; bool notif; }; static const u32 cc_cpp_int_masks[CC_CPP_NUM_ALGS][CC_CPP_NUM_SLOTS] = { { BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_0_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_1_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_2_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_3_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_4_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_5_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_6_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_AES_7_INT_BIT_SHIFT) }, { BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_0_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_1_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_2_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_3_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_4_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_5_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_6_INT_BIT_SHIFT), BIT(CC_HOST_IRR_REE_OP_ABORTED_SM_7_INT_BIT_SHIFT) } }; static void comp_handler(unsigned long devarg); #ifdef COMP_IN_WQ static void comp_work_handler(struct work_struct *work); #endif static inline u32 cc_cpp_int_mask(enum cc_cpp_alg alg, int slot) { alg = array_index_nospec(alg, CC_CPP_NUM_ALGS); slot = array_index_nospec(slot, CC_CPP_NUM_SLOTS); return cc_cpp_int_masks[alg][slot]; } void cc_req_mgr_fini(struct cc_drvdata *drvdata) { struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; struct device *dev = drvdata_to_dev(drvdata); if (!req_mgr_h) return; /* Not allocated */ if (req_mgr_h->dummy_comp_buff_dma) { dma_free_coherent(dev, sizeof(u32), req_mgr_h->dummy_comp_buff, req_mgr_h->dummy_comp_buff_dma); } dev_dbg(dev, "max_used_hw_slots=%d\n", (req_mgr_h->hw_queue_size - req_mgr_h->min_free_hw_slots)); dev_dbg(dev, "max_used_sw_slots=%d\n", req_mgr_h->max_used_sw_slots); #ifdef COMP_IN_WQ destroy_workqueue(req_mgr_h->workq); #else /* Kill tasklet */ tasklet_kill(&req_mgr_h->comptask); #endif kfree_sensitive(req_mgr_h); drvdata->request_mgr_handle = NULL; } int cc_req_mgr_init(struct cc_drvdata *drvdata) { struct cc_req_mgr_handle *req_mgr_h; struct device *dev = drvdata_to_dev(drvdata); int rc = 0; req_mgr_h = kzalloc(sizeof(*req_mgr_h), GFP_KERNEL); if (!req_mgr_h) { rc = -ENOMEM; goto req_mgr_init_err; } drvdata->request_mgr_handle = req_mgr_h; spin_lock_init(&req_mgr_h->hw_lock); 
spin_lock_init(&req_mgr_h->bl_lock); INIT_LIST_HEAD(&req_mgr_h->backlog); #ifdef COMP_IN_WQ dev_dbg(dev, "Initializing completion workqueue\n"); req_mgr_h->workq = create_singlethread_workqueue("ccree"); if (!req_mgr_h->workq) { dev_err(dev, "Failed creating work queue\n"); rc = -ENOMEM; goto req_mgr_init_err; } INIT_DELAYED_WORK(&req_mgr_h->compwork, comp_work_handler); #else dev_dbg(dev, "Initializing completion tasklet\n"); tasklet_init(&req_mgr_h->comptask, comp_handler, (unsigned long)drvdata); #endif req_mgr_h->hw_queue_size = cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_SRAM_SIZE)); dev_dbg(dev, "hw_queue_size=0x%08X\n", req_mgr_h->hw_queue_size); if (req_mgr_h->hw_queue_size < MIN_HW_QUEUE_SIZE) { dev_err(dev, "Invalid HW queue size = %u (Min. required is %u)\n", req_mgr_h->hw_queue_size, MIN_HW_QUEUE_SIZE); rc = -ENOMEM; goto req_mgr_init_err; } req_mgr_h->min_free_hw_slots = req_mgr_h->hw_queue_size; req_mgr_h->max_used_sw_slots = 0; /* Allocate DMA word for "dummy" completion descriptor use */ req_mgr_h->dummy_comp_buff = dma_alloc_coherent(dev, sizeof(u32), &req_mgr_h->dummy_comp_buff_dma, GFP_KERNEL); if (!req_mgr_h->dummy_comp_buff) { dev_err(dev, "Not enough memory to allocate DMA (%zu) dropped buffer\n", sizeof(u32)); rc = -ENOMEM; goto req_mgr_init_err; } /* Init. "dummy" completion descriptor */ hw_desc_init(&req_mgr_h->compl_desc); set_din_const(&req_mgr_h->compl_desc, 0, sizeof(u32)); set_dout_dlli(&req_mgr_h->compl_desc, req_mgr_h->dummy_comp_buff_dma, sizeof(u32), NS_BIT, 1); set_flow_mode(&req_mgr_h->compl_desc, BYPASS); set_queue_last_ind(drvdata, &req_mgr_h->compl_desc); return 0; req_mgr_init_err: cc_req_mgr_fini(drvdata); return rc; } static void enqueue_seq(struct cc_drvdata *drvdata, struct cc_hw_desc seq[], unsigned int seq_len) { int i, w; void __iomem *reg = drvdata->cc_base + CC_REG(DSCRPTR_QUEUE_WORD0); struct device *dev = drvdata_to_dev(drvdata); /* * We do indeed write all 6 command words to the same * register. The HW supports this. */ for (i = 0; i < seq_len; i++) { for (w = 0; w <= 5; w++) writel_relaxed(seq[i].word[w], reg); if (cc_dump_desc) dev_dbg(dev, "desc[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n", i, seq[i].word[0], seq[i].word[1], seq[i].word[2], seq[i].word[3], seq[i].word[4], seq[i].word[5]); } } /** * request_mgr_complete() - Completion will take place if and only if user * requested completion by cc_send_sync_request(). * * @dev: Device pointer * @dx_compl_h: The completion event to signal * @dummy: unused error code */ static void request_mgr_complete(struct device *dev, void *dx_compl_h, int dummy) { struct completion *this_compl = dx_compl_h; complete(this_compl); } static int cc_queues_status(struct cc_drvdata *drvdata, struct cc_req_mgr_handle *req_mgr_h, unsigned int total_seq_len) { unsigned long poll_queue; struct device *dev = drvdata_to_dev(drvdata); /* SW queue is checked only once as it will not * be changed during the poll because the spinlock_bh * is held by the thread */ if (((req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1)) == req_mgr_h->req_queue_tail) { dev_err(dev, "SW FIFO is full. req_queue_head=%d sw_fifo_len=%d\n", req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE); return -ENOSPC; } if (req_mgr_h->q_free_slots >= total_seq_len) return 0; /* Wait for space in HW queue. Poll constant num of iterations. 
*/ for (poll_queue = 0; poll_queue < CC_MAX_POLL_ITER ; poll_queue++) { req_mgr_h->q_free_slots = cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT)); if (req_mgr_h->q_free_slots < req_mgr_h->min_free_hw_slots) req_mgr_h->min_free_hw_slots = req_mgr_h->q_free_slots; if (req_mgr_h->q_free_slots >= total_seq_len) { /* If there is enough place return */ return 0; } dev_dbg(dev, "HW FIFO is full. q_free_slots=%d total_seq_len=%d\n", req_mgr_h->q_free_slots, total_seq_len); } /* No room in the HW queue try again later */ dev_dbg(dev, "HW FIFO full, timeout. req_queue_head=%d sw_fifo_len=%d q_free_slots=%d total_seq_len=%d\n", req_mgr_h->req_queue_head, MAX_REQUEST_QUEUE_SIZE, req_mgr_h->q_free_slots, total_seq_len); return -ENOSPC; } /** * cc_do_send_request() - Enqueue caller request to crypto hardware. * Need to be called with HW lock held and PM running * * @drvdata: Associated device driver context * @cc_req: The request to enqueue * @desc: The crypto sequence * @len: The crypto sequence length * @add_comp: If "true": add an artificial dout DMA to mark completion * */ static void cc_do_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, struct cc_hw_desc *desc, unsigned int len, bool add_comp) { struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; unsigned int used_sw_slots; unsigned int total_seq_len = len; /*initial sequence length*/ struct device *dev = drvdata_to_dev(drvdata); used_sw_slots = ((req_mgr_h->req_queue_head - req_mgr_h->req_queue_tail) & (MAX_REQUEST_QUEUE_SIZE - 1)); if (used_sw_slots > req_mgr_h->max_used_sw_slots) req_mgr_h->max_used_sw_slots = used_sw_slots; /* Enqueue request - must be locked with HW lock*/ req_mgr_h->req_queue[req_mgr_h->req_queue_head] = *cc_req; req_mgr_h->req_queue_head = (req_mgr_h->req_queue_head + 1) & (MAX_REQUEST_QUEUE_SIZE - 1); dev_dbg(dev, "Enqueue request head=%u\n", req_mgr_h->req_queue_head); /* * We are about to push command to the HW via the command registers * that may reference host memory. We need to issue a memory barrier * to make sure there are no outstanding memory writes */ wmb(); /* STAT_PHASE_4: Push sequence */ enqueue_seq(drvdata, desc, len); if (add_comp) { enqueue_seq(drvdata, &req_mgr_h->compl_desc, 1); total_seq_len++; } if (req_mgr_h->q_free_slots < total_seq_len) { /* This situation should never occur. Maybe indicating problem * with resuming power. Set the free slot count to 0 and hope * for the best. 
*/ dev_err(dev, "HW free slot count mismatch."); req_mgr_h->q_free_slots = 0; } else { /* Update the free slots in HW queue */ req_mgr_h->q_free_slots -= total_seq_len; } } static void cc_enqueue_backlog(struct cc_drvdata *drvdata, struct cc_bl_item *bli) { struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; struct device *dev = drvdata_to_dev(drvdata); spin_lock_bh(&mgr->bl_lock); list_add_tail(&bli->list, &mgr->backlog); ++mgr->bl_len; dev_dbg(dev, "+++bl len: %d\n", mgr->bl_len); spin_unlock_bh(&mgr->bl_lock); tasklet_schedule(&mgr->comptask); } static void cc_proc_backlog(struct cc_drvdata *drvdata) { struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; struct cc_bl_item *bli; struct cc_crypto_req *creq; void *req; struct device *dev = drvdata_to_dev(drvdata); int rc; spin_lock(&mgr->bl_lock); while (mgr->bl_len) { bli = list_first_entry(&mgr->backlog, struct cc_bl_item, list); dev_dbg(dev, "---bl len: %d\n", mgr->bl_len); spin_unlock(&mgr->bl_lock); creq = &bli->creq; req = creq->user_arg; /* * Notify the request we're moving out of the backlog * but only if we haven't done so already. */ if (!bli->notif) { creq->user_cb(dev, req, -EINPROGRESS); bli->notif = true; } spin_lock(&mgr->hw_lock); rc = cc_queues_status(drvdata, mgr, bli->len); if (rc) { /* * There is still no room in the FIFO for * this request. Bail out. We'll return here * on the next completion irq. */ spin_unlock(&mgr->hw_lock); return; } cc_do_send_request(drvdata, &bli->creq, bli->desc, bli->len, false); spin_unlock(&mgr->hw_lock); /* Remove ourselves from the backlog list */ spin_lock(&mgr->bl_lock); list_del(&bli->list); --mgr->bl_len; kfree(bli); } spin_unlock(&mgr->bl_lock); } int cc_send_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, struct cc_hw_desc *desc, unsigned int len, struct crypto_async_request *req) { int rc; struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; struct device *dev = drvdata_to_dev(drvdata); bool backlog_ok = req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG; gfp_t flags = cc_gfp_flags(req); struct cc_bl_item *bli; rc = cc_pm_get(dev); if (rc) { dev_err(dev, "cc_pm_get returned %x\n", rc); return rc; } spin_lock_bh(&mgr->hw_lock); rc = cc_queues_status(drvdata, mgr, len); #ifdef CC_DEBUG_FORCE_BACKLOG if (backlog_ok) rc = -ENOSPC; #endif /* CC_DEBUG_FORCE_BACKLOG */ if (rc == -ENOSPC && backlog_ok) { spin_unlock_bh(&mgr->hw_lock); bli = kmalloc(sizeof(*bli), flags); if (!bli) { cc_pm_put_suspend(dev); return -ENOMEM; } memcpy(&bli->creq, cc_req, sizeof(*cc_req)); memcpy(&bli->desc, desc, len * sizeof(*desc)); bli->len = len; bli->notif = false; cc_enqueue_backlog(drvdata, bli); return -EBUSY; } if (!rc) { cc_do_send_request(drvdata, cc_req, desc, len, false); rc = -EINPROGRESS; } spin_unlock_bh(&mgr->hw_lock); return rc; } int cc_send_sync_request(struct cc_drvdata *drvdata, struct cc_crypto_req *cc_req, struct cc_hw_desc *desc, unsigned int len) { int rc; struct device *dev = drvdata_to_dev(drvdata); struct cc_req_mgr_handle *mgr = drvdata->request_mgr_handle; init_completion(&cc_req->seq_compl); cc_req->user_cb = request_mgr_complete; cc_req->user_arg = &cc_req->seq_compl; rc = cc_pm_get(dev); if (rc) { dev_err(dev, "cc_pm_get returned %x\n", rc); return rc; } while (true) { spin_lock_bh(&mgr->hw_lock); rc = cc_queues_status(drvdata, mgr, len + 1); if (!rc) break; spin_unlock_bh(&mgr->hw_lock); wait_for_completion_interruptible(&drvdata->hw_queue_avail); reinit_completion(&drvdata->hw_queue_avail); } cc_do_send_request(drvdata, cc_req, desc, len, true); 
spin_unlock_bh(&mgr->hw_lock); wait_for_completion(&cc_req->seq_compl); return 0; } /** * send_request_init() - Enqueue caller request to crypto hardware during init * process. * Assume this function is not called in the middle of a flow, * since we set QUEUE_LAST_IND flag in the last descriptor. * * @drvdata: Associated device driver context * @desc: The crypto sequence * @len: The crypto sequence length * * Return: * Returns "0" upon success */ int send_request_init(struct cc_drvdata *drvdata, struct cc_hw_desc *desc, unsigned int len) { struct cc_req_mgr_handle *req_mgr_h = drvdata->request_mgr_handle; unsigned int total_seq_len = len; /*initial sequence length*/ int rc = 0; /* Wait for space in HW and SW FIFO. Poll for as much as FIFO_TIMEOUT. */ rc = cc_queues_status(drvdata, req_mgr_h, total_seq_len); if (rc) return rc; set_queue_last_ind(drvdata, &desc[(len - 1)]); /* * We are about to push command to the HW via the command registers * that may reference host memory. We need to issue a memory barrier * to make sure there are no outstanding memory writes */ wmb(); enqueue_seq(drvdata, desc, len); /* Update the free slots in HW queue */ req_mgr_h->q_free_slots = cc_ioread(drvdata, CC_REG(DSCRPTR_QUEUE_CONTENT)); return 0; } void complete_request(struct cc_drvdata *drvdata) { struct cc_req_mgr_handle *request_mgr_handle = drvdata->request_mgr_handle; complete(&drvdata->hw_queue_avail); #ifdef COMP_IN_WQ queue_delayed_work(request_mgr_handle->workq, &request_mgr_handle->compwork, 0); #else tasklet_schedule(&request_mgr_handle->comptask); #endif } #ifdef COMP_IN_WQ static void comp_work_handler(struct work_struct *work) { struct cc_drvdata *drvdata = container_of(work, struct cc_drvdata, compwork.work); comp_handler((unsigned long)drvdata); } #endif static void proc_completions(struct cc_drvdata *drvdata) { struct cc_crypto_req *cc_req; struct device *dev = drvdata_to_dev(drvdata); struct cc_req_mgr_handle *request_mgr_handle = drvdata->request_mgr_handle; unsigned int *tail = &request_mgr_handle->req_queue_tail; unsigned int *head = &request_mgr_handle->req_queue_head; int rc; u32 mask; while (request_mgr_handle->axi_completed) { request_mgr_handle->axi_completed--; /* Dequeue request */ if (*head == *tail) { /* We are supposed to handle a completion but our * queue is empty. This is not normal. Return and * hope for the best. */ dev_err(dev, "Request queue is empty head == tail %u\n", *head); break; } cc_req = &request_mgr_handle->req_queue[*tail]; if (cc_req->cpp.is_cpp) { dev_dbg(dev, "CPP request completion slot: %d alg:%d\n", cc_req->cpp.slot, cc_req->cpp.alg); mask = cc_cpp_int_mask(cc_req->cpp.alg, cc_req->cpp.slot); rc = (drvdata->irq & mask ? -EPERM : 0); dev_dbg(dev, "Got mask: %x irq: %x rc: %d\n", mask, drvdata->irq, rc); } else { dev_dbg(dev, "None CPP request completion\n"); rc = 0; } if (cc_req->user_cb) cc_req->user_cb(dev, cc_req->user_arg, rc); *tail = (*tail + 1) & (MAX_REQUEST_QUEUE_SIZE - 1); dev_dbg(dev, "Dequeue request tail=%u\n", *tail); dev_dbg(dev, "Request completed. 
axi_completed=%d\n", request_mgr_handle->axi_completed); cc_pm_put_suspend(dev); } } static inline u32 cc_axi_comp_count(struct cc_drvdata *drvdata) { return FIELD_GET(AXIM_MON_COMP_VALUE, cc_ioread(drvdata, drvdata->axim_mon_offset)); } /* Deferred service handler, run as interrupt-fired tasklet */ static void comp_handler(unsigned long devarg) { struct cc_drvdata *drvdata = (struct cc_drvdata *)devarg; struct cc_req_mgr_handle *request_mgr_handle = drvdata->request_mgr_handle; struct device *dev = drvdata_to_dev(drvdata); u32 irq; dev_dbg(dev, "Completion handler called!\n"); irq = (drvdata->irq & drvdata->comp_mask); /* To avoid the interrupt from firing as we unmask it, * we clear it now */ cc_iowrite(drvdata, CC_REG(HOST_ICR), irq); /* Avoid race with above clear: Test completion counter once more */ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata); dev_dbg(dev, "AXI completion after updated: %d\n", request_mgr_handle->axi_completed); while (request_mgr_handle->axi_completed) { do { drvdata->irq |= cc_ioread(drvdata, CC_REG(HOST_IRR)); irq = (drvdata->irq & drvdata->comp_mask); proc_completions(drvdata); /* At this point (after proc_completions()), * request_mgr_handle->axi_completed is 0. */ request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata); } while (request_mgr_handle->axi_completed > 0); cc_iowrite(drvdata, CC_REG(HOST_ICR), irq); request_mgr_handle->axi_completed += cc_axi_comp_count(drvdata); } /* after verifying that there is nothing to do, * unmask AXI completion interrupt */ cc_iowrite(drvdata, CC_REG(HOST_IMR), cc_ioread(drvdata, CC_REG(HOST_IMR)) & ~drvdata->comp_mask); cc_proc_backlog(drvdata); dev_dbg(dev, "Comp. handler done.\n"); }
linux-master
drivers/crypto/ccree/cc_request_mgr.c
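The request manager above tracks its software queue with head/tail indices on a power-of-two ring, sacrificing one slot so that "full" and "empty" stay distinguishable. The standalone sketch below isolates just that index arithmetic with illustrative names (QUEUE_SIZE, struct ring); it is not the driver's structure, only the same masking scheme used with MAX_REQUEST_QUEUE_SIZE.

#include <stdbool.h>

#define QUEUE_SIZE 32u	/* must be a power of two */

struct ring {
	unsigned int head;	/* next slot to enqueue into  */
	unsigned int tail;	/* next slot to dequeue from  */
};

static bool ring_full(const struct ring *r)
{
	/* one slot is left unused so full != empty */
	return ((r->head + 1) & (QUEUE_SIZE - 1)) == r->tail;
}

static unsigned int ring_used(const struct ring *r)
{
	return (r->head - r->tail) & (QUEUE_SIZE - 1);
}

static bool ring_enqueue(struct ring *r)
{
	if (ring_full(r))
		return false;
	r->head = (r->head + 1) & (QUEUE_SIZE - 1);
	return true;
}

static bool ring_dequeue(struct ring *r)
{
	if (r->head == r->tail)		/* empty */
		return false;
	r->tail = (r->tail + 1) & (QUEUE_SIZE - 1);
	return true;
}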
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ #include <linux/kernel.h> #include <linux/module.h> #include <crypto/algapi.h> #include <crypto/internal/aead.h> #include <crypto/authenc.h> #include <crypto/gcm.h> #include <linux/rtnetlink.h> #include <crypto/internal/des.h> #include "cc_driver.h" #include "cc_buffer_mgr.h" #include "cc_aead.h" #include "cc_request_mgr.h" #include "cc_hash.h" #include "cc_sram_mgr.h" #define template_aead template_u.aead #define MAX_AEAD_SETKEY_SEQ 12 #define MAX_AEAD_PROCESS_SEQ 23 #define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE) #define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE) #define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE struct cc_aead_handle { u32 sram_workspace_addr; struct list_head aead_list; }; struct cc_hmac_s { u8 *padded_authkey; u8 *ipad_opad; /* IPAD, OPAD*/ dma_addr_t padded_authkey_dma_addr; dma_addr_t ipad_opad_dma_addr; }; struct cc_xcbc_s { u8 *xcbc_keys; /* K1,K2,K3 */ dma_addr_t xcbc_keys_dma_addr; }; struct cc_aead_ctx { struct cc_drvdata *drvdata; u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */ u8 *enckey; dma_addr_t enckey_dma_addr; union { struct cc_hmac_s hmac; struct cc_xcbc_s xcbc; } auth_state; unsigned int enc_keylen; unsigned int auth_keylen; unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */ unsigned int hash_len; enum drv_cipher_mode cipher_mode; enum cc_flow_mode flow_mode; enum drv_hash_mode auth_mode; }; static void cc_aead_exit(struct crypto_aead *tfm) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm), crypto_tfm_alg_name(&tfm->base)); /* Unmap enckey buffer */ if (ctx->enckey) { dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey, ctx->enckey_dma_addr); dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n", &ctx->enckey_dma_addr); ctx->enckey_dma_addr = 0; ctx->enckey = NULL; } if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc; if (xcbc->xcbc_keys) { dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3, xcbc->xcbc_keys, xcbc->xcbc_keys_dma_addr); } dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n", &xcbc->xcbc_keys_dma_addr); xcbc->xcbc_keys_dma_addr = 0; xcbc->xcbc_keys = NULL; } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. 
*/ struct cc_hmac_s *hmac = &ctx->auth_state.hmac; if (hmac->ipad_opad) { dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE, hmac->ipad_opad, hmac->ipad_opad_dma_addr); dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n", &hmac->ipad_opad_dma_addr); hmac->ipad_opad_dma_addr = 0; hmac->ipad_opad = NULL; } if (hmac->padded_authkey) { dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE, hmac->padded_authkey, hmac->padded_authkey_dma_addr); dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n", &hmac->padded_authkey_dma_addr); hmac->padded_authkey_dma_addr = 0; hmac->padded_authkey = NULL; } } } static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); return cc_get_default_hash_len(ctx->drvdata); } static int cc_aead_init(struct crypto_aead *tfm) { struct aead_alg *alg = crypto_aead_alg(tfm); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct cc_crypto_alg *cc_alg = container_of(alg, struct cc_crypto_alg, aead_alg); struct device *dev = drvdata_to_dev(cc_alg->drvdata); dev_dbg(dev, "Initializing context @%p for %s\n", ctx, crypto_tfm_alg_name(&tfm->base)); /* Initialize modes in instance */ ctx->cipher_mode = cc_alg->cipher_mode; ctx->flow_mode = cc_alg->flow_mode; ctx->auth_mode = cc_alg->auth_mode; ctx->drvdata = cc_alg->drvdata; crypto_aead_set_reqsize_dma(tfm, sizeof(struct aead_req_ctx)); /* Allocate key buffer, cache line aligned */ ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE, &ctx->enckey_dma_addr, GFP_KERNEL); if (!ctx->enckey) { dev_err(dev, "Failed allocating key buffer\n"); goto init_failed; } dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n", ctx->enckey); /* Set default authlen value */ if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authetication */ struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc; const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3; /* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */ /* (and temporary for user key - up to 256b) */ xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size, &xcbc->xcbc_keys_dma_addr, GFP_KERNEL); if (!xcbc->xcbc_keys) { dev_err(dev, "Failed allocating buffer for XCBC keys\n"); goto init_failed; } } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */ struct cc_hmac_s *hmac = &ctx->auth_state.hmac; const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE; dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr; /* Allocate dma-coherent buffer for IPAD + OPAD */ hmac->ipad_opad = dma_alloc_coherent(dev, digest_size, &hmac->ipad_opad_dma_addr, GFP_KERNEL); if (!hmac->ipad_opad) { dev_err(dev, "Failed allocating IPAD/OPAD buffer\n"); goto init_failed; } dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n", hmac->ipad_opad); hmac->padded_authkey = dma_alloc_coherent(dev, MAX_HMAC_BLOCK_SIZE, pkey_dma, GFP_KERNEL); if (!hmac->padded_authkey) { dev_err(dev, "failed to allocate padded_authkey\n"); goto init_failed; } } else { ctx->auth_state.hmac.ipad_opad = NULL; ctx->auth_state.hmac.padded_authkey = NULL; } ctx->hash_len = cc_get_aead_hash_len(tfm); return 0; init_failed: cc_aead_exit(tfm); return -ENOMEM; } static void cc_aead_complete(struct device *dev, void *cc_req, int err) { struct aead_request *areq = (struct aead_request *)cc_req; struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq); struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); /* BACKLOG notification */ if (err == -EINPROGRESS) goto done; 
cc_unmap_aead_request(dev, areq); /* Restore ordinary iv pointer */ areq->iv = areq_ctx->backup_iv; if (err) goto done; if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr, ctx->authsize) != 0) { dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n", ctx->authsize, ctx->cipher_mode); /* In case of payload authentication failure, MUST NOT * revealed the decrypted message --> zero its memory. */ sg_zero_buffer(areq->dst, sg_nents(areq->dst), areq->cryptlen, areq->assoclen); err = -EBADMSG; } /*ENCRYPT*/ } else if (areq_ctx->is_icv_fragmented) { u32 skip = areq->cryptlen + areq_ctx->dst_offset; cc_copy_sg_portion(dev, areq_ctx->mac_buf, areq_ctx->dst_sgl, skip, (skip + ctx->authsize), CC_SG_FROM_BUF); } done: aead_request_complete(areq, err); } static unsigned int xcbc_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx) { /* Load the AES key */ hw_desc_init(&desc[0]); /* We are using for the source/user key the same buffer * as for the output keys, * because after this key loading it * is not needed anymore */ set_din_type(&desc[0], DMA_DLLI, ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen, NS_BIT); set_cipher_mode(&desc[0], DRV_CIPHER_ECB); set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT); set_key_size_aes(&desc[0], ctx->auth_keylen); set_flow_mode(&desc[0], S_DIN_to_AES); set_setup_mode(&desc[0], SETUP_LOAD_KEY0); hw_desc_init(&desc[1]); set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[1], DIN_AES_DOUT); set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr, AES_KEYSIZE_128, NS_BIT, 0); hw_desc_init(&desc[2]); set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[2], DIN_AES_DOUT); set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr + AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT, 0); hw_desc_init(&desc[3]); set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[3], DIN_AES_DOUT); set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr + 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT, 0); return 4; } static unsigned int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx) { unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST }; unsigned int digest_ofs = 0; unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ? DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256; unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ? 
CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE; struct cc_hmac_s *hmac = &ctx->auth_state.hmac; unsigned int idx = 0; int i; /* calc derived HMAC key */ for (i = 0; i < 2; i++) { /* Load hash initial state */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_din_sram(&desc[idx], cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode), digest_size); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); idx++; /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_din_const(&desc[idx], 0, ctx->hash_len); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; /* Prepare ipad key */ hw_desc_init(&desc[idx]); set_xor_val(&desc[idx], hmac_pad_const[i]); set_cipher_mode(&desc[idx], hash_mode); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); idx++; /* Perform HASH update */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, hmac->padded_authkey_dma_addr, SHA256_BLOCK_SIZE, NS_BIT); set_cipher_mode(&desc[idx], hash_mode); set_xor_active(&desc[idx]); set_flow_mode(&desc[idx], DIN_HASH); idx++; /* Get the digset */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_dout_dlli(&desc[idx], (hmac->ipad_opad_dma_addr + digest_ofs), digest_size, NS_BIT, 0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); idx++; digest_ofs += digest_size; } return idx; } static int validate_keys_sizes(struct cc_aead_ctx *ctx) { struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n", ctx->enc_keylen, ctx->auth_keylen); switch (ctx->auth_mode) { case DRV_HASH_SHA1: case DRV_HASH_SHA256: break; case DRV_HASH_XCBC_MAC: if (ctx->auth_keylen != AES_KEYSIZE_128 && ctx->auth_keylen != AES_KEYSIZE_192 && ctx->auth_keylen != AES_KEYSIZE_256) return -ENOTSUPP; break; case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key) */ if (ctx->auth_keylen > 0) return -EINVAL; break; default: dev_dbg(dev, "Invalid auth_mode=%d\n", ctx->auth_mode); return -EINVAL; } /* Check cipher key size */ if (ctx->flow_mode == S_DIN_to_DES) { if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) { dev_dbg(dev, "Invalid cipher(3DES) key size: %u\n", ctx->enc_keylen); return -EINVAL; } } else { /* Default assumed to be AES ciphers */ if (ctx->enc_keylen != AES_KEYSIZE_128 && ctx->enc_keylen != AES_KEYSIZE_192 && ctx->enc_keylen != AES_KEYSIZE_256) { dev_dbg(dev, "Invalid cipher(AES) key size: %u\n", ctx->enc_keylen); return -EINVAL; } } return 0; /* All tests of keys sizes passed */ } /* This function prepers the user key so it can pass to the hmac processing * (copy to intenral buffer or hash in case of key longer than block */ static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey, unsigned int keylen) { dma_addr_t key_dma_addr = 0; struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); u32 larval_addr; struct cc_crypto_req cc_req = {}; unsigned int blocksize; unsigned int digestsize; unsigned int hashmode; unsigned int idx = 0; int rc = 0; u8 *key = NULL; struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; dma_addr_t padded_authkey_dma_addr = ctx->auth_state.hmac.padded_authkey_dma_addr; switch (ctx->auth_mode) { /* auth_key required and >0 */ case DRV_HASH_SHA1: blocksize = SHA1_BLOCK_SIZE; digestsize = SHA1_DIGEST_SIZE; hashmode = DRV_HASH_HW_SHA1; break; case 
DRV_HASH_SHA256: default: blocksize = SHA256_BLOCK_SIZE; digestsize = SHA256_DIGEST_SIZE; hashmode = DRV_HASH_HW_SHA256; } if (keylen != 0) { key = kmemdup(authkey, keylen, GFP_KERNEL); if (!key) return -ENOMEM; key_dma_addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE); if (dma_mapping_error(dev, key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", key, keylen); kfree_sensitive(key); return -ENOMEM; } if (keylen > blocksize) { /* Load hash initial state */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hashmode); larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode); set_din_sram(&desc[idx], larval_addr, digestsize); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); idx++; /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hashmode); set_din_const(&desc[idx], 0, ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, keylen, NS_BIT); set_flow_mode(&desc[idx], DIN_HASH); idx++; /* Get hashed key */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hashmode); set_dout_dlli(&desc[idx], padded_authkey_dma_addr, digestsize, NS_BIT, 0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN); idx++; hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0, (blocksize - digestsize)); set_flow_mode(&desc[idx], BYPASS); set_dout_dlli(&desc[idx], (padded_authkey_dma_addr + digestsize), (blocksize - digestsize), NS_BIT, 0); idx++; } else { hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, key_dma_addr, keylen, NS_BIT); set_flow_mode(&desc[idx], BYPASS); set_dout_dlli(&desc[idx], padded_authkey_dma_addr, keylen, NS_BIT, 0); idx++; if ((blocksize - keylen) != 0) { hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0, (blocksize - keylen)); set_flow_mode(&desc[idx], BYPASS); set_dout_dlli(&desc[idx], (padded_authkey_dma_addr + keylen), (blocksize - keylen), NS_BIT, 0); idx++; } } } else { hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0, (blocksize - keylen)); set_flow_mode(&desc[idx], BYPASS); set_dout_dlli(&desc[idx], padded_authkey_dma_addr, blocksize, NS_BIT, 0); idx++; } rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx); if (rc) dev_err(dev, "send_request() failed (rc=%d)\n", rc); if (key_dma_addr) dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE); kfree_sensitive(key); return rc; } static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct cc_crypto_req cc_req = {}; struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ]; unsigned int seq_len = 0; struct device *dev = drvdata_to_dev(ctx->drvdata); const u8 *enckey, *authkey; int rc; dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n", ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen); /* STAT_PHASE_0: Init and sanity checks */ if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. 
*/ struct crypto_authenc_keys keys; rc = crypto_authenc_extractkeys(&keys, key, keylen); if (rc) return rc; enckey = keys.enckey; authkey = keys.authkey; ctx->enc_keylen = keys.enckeylen; ctx->auth_keylen = keys.authkeylen; if (ctx->cipher_mode == DRV_CIPHER_CTR) { /* the nonce is stored in bytes at end of key */ if (ctx->enc_keylen < (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE)) return -EINVAL; /* Copy nonce from last 4 bytes in CTR key to * first 4 bytes in CTR IV */ memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE); /* Set CTR key size */ ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE; } } else { /* non-authenc - has just one key */ enckey = key; authkey = NULL; ctx->enc_keylen = keylen; ctx->auth_keylen = 0; } rc = validate_keys_sizes(ctx); if (rc) return rc; /* STAT_PHASE_1: Copy key to ctx */ /* Get key material */ memcpy(ctx->enckey, enckey, ctx->enc_keylen); if (ctx->enc_keylen == 24) memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24); if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey, ctx->auth_keylen); } else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */ rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen); if (rc) return rc; } /* STAT_PHASE_2: Create sequence */ switch (ctx->auth_mode) { case DRV_HASH_SHA1: case DRV_HASH_SHA256: seq_len = hmac_setkey(desc, ctx); break; case DRV_HASH_XCBC_MAC: seq_len = xcbc_setkey(desc, ctx); break; case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */ break; /* No auth. key setup */ default: dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode); return -ENOTSUPP; } /* STAT_PHASE_3: Submit sequence to HW */ if (seq_len > 0) { /* For CCM there is no sequence to setup the key */ rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len); if (rc) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); return rc; } } /* Update STAT_PHASE_3 */ return rc; } static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key, unsigned int keylen) { struct crypto_authenc_keys keys; int err; err = crypto_authenc_extractkeys(&keys, key, keylen); if (unlikely(err)) return err; err = verify_aead_des3_key(aead, keys.enckey, keys.enckeylen) ?: cc_aead_setkey(aead, key, keylen); memzero_explicit(&keys, sizeof(keys)); return err; } static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); if (keylen < 3) return -EINVAL; keylen -= 3; memcpy(ctx->ctr_nonce, key + keylen, 3); return cc_aead_setkey(tfm, key, keylen); } static int cc_aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc); struct device *dev = drvdata_to_dev(ctx->drvdata); /* Unsupported auth. 
sizes */ if (authsize == 0 || authsize > crypto_aead_maxauthsize(authenc)) { return -ENOTSUPP; } ctx->authsize = authsize; dev_dbg(dev, "authlen=%d\n", ctx->authsize); return 0; } static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } return cc_aead_setauthsize(authenc, authsize); } static int cc_ccm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { switch (authsize) { case 4: case 6: case 8: case 10: case 12: case 14: case 16: break; default: return -EINVAL; } return cc_aead_setauthsize(authenc, authsize); } static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(areq); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq); enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type; unsigned int idx = *seq_size; struct device *dev = drvdata_to_dev(ctx->drvdata); switch (assoc_dma_type) { case CC_DMA_BUF_DLLI: dev_dbg(dev, "ASSOC buffer type DLLI\n"); hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src), areq_ctx->assoclen, NS_BIT); set_flow_mode(&desc[idx], flow_mode); if (ctx->auth_mode == DRV_HASH_XCBC_MAC && areq_ctx->cryptlen > 0) set_din_not_last_indication(&desc[idx]); break; case CC_DMA_BUF_MLLI: dev_dbg(dev, "ASSOC buffer type MLLI\n"); hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr, areq_ctx->assoc.mlli_nents, NS_BIT); set_flow_mode(&desc[idx], flow_mode); if (ctx->auth_mode == DRV_HASH_XCBC_MAC && areq_ctx->cryptlen > 0) set_din_not_last_indication(&desc[idx]); break; case CC_DMA_BUF_NULL: default: dev_err(dev, "Invalid ASSOC buffer type\n"); } *seq_size = (++idx); } static void cc_proc_authen_desc(struct aead_request *areq, unsigned int flow_mode, struct cc_hw_desc desc[], unsigned int *seq_size, int direct) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq); enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type; unsigned int idx = *seq_size; struct crypto_aead *tfm = crypto_aead_reqtfm(areq); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); switch (data_dma_type) { case CC_DMA_BUF_DLLI: { struct scatterlist *cipher = (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? areq_ctx->dst_sgl : areq_ctx->src_sgl; unsigned int offset = (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? areq_ctx->dst_offset : areq_ctx->src_offset; dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n"); hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, (sg_dma_address(cipher) + offset), areq_ctx->cryptlen, NS_BIT); set_flow_mode(&desc[idx], flow_mode); break; } case CC_DMA_BUF_MLLI: { /* DOUBLE-PASS flow (as default) * assoc. 
+ iv + data -compact in one table * if assoclen is ZERO only IV perform */ u32 mlli_addr = areq_ctx->assoc.sram_addr; u32 mlli_nents = areq_ctx->assoc.mlli_nents; if (areq_ctx->is_single_pass) { if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { mlli_addr = areq_ctx->dst.sram_addr; mlli_nents = areq_ctx->dst.mlli_nents; } else { mlli_addr = areq_ctx->src.sram_addr; mlli_nents = areq_ctx->src.mlli_nents; } } dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n"); hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents, NS_BIT); set_flow_mode(&desc[idx], flow_mode); break; } case CC_DMA_BUF_NULL: default: dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n"); } *seq_size = (++idx); } static void cc_proc_cipher_desc(struct aead_request *areq, unsigned int flow_mode, struct cc_hw_desc desc[], unsigned int *seq_size) { unsigned int idx = *seq_size; struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(areq); enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type; struct crypto_aead *tfm = crypto_aead_reqtfm(areq); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); if (areq_ctx->cryptlen == 0) return; /*null processing*/ switch (data_dma_type) { case CC_DMA_BUF_DLLI: dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n"); hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, (sg_dma_address(areq_ctx->src_sgl) + areq_ctx->src_offset), areq_ctx->cryptlen, NS_BIT); set_dout_dlli(&desc[idx], (sg_dma_address(areq_ctx->dst_sgl) + areq_ctx->dst_offset), areq_ctx->cryptlen, NS_BIT, 0); set_flow_mode(&desc[idx], flow_mode); break; case CC_DMA_BUF_MLLI: dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n"); hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr, areq_ctx->src.mlli_nents, NS_BIT); set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr, areq_ctx->dst.mlli_nents, NS_BIT, 0); set_flow_mode(&desc[idx], flow_mode); break; case CC_DMA_BUF_NULL: default: dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n"); } *seq_size = (++idx); } static void cc_proc_digest_desc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int idx = *seq_size; unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ? 
DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256; int direct = req_ctx->gen_ctx.op_type; /* Get final ICV result */ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { hw_desc_init(&desc[idx]); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { set_aes_not_hash_mode(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); } else { set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN); set_cipher_mode(&desc[idx], hash_mode); } } else { /*Decrypt*/ /* Get ICV out from hardware */ hw_desc_init(&desc[idx]); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN); set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); set_aes_not_hash_mode(&desc[idx]); } else { set_cipher_mode(&desc[idx], hash_mode); } } *seq_size = (++idx); } static void cc_set_cipher_desc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int hw_iv_size = req_ctx->hw_iv_size; unsigned int idx = *seq_size; int direct = req_ctx->gen_ctx.op_type; /* Setup cipher state */ hw_desc_init(&desc[idx]); set_cipher_config0(&desc[idx], direct); set_flow_mode(&desc[idx], ctx->flow_mode); set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr, hw_iv_size, NS_BIT); if (ctx->cipher_mode == DRV_CIPHER_CTR) set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); else set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); set_cipher_mode(&desc[idx], ctx->cipher_mode); idx++; /* Setup enc. key */ hw_desc_init(&desc[idx]); set_cipher_config0(&desc[idx], direct); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); set_flow_mode(&desc[idx], ctx->flow_mode); if (ctx->flow_mode == S_DIN_to_AES) { set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, ((ctx->enc_keylen == 24) ? 
CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), NS_BIT); set_key_size_aes(&desc[idx], ctx->enc_keylen); } else { set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, ctx->enc_keylen, NS_BIT); set_key_size_des(&desc[idx], ctx->enc_keylen); } set_cipher_mode(&desc[idx], ctx->cipher_mode); idx++; *seq_size = idx; } static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size, unsigned int data_flow_mode) { struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); int direct = req_ctx->gen_ctx.op_type; unsigned int idx = *seq_size; if (req_ctx->cryptlen == 0) return; /*null processing*/ cc_set_cipher_desc(req, desc, &idx); cc_proc_cipher_desc(req, data_flow_mode, desc, &idx); if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { /* We must wait for DMA to write all cipher */ hw_desc_init(&desc[idx]); set_din_no_dma(&desc[idx], 0, 0xfffff0); set_dout_no_dma(&desc[idx], 0, 0, 1); idx++; } *seq_size = idx; } static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ? DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256; unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ? CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE; unsigned int idx = *seq_size; /* Loading hash ipad xor key state */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_din_type(&desc[idx], DMA_DLLI, ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size, NS_BIT); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); idx++; /* Load init. digest len (64 bytes) */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode), ctx->hash_len); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; *seq_size = idx; } static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); unsigned int idx = *seq_size; /* Loading MAC state */ hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_aes_not_hash_mode(&desc[idx]); idx++; /* Setup XCBC MAC K1 */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, ctx->auth_state.xcbc.xcbc_keys_dma_addr, AES_KEYSIZE_128, NS_BIT); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_aes_not_hash_mode(&desc[idx]); idx++; /* Setup XCBC MAC K2 */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, (ctx->auth_state.xcbc.xcbc_keys_dma_addr + AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT); set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_aes_not_hash_mode(&desc[idx]); idx++; /* 
Setup XCBC MAC K3 */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, (ctx->auth_state.xcbc.xcbc_keys_dma_addr + 2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT); set_setup_mode(&desc[idx], SETUP_LOAD_STATE2); set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_aes_not_hash_mode(&desc[idx]); idx++; *seq_size = idx; } static void cc_proc_header_desc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); unsigned int idx = *seq_size; /* Hash associated data */ if (areq_ctx->assoclen > 0) cc_set_assoc_desc(req, DIN_HASH, desc, &idx); /* Hash IV */ *seq_size = idx; } static void cc_proc_scheme_desc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle; unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ? DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256; unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ? CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE; unsigned int idx = *seq_size; hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr, ctx->hash_len); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); set_cipher_do(&desc[idx], DO_PAD); idx++; /* Get final ICV result */ hw_desc_init(&desc[idx]); set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr, digest_size); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN); set_cipher_mode(&desc[idx], hash_mode); idx++; /* Loading hash opad xor key state */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_din_type(&desc[idx], DMA_DLLI, (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size), digest_size, NS_BIT); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); idx++; /* Load init. 
digest len (64 bytes) */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], hash_mode); set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode), ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; /* Perform HASH update */ hw_desc_init(&desc[idx]); set_din_sram(&desc[idx], aead_handle->sram_workspace_addr, digest_size); set_flow_mode(&desc[idx], DIN_HASH); idx++; *seq_size = idx; } static void cc_mlli_to_sram(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || req_ctx->data_buff_type == CC_DMA_BUF_MLLI || !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) { dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n", ctx->drvdata->mlli_sram_addr, req_ctx->mlli_params.mlli_len); /* Copy MLLI table host-to-sram */ hw_desc_init(&desc[*seq_size]); set_din_type(&desc[*seq_size], DMA_DLLI, req_ctx->mlli_params.mlli_dma_addr, req_ctx->mlli_params.mlli_len, NS_BIT); set_dout_sram(&desc[*seq_size], ctx->drvdata->mlli_sram_addr, req_ctx->mlli_params.mlli_len); set_flow_mode(&desc[*seq_size], BYPASS); (*seq_size)++; } } static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct, enum cc_flow_mode setup_flow_mode, bool is_single_pass) { enum cc_flow_mode data_flow_mode; if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { if (setup_flow_mode == S_DIN_to_AES) data_flow_mode = is_single_pass ? AES_to_HASH_and_DOUT : DIN_AES_DOUT; else data_flow_mode = is_single_pass ? DES_to_HASH_and_DOUT : DIN_DES_DOUT; } else { /* Decrypt */ if (setup_flow_mode == S_DIN_to_AES) data_flow_mode = is_single_pass ? AES_and_HASH : DIN_AES_DOUT; else data_flow_mode = is_single_pass ? DES_and_HASH : DIN_DES_DOUT; } return data_flow_mode; } static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); int direct = req_ctx->gen_ctx.op_type; unsigned int data_flow_mode = cc_get_data_flow(direct, ctx->flow_mode, req_ctx->is_single_pass); if (req_ctx->is_single_pass) { /* * Single-pass flow */ cc_set_hmac_desc(req, desc, seq_size); cc_set_cipher_desc(req, desc, seq_size); cc_proc_header_desc(req, desc, seq_size); cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size); cc_proc_scheme_desc(req, desc, seq_size); cc_proc_digest_desc(req, desc, seq_size); return; } /* * Double-pass flow * Fallback for unsupported single-pass modes, * i.e. using assoc. data of non-word-multiple */ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { /* encrypt first.. */ cc_proc_cipher(req, desc, seq_size, data_flow_mode); /* authenc after..*/ cc_set_hmac_desc(req, desc, seq_size); cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct); cc_proc_scheme_desc(req, desc, seq_size); cc_proc_digest_desc(req, desc, seq_size); } else { /*DECRYPT*/ /* authenc first..*/ cc_set_hmac_desc(req, desc, seq_size); cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct); cc_proc_scheme_desc(req, desc, seq_size); /* decrypt after.. 
*/ cc_proc_cipher(req, desc, seq_size, data_flow_mode); /* read the digest result with setting the completion bit * must be after the cipher operation */ cc_proc_digest_desc(req, desc, seq_size); } } static void cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); int direct = req_ctx->gen_ctx.op_type; unsigned int data_flow_mode = cc_get_data_flow(direct, ctx->flow_mode, req_ctx->is_single_pass); if (req_ctx->is_single_pass) { /* * Single-pass flow */ cc_set_xcbc_desc(req, desc, seq_size); cc_set_cipher_desc(req, desc, seq_size); cc_proc_header_desc(req, desc, seq_size); cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size); cc_proc_digest_desc(req, desc, seq_size); return; } /* * Double-pass flow * Fallback for unsupported single-pass modes, * i.e. using assoc. data of non-word-multiple */ if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) { /* encrypt first.. */ cc_proc_cipher(req, desc, seq_size, data_flow_mode); /* authenc after.. */ cc_set_xcbc_desc(req, desc, seq_size); cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct); cc_proc_digest_desc(req, desc, seq_size); } else { /*DECRYPT*/ /* authenc first.. */ cc_set_xcbc_desc(req, desc, seq_size); cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct); /* decrypt after..*/ cc_proc_cipher(req, desc, seq_size, data_flow_mode); /* read the digest result with setting the completion bit * must be after the cipher operation */ cc_proc_digest_desc(req, desc, seq_size); } } static int validate_data_size(struct cc_aead_ctx *ctx, enum drv_crypto_direction direct, struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); struct device *dev = drvdata_to_dev(ctx->drvdata); unsigned int assoclen = areq_ctx->assoclen; unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ? 
(req->cryptlen - ctx->authsize) : req->cryptlen; if (direct == DRV_CRYPTO_DIRECTION_DECRYPT && req->cryptlen < ctx->authsize) goto data_size_err; areq_ctx->is_single_pass = true; /*defaulted to fast flow*/ switch (ctx->flow_mode) { case S_DIN_to_AES: if (ctx->cipher_mode == DRV_CIPHER_CBC && !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE)) goto data_size_err; if (ctx->cipher_mode == DRV_CIPHER_CCM) break; if (ctx->cipher_mode == DRV_CIPHER_GCTR) { if (areq_ctx->plaintext_authenticate_only) areq_ctx->is_single_pass = false; break; } if (!IS_ALIGNED(assoclen, sizeof(u32))) areq_ctx->is_single_pass = false; if (ctx->cipher_mode == DRV_CIPHER_CTR && !IS_ALIGNED(cipherlen, sizeof(u32))) areq_ctx->is_single_pass = false; break; case S_DIN_to_DES: if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE)) goto data_size_err; if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE)) areq_ctx->is_single_pass = false; break; default: dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode); goto data_size_err; } return 0; data_size_err: return -EINVAL; } static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size) { unsigned int len = 0; if (header_size == 0) return 0; if (header_size < ((1UL << 16) - (1UL << 8))) { len = 2; pa0_buff[0] = (header_size >> 8) & 0xFF; pa0_buff[1] = header_size & 0xFF; } else { len = 6; pa0_buff[0] = 0xFF; pa0_buff[1] = 0xFE; pa0_buff[2] = (header_size >> 24) & 0xFF; pa0_buff[3] = (header_size >> 16) & 0xFF; pa0_buff[4] = (header_size >> 8) & 0xFF; pa0_buff[5] = header_size & 0xFF; } return len; } static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize) { __be32 data; memset(block, 0, csize); block += csize; if (csize >= 4) csize = 4; else if (msglen > (1 << (8 * csize))) return -EOVERFLOW; data = cpu_to_be32(msglen); memcpy(block - csize, (u8 *)&data + 4 - csize, csize); return 0; } static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int idx = *seq_size; unsigned int cipher_flow_mode; dma_addr_t mac_result; if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { cipher_flow_mode = AES_to_HASH_and_DOUT; mac_result = req_ctx->mac_buf_dma_addr; } else { /* Encrypt */ cipher_flow_mode = AES_and_HASH; mac_result = req_ctx->icv_dma_addr; } /* load key */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_CTR); set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), NS_BIT); set_key_size_aes(&desc[idx], ctx->enc_keylen); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; /* load ctr state */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_CTR); set_key_size_aes(&desc[idx], ctx->enc_keylen); set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; /* load MAC key */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC); set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, ((ctx->enc_keylen == 24) ? 
CC_AES_KEY_SIZE_MAX : ctx->enc_keylen), NS_BIT); set_key_size_aes(&desc[idx], ctx->enc_keylen); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_aes_not_hash_mode(&desc[idx]); idx++; /* load MAC state */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC); set_key_size_aes(&desc[idx], ctx->enc_keylen); set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE, NS_BIT); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_aes_not_hash_mode(&desc[idx]); idx++; /* process assoc data */ if (req_ctx->assoclen > 0) { cc_set_assoc_desc(req, DIN_HASH, desc, &idx); } else { hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(&req_ctx->ccm_adata_sg), AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT); set_flow_mode(&desc[idx], DIN_HASH); idx++; } /* process the cipher */ if (req_ctx->cryptlen) cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx); /* Read temporal MAC */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC); set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize, NS_BIT, 0); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_aes_not_hash_mode(&desc[idx]); idx++; /* load AES-CTR state (for last MAC calculation)*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_CTR); set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr, AES_BLOCK_SIZE, NS_BIT); set_key_size_aes(&desc[idx], ctx->enc_keylen); set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; hw_desc_init(&desc[idx]); set_din_no_dma(&desc[idx], 0, 0xfffff0); set_dout_no_dma(&desc[idx], 0, 0, 1); idx++; /* encrypt the "T" value and store MAC in mac_state */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr, ctx->authsize, NS_BIT); set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); set_flow_mode(&desc[idx], DIN_AES_DOUT); idx++; *seq_size = idx; return 0; } static int config_ccm_adata(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); //unsigned int size_of_a = 0, rem_a_size = 0; unsigned int lp = req->iv[0]; /* Note: The code assume that req->iv[0] already contains the value * of L' of RFC3610 */ unsigned int l = lp + 1; /* This is L' of RFC 3610. */ unsigned int m = ctx->authsize; /* This is M' of RFC 3610. */ u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET; u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET; u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET; unsigned int cryptlen = (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) ? req->cryptlen : (req->cryptlen - ctx->authsize); int rc; memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE); memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3); /* taken from crypto/ccm.c */ /* 2 <= L <= 8, so 1 <= L' <= 7. 
*/ if (l < 2 || l > 8) { dev_dbg(dev, "illegal iv value %X\n", req->iv[0]); return -EINVAL; } memcpy(b0, req->iv, AES_BLOCK_SIZE); /* format control info per RFC 3610 and * NIST Special Publication 800-38C */ *b0 |= (8 * ((m - 2) / 2)); if (req_ctx->assoclen > 0) *b0 |= 64; /* Enable bit 6 if Adata exists. */ rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */ if (rc) { dev_err(dev, "message len overflow detected"); return rc; } /* END of "taken from crypto/ccm.c" */ /* l(a) - size of associated data. */ req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen); memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1); req->iv[15] = 1; memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE); ctr_count_0[15] = 0; return 0; } static void cc_proc_rfc4309_ccm(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); /* L' */ memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE); /* For RFC 4309, always use 4 bytes for message length * (at most 2^32-1 bytes). */ areq_ctx->ctr_iv[0] = 3; /* In RFC 4309 there is an 11-bytes nonce+IV part, * that we build here. */ memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce, CCM_BLOCK_NONCE_SIZE); memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv, CCM_BLOCK_IV_SIZE); req->iv = areq_ctx->ctr_iv; } static void cc_set_ghash_desc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int idx = *seq_size; /* load key to AES*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_ECB); set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, ctx->enc_keylen, NS_BIT); set_key_size_aes(&desc[idx], ctx->enc_keylen); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; /* process one zero block to generate hkey */ hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE); set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0); set_flow_mode(&desc[idx], DIN_AES_DOUT); idx++; /* Memory Barrier */ hw_desc_init(&desc[idx]); set_din_no_dma(&desc[idx], 0, 0xfffff0); set_dout_no_dma(&desc[idx], 0, 0, 1); idx++; /* Load GHASH subkey */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr, AES_BLOCK_SIZE, NS_BIT); set_dout_no_dma(&desc[idx], 0, 0, 1); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_aes_not_hash_mode(&desc[idx]); set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; /* Configure Hash Engine to work with GHASH. * Since it was not possible to extend HASH submodes to add GHASH, * The following command is necessary in order to * select GHASH (according to HW designers) */ hw_desc_init(&desc[idx]); set_din_no_dma(&desc[idx], 0, 0xfffff0); set_dout_no_dma(&desc[idx], 0, 0, 1); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_aes_not_hash_mode(&desc[idx]); set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; /* Load GHASH initial STATE (which is 0). 
(for any hash there is an * initial state) */ hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE); set_dout_no_dma(&desc[idx], 0, 0, 1); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_aes_not_hash_mode(&desc[idx]); set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); idx++; *seq_size = idx; } static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int idx = *seq_size; /* load key to AES*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr, ctx->enc_keylen, NS_BIT); set_key_size_aes(&desc[idx], ctx->enc_keylen); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) { /* load AES/CTR initial CTR value inc by 2*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); set_key_size_aes(&desc[idx], ctx->enc_keylen); set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE, NS_BIT); set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; } *seq_size = idx; } static void cc_proc_gcm_result(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); dma_addr_t mac_result; unsigned int idx = *seq_size; if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { mac_result = req_ctx->mac_buf_dma_addr; } else { /* Encrypt */ mac_result = req_ctx->icv_dma_addr; } /* process(ghash) gcm_block_len */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr, AES_BLOCK_SIZE, NS_BIT); set_flow_mode(&desc[idx], DIN_HASH); idx++; /* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH); set_din_no_dma(&desc[idx], 0, 0xfffff0); set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE, NS_BIT, 0); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_aes_not_hash_mode(&desc[idx]); idx++; /* load AES/CTR initial CTR value inc by 1*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); set_key_size_aes(&desc[idx], ctx->enc_keylen); set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr, AES_BLOCK_SIZE, NS_BIT); set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; /* Memory Barrier */ hw_desc_init(&desc[idx]); set_din_no_dma(&desc[idx], 0, 0xfffff0); set_dout_no_dma(&desc[idx], 0, 0, 1); idx++; /* process GCTR on stored GHASH and store MAC in mac_state*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR); set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE, NS_BIT); set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); 
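/* Editorial note (not in the upstream source): the descriptor being assembled
 * here encrypts the stored GHASH value with GCTR to produce the final GCM tag,
 * T = GCTR_K(J0, GHASH_H(A || C || len64(A) || len64(C))). gcm_iv_inc1, loaded
 * as the GCTR state above, is J0 (the IV with its 32-bit counter field set to 1
 * by config_gcm_context()), and mac_buf holds the GHASH result stored above.
 */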
set_flow_mode(&desc[idx], DIN_AES_DOUT); idx++; *seq_size = idx; } static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[], unsigned int *seq_size) { struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); unsigned int cipher_flow_mode; //in RFC4543 no data to encrypt. just copy data from src to dest. if (req_ctx->plaintext_authenticate_only) { cc_proc_cipher_desc(req, BYPASS, desc, seq_size); cc_set_ghash_desc(req, desc, seq_size); /* process(ghash) assoc data */ cc_set_assoc_desc(req, DIN_HASH, desc, seq_size); cc_set_gctr_desc(req, desc, seq_size); cc_proc_gcm_result(req, desc, seq_size); return 0; } if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { cipher_flow_mode = AES_and_HASH; } else { /* Encrypt */ cipher_flow_mode = AES_to_HASH_and_DOUT; } // for gcm and rfc4106. cc_set_ghash_desc(req, desc, seq_size); /* process(ghash) assoc data */ if (req_ctx->assoclen > 0) cc_set_assoc_desc(req, DIN_HASH, desc, seq_size); cc_set_gctr_desc(req, desc, seq_size); /* process(gctr+ghash) */ if (req_ctx->cryptlen) cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size); cc_proc_gcm_result(req, desc, seq_size); return 0; } static int config_gcm_context(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *req_ctx = aead_request_ctx_dma(req); struct device *dev = drvdata_to_dev(ctx->drvdata); unsigned int cryptlen = (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) ? req->cryptlen : (req->cryptlen - ctx->authsize); __be32 counter = cpu_to_be32(2); dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n", __func__, cryptlen, req_ctx->assoclen, ctx->authsize); memset(req_ctx->hkey, 0, AES_BLOCK_SIZE); memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE); memcpy(req->iv + 12, &counter, 4); memcpy(req_ctx->gcm_iv_inc2, req->iv, 16); counter = cpu_to_be32(1); memcpy(req->iv + 12, &counter, 4); memcpy(req_ctx->gcm_iv_inc1, req->iv, 16); if (!req_ctx->plaintext_authenticate_only) { __be64 temp64; temp64 = cpu_to_be64(req_ctx->assoclen * 8); memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); temp64 = cpu_to_be64(cryptlen * 8); memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); } else { /* rfc4543=> all data(AAD,IV,Plain) are considered additional * data that is nothing is encrypted. */ __be64 temp64; temp64 = cpu_to_be64((req_ctx->assoclen + cryptlen) * 8); memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64)); temp64 = 0; memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8); } return 0; } static void cc_proc_rfc4_gcm(struct aead_request *req) { struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET, ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE); memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv, GCM_BLOCK_RFC4_IV_SIZE); req->iv = areq_ctx->ctr_iv; } static int cc_proc_aead(struct aead_request *req, enum drv_crypto_direction direct) { int rc = 0; int seq_len = 0; struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ]; struct crypto_aead *tfm = crypto_aead_reqtfm(req); struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); struct device *dev = drvdata_to_dev(ctx->drvdata); struct cc_crypto_req cc_req = {}; dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n", ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
"Enc" : "Dec"), ctx, req, req->iv, sg_virt(req->src), req->src->offset, sg_virt(req->dst), req->dst->offset, req->cryptlen); /* STAT_PHASE_0: Init and sanity checks */ /* Check data length according to mode */ if (validate_data_size(ctx, direct, req)) { dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n", req->cryptlen, areq_ctx->assoclen); return -EINVAL; } /* Setup request structure */ cc_req.user_cb = cc_aead_complete; cc_req.user_arg = req; /* Setup request context */ areq_ctx->gen_ctx.op_type = direct; areq_ctx->req_authsize = ctx->authsize; areq_ctx->cipher_mode = ctx->cipher_mode; /* STAT_PHASE_1: Map buffers */ if (ctx->cipher_mode == DRV_CIPHER_CTR) { /* Build CTR IV - Copy nonce from last 4 bytes in * CTR key to first 4 bytes in CTR IV */ memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce, CTR_RFC3686_NONCE_SIZE); memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE, req->iv, CTR_RFC3686_IV_SIZE); /* Initialize counter portion of counter block */ *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1); /* Replace with counter iv */ req->iv = areq_ctx->ctr_iv; areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE; } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) || (ctx->cipher_mode == DRV_CIPHER_GCTR)) { areq_ctx->hw_iv_size = AES_BLOCK_SIZE; if (areq_ctx->ctr_iv != req->iv) { memcpy(areq_ctx->ctr_iv, req->iv, crypto_aead_ivsize(tfm)); req->iv = areq_ctx->ctr_iv; } } else { areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm); } if (ctx->cipher_mode == DRV_CIPHER_CCM) { rc = config_ccm_adata(req); if (rc) { dev_dbg(dev, "config_ccm_adata() returned with a failure %d!", rc); goto exit; } } else { areq_ctx->ccm_hdr_size = ccm_header_size_null; } if (ctx->cipher_mode == DRV_CIPHER_GCTR) { rc = config_gcm_context(req); if (rc) { dev_dbg(dev, "config_gcm_context() returned with a failure %d!", rc); goto exit; } } rc = cc_map_aead_request(ctx->drvdata, req); if (rc) { dev_err(dev, "map_request() failed\n"); goto exit; } /* STAT_PHASE_2: Create sequence */ /* Load MLLI tables to SRAM if necessary */ cc_mlli_to_sram(req, desc, &seq_len); switch (ctx->auth_mode) { case DRV_HASH_SHA1: case DRV_HASH_SHA256: cc_hmac_authenc(req, desc, &seq_len); break; case DRV_HASH_XCBC_MAC: cc_xcbc_authenc(req, desc, &seq_len); break; case DRV_HASH_NULL: if (ctx->cipher_mode == DRV_CIPHER_CCM) cc_ccm(req, desc, &seq_len); if (ctx->cipher_mode == DRV_CIPHER_GCTR) cc_gcm(req, desc, &seq_len); break; default: dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode); cc_unmap_aead_request(dev, req); rc = -ENOTSUPP; goto exit; } /* STAT_PHASE_3: Lock HW and push sequence */ rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base); if (rc != -EINPROGRESS && rc != -EBUSY) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); cc_unmap_aead_request(dev, req); } exit: return rc; } static int cc_aead_encrypt(struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; memset(areq_ctx, 0, sizeof(*areq_ctx)); /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) req->iv = areq_ctx->backup_iv; return rc; } static int cc_rfc4309_ccm_encrypt(struct aead_request *req) { /* Very similar to cc_aead_encrypt() above. 
*/ struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); if (rc) goto out; memset(areq_ctx, 0, sizeof(*areq_ctx)); /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE; cc_proc_rfc4309_ccm(req); rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) req->iv = areq_ctx->backup_iv; out: return rc; } static int cc_aead_decrypt(struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; memset(areq_ctx, 0, sizeof(*areq_ctx)); /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) req->iv = areq_ctx->backup_iv; return rc; } static int cc_rfc4309_ccm_decrypt(struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); if (rc) goto out; memset(areq_ctx, 0, sizeof(*areq_ctx)); /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen - CCM_BLOCK_IV_SIZE; cc_proc_rfc4309_ccm(req); rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) req->iv = areq_ctx->backup_iv; out: return rc; } static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key); if (keylen < 4) return -EINVAL; keylen -= 4; memcpy(ctx->ctr_nonce, key + keylen, 4); return cc_aead_setkey(tfm, key, keylen); } static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key, unsigned int keylen) { struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key); if (keylen < 4) return -EINVAL; keylen -= 4; memcpy(ctx->ctr_nonce, key + keylen, 4); return cc_aead_setkey(tfm, key, keylen); } static int cc_gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { switch (authsize) { case 4: case 8: case 12: case 13: case 14: case 15: case 16: break; default: return -EINVAL; } return cc_aead_setauthsize(authenc, authsize); } static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc); struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "authsize %d\n", authsize); switch (authsize) { case 8: case 12: case 16: break; default: return -EINVAL; } return cc_aead_setauthsize(authenc, authsize); } static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize) { struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc); struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "authsize %d\n", authsize); if (authsize != 16) return -EINVAL; return cc_aead_setauthsize(authenc, authsize); } static int cc_rfc4106_gcm_encrypt(struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); if (rc) goto out; memset(areq_ctx, 0, sizeof(*areq_ctx)); /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE; cc_proc_rfc4_gcm(req); rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); if (rc != 
-EINPROGRESS && rc != -EBUSY) req->iv = areq_ctx->backup_iv; out: return rc; } static int cc_rfc4543_gcm_encrypt(struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); if (rc) goto out; memset(areq_ctx, 0, sizeof(*areq_ctx)); //plaintext is not encryped with rfc4543 areq_ctx->plaintext_authenticate_only = true; /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; cc_proc_rfc4_gcm(req); rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) req->iv = areq_ctx->backup_iv; out: return rc; } static int cc_rfc4106_gcm_decrypt(struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); if (rc) goto out; memset(areq_ctx, 0, sizeof(*areq_ctx)); /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen - GCM_BLOCK_RFC4_IV_SIZE; cc_proc_rfc4_gcm(req); rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) req->iv = areq_ctx->backup_iv; out: return rc; } static int cc_rfc4543_gcm_decrypt(struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc; rc = crypto_ipsec_check_assoclen(req->assoclen); if (rc) goto out; memset(areq_ctx, 0, sizeof(*areq_ctx)); //plaintext is not decryped with rfc4543 areq_ctx->plaintext_authenticate_only = true; /* No generated IV required */ areq_ctx->backup_iv = req->iv; areq_ctx->assoclen = req->assoclen; cc_proc_rfc4_gcm(req); rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT); if (rc != -EINPROGRESS && rc != -EBUSY) req->iv = areq_ctx->backup_iv; out: return rc; } /* aead alg */ static struct cc_alg_template aead_algs[] = { { .name = "authenc(hmac(sha1),cbc(aes))", .driver_name = "authenc-hmac-sha1-cbc-aes-ccree", .blocksize = AES_BLOCK_SIZE, .template_aead = { .setkey = cc_aead_setkey, .setauthsize = cc_aead_setauthsize, .encrypt = cc_aead_encrypt, .decrypt = cc_aead_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_SHA1, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "authenc(hmac(sha1),cbc(des3_ede))", .driver_name = "authenc-hmac-sha1-cbc-des3-ccree", .blocksize = DES3_EDE_BLOCK_SIZE, .template_aead = { .setkey = cc_des3_aead_setkey, .setauthsize = cc_aead_setauthsize, .encrypt = cc_aead_encrypt, .decrypt = cc_aead_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_DES, .auth_mode = DRV_HASH_SHA1, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "authenc(hmac(sha256),cbc(aes))", .driver_name = "authenc-hmac-sha256-cbc-aes-ccree", .blocksize = AES_BLOCK_SIZE, .template_aead = { .setkey = cc_aead_setkey, .setauthsize = cc_aead_setauthsize, .encrypt = cc_aead_encrypt, .decrypt = cc_aead_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = AES_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_SHA256, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "authenc(hmac(sha256),cbc(des3_ede))", .driver_name = "authenc-hmac-sha256-cbc-des3-ccree", .blocksize = DES3_EDE_BLOCK_SIZE, .template_aead = { .setkey = 
cc_des3_aead_setkey, .setauthsize = cc_aead_setauthsize, .encrypt = cc_aead_encrypt, .decrypt = cc_aead_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = DES3_EDE_BLOCK_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_DES, .auth_mode = DRV_HASH_SHA256, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "authenc(xcbc(aes),cbc(aes))", .driver_name = "authenc-xcbc-aes-cbc-aes-ccree", .blocksize = AES_BLOCK_SIZE, .template_aead = { .setkey = cc_aead_setkey, .setauthsize = cc_aead_setauthsize, .encrypt = cc_aead_encrypt, .decrypt = cc_aead_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = AES_BLOCK_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_XCBC_MAC, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))", .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree", .blocksize = 1, .template_aead = { .setkey = cc_aead_setkey, .setauthsize = cc_aead_setauthsize, .encrypt = cc_aead_encrypt, .decrypt = cc_aead_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA1_DIGEST_SIZE, }, .cipher_mode = DRV_CIPHER_CTR, .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_SHA1, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))", .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree", .blocksize = 1, .template_aead = { .setkey = cc_aead_setkey, .setauthsize = cc_aead_setauthsize, .encrypt = cc_aead_encrypt, .decrypt = cc_aead_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = SHA256_DIGEST_SIZE, }, .cipher_mode = DRV_CIPHER_CTR, .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_SHA256, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))", .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree", .blocksize = 1, .template_aead = { .setkey = cc_aead_setkey, .setauthsize = cc_aead_setauthsize, .encrypt = cc_aead_encrypt, .decrypt = cc_aead_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = CTR_RFC3686_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CTR, .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_XCBC_MAC, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "ccm(aes)", .driver_name = "ccm-aes-ccree", .blocksize = 1, .template_aead = { .setkey = cc_aead_setkey, .setauthsize = cc_ccm_setauthsize, .encrypt = cc_aead_encrypt, .decrypt = cc_aead_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = AES_BLOCK_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CCM, .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_NULL, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "rfc4309(ccm(aes))", .driver_name = "rfc4309-ccm-aes-ccree", .blocksize = 1, .template_aead = { .setkey = cc_rfc4309_ccm_setkey, .setauthsize = cc_rfc4309_ccm_setauthsize, .encrypt = cc_rfc4309_ccm_encrypt, .decrypt = cc_rfc4309_ccm_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = CCM_BLOCK_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CCM, .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_NULL, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "gcm(aes)", .driver_name = "gcm-aes-ccree", .blocksize = 1, .template_aead = { .setkey = cc_aead_setkey, .setauthsize = cc_gcm_setauthsize, .encrypt = 
cc_aead_encrypt, .decrypt = cc_aead_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = 12, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_GCTR, .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_NULL, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "rfc4106(gcm(aes))", .driver_name = "rfc4106-gcm-aes-ccree", .blocksize = 1, .template_aead = { .setkey = cc_rfc4106_gcm_setkey, .setauthsize = cc_rfc4106_gcm_setauthsize, .encrypt = cc_rfc4106_gcm_encrypt, .decrypt = cc_rfc4106_gcm_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = GCM_BLOCK_RFC4_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_GCTR, .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_NULL, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "rfc4543(gcm(aes))", .driver_name = "rfc4543-gcm-aes-ccree", .blocksize = 1, .template_aead = { .setkey = cc_rfc4543_gcm_setkey, .setauthsize = cc_rfc4543_gcm_setauthsize, .encrypt = cc_rfc4543_gcm_encrypt, .decrypt = cc_rfc4543_gcm_decrypt, .init = cc_aead_init, .exit = cc_aead_exit, .ivsize = GCM_BLOCK_RFC4_IV_SIZE, .maxauthsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_GCTR, .flow_mode = S_DIN_to_AES, .auth_mode = DRV_HASH_NULL, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, }; static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl, struct device *dev) { struct cc_crypto_alg *t_alg; struct aead_alg *alg; t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL); if (!t_alg) return ERR_PTR(-ENOMEM); alg = &tmpl->template_aead; snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->driver_name); alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CC_CRA_PRIO; alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx); alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; alg->base.cra_blocksize = tmpl->blocksize; alg->init = cc_aead_init; alg->exit = cc_aead_exit; t_alg->aead_alg = *alg; t_alg->cipher_mode = tmpl->cipher_mode; t_alg->flow_mode = tmpl->flow_mode; t_alg->auth_mode = tmpl->auth_mode; return t_alg; } int cc_aead_free(struct cc_drvdata *drvdata) { struct cc_crypto_alg *t_alg, *n; struct cc_aead_handle *aead_handle = drvdata->aead_handle; /* Remove registered algs */ list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list, entry) { crypto_unregister_aead(&t_alg->aead_alg); list_del(&t_alg->entry); } return 0; } int cc_aead_alloc(struct cc_drvdata *drvdata) { struct cc_aead_handle *aead_handle; struct cc_crypto_alg *t_alg; int rc = -ENOMEM; int alg; struct device *dev = drvdata_to_dev(drvdata); aead_handle = devm_kmalloc(dev, sizeof(*aead_handle), GFP_KERNEL); if (!aead_handle) { rc = -ENOMEM; goto fail0; } INIT_LIST_HEAD(&aead_handle->aead_list); drvdata->aead_handle = aead_handle; aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata, MAX_HMAC_DIGEST_SIZE); if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) { rc = -ENOMEM; goto fail1; } /* Linux crypto */ for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) { if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) || !(drvdata->std_bodies & aead_algs[alg].std_body)) continue; t_alg = cc_create_aead_alg(&aead_algs[alg], dev); if (IS_ERR(t_alg)) { rc = PTR_ERR(t_alg); dev_err(dev, "%s alg allocation failed\n", aead_algs[alg].driver_name); goto fail1; } t_alg->drvdata = drvdata; rc = crypto_register_aead(&t_alg->aead_alg); if (rc) { dev_err(dev, "%s alg registration failed\n", 
t_alg->aead_alg.base.cra_driver_name); goto fail1; } list_add_tail(&t_alg->entry, &aead_handle->aead_list); dev_dbg(dev, "Registered %s\n", t_alg->aead_alg.base.cra_driver_name); } return 0; fail1: cc_aead_free(drvdata); fail0: return rc; }
linux-master
drivers/crypto/ccree/cc_aead.c
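Editorial aside: the CCM path in cc_aead.c above (format_ccm_a0(), set_msg_len() and config_ccm_adata()) hand-packs the RFC 3610 B0 flags byte and the associated-data length header. Below is a minimal, standalone C sketch of those two encodings, added here purely as a reading aid; it is not part of the driver, and the helper names ccm_b0_flags() and ccm_encode_alen() are invented for illustration.

#include <stdint.h>
#include <stdio.h>

/* RFC 3610 B0 flags byte: bit 6 = Adata present, bits 5..3 = (M - 2) / 2,
 * bits 2..0 = L - 1 (the driver gets L - 1 from req->iv[0]).
 */
static uint8_t ccm_b0_flags(int adata_present, unsigned int tag_len_m,
			    unsigned int len_field_l)
{
	return (adata_present ? 0x40 : 0x00) |
	       (((tag_len_m - 2) / 2) << 3) |
	       (len_field_l - 1);
}

/* Associated-data length header, same layout as format_ccm_a0():
 * two bytes for lengths below 0xFF00, otherwise 0xFF 0xFE plus a 32-bit length.
 */
static unsigned int ccm_encode_alen(uint8_t *out, uint32_t alen)
{
	if (alen == 0)
		return 0;
	if (alen < 0xFF00) {
		out[0] = (alen >> 8) & 0xFF;
		out[1] = alen & 0xFF;
		return 2;
	}
	out[0] = 0xFF;
	out[1] = 0xFE;
	out[2] = (alen >> 24) & 0xFF;
	out[3] = (alen >> 16) & 0xFF;
	out[4] = (alen >> 8) & 0xFF;
	out[5] = alen & 0xFF;
	return 6;
}

int main(void)
{
	uint8_t hdr[6];
	unsigned int n = ccm_encode_alen(hdr, 24);	/* e.g. 24 bytes of AAD */

	/* rfc4309 uses L = 4 (cc_proc_rfc4309_ccm() sets ctr_iv[0] = 3); assume a 16-byte tag. */
	printf("B0 flags: 0x%02x, A0 length header: %u byte(s)\n",
	       ccm_b0_flags(1, 16, 4), n);
	return 0;
}

The 0xFF00 cut-off mirrors the driver's (1UL << 16) - (1UL << 8) check: RFC 3610 reserves two-byte values from 0xFF00 upward, using 0xFFFE and 0xFFFF as markers for the 32-bit and 64-bit length forms, so larger associated-data lengths must use the six-byte encoding.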
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ #include <linux/kernel.h> #include <linux/module.h> #include <crypto/algapi.h> #include <crypto/hash.h> #include <crypto/md5.h> #include <crypto/sm3.h> #include <crypto/internal/hash.h> #include "cc_driver.h" #include "cc_request_mgr.h" #include "cc_buffer_mgr.h" #include "cc_hash.h" #include "cc_sram_mgr.h" #define CC_MAX_HASH_SEQ_LEN 12 #define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE #define CC_SM3_HASH_LEN_SIZE 8 struct cc_hash_handle { u32 digest_len_sram_addr; /* const value in SRAM*/ u32 larval_digest_sram_addr; /* const value in SRAM */ struct list_head hash_list; }; static const u32 cc_digest_len_init[] = { 0x00000040, 0x00000000, 0x00000000, 0x00000000 }; static const u32 cc_md5_init[] = { SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; static const u32 cc_sha1_init[] = { SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 }; static const u32 cc_sha224_init[] = { SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4, SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 }; static const u32 cc_sha256_init[] = { SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4, SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 }; static const u32 cc_digest_len_sha512_init[] = { 0x00000080, 0x00000000, 0x00000000, 0x00000000 }; /* * Due to the way the HW works, every double word in the SHA384 and SHA512 * larval hashes must be stored in hi/lo order */ #define hilo(x) upper_32_bits(x), lower_32_bits(x) static const u32 cc_sha384_init[] = { hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4), hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) }; static const u32 cc_sha512_init[] = { hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4), hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) }; static const u32 cc_sm3_init[] = { SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE, SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA }; static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[], unsigned int *seq_size); static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[], unsigned int *seq_size); static const void *cc_larval_digest(struct device *dev, u32 mode); struct cc_hash_alg { struct list_head entry; int hash_mode; int hw_mode; int inter_digestsize; struct cc_drvdata *drvdata; struct ahash_alg ahash_alg; }; struct hash_key_req_ctx { u32 keylen; dma_addr_t key_dma_addr; u8 *key; }; /* hash per-session context */ struct cc_hash_ctx { struct cc_drvdata *drvdata; /* holds the origin digest; the digest after "setkey" if HMAC,* * the initial digest if HASH. 
*/ u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned; u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned; dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned; dma_addr_t digest_buff_dma_addr; /* use for hmac with key large then mode block size */ struct hash_key_req_ctx key_params; int hash_mode; int hw_mode; int inter_digestsize; unsigned int hash_len; struct completion setkey_comp; bool is_hmac; }; static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx, unsigned int flow_mode, struct cc_hw_desc desc[], bool is_not_last_data, unsigned int *seq_size); static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc) { if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 || mode == DRV_HASH_SHA512) { set_bytes_swap(desc, 1); } else { set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN); } } static int cc_map_result(struct device *dev, struct ahash_req_ctx *state, unsigned int digestsize) { state->digest_result_dma_addr = dma_map_single(dev, state->digest_result_buff, digestsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, state->digest_result_dma_addr)) { dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n", digestsize); return -ENOMEM; } dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n", digestsize, state->digest_result_buff, &state->digest_result_dma_addr); return 0; } static void cc_init_req(struct device *dev, struct ahash_req_ctx *state, struct cc_hash_ctx *ctx) { bool is_hmac = ctx->is_hmac; memset(state, 0, sizeof(*state)); if (is_hmac) { if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC && ctx->hw_mode != DRV_CIPHER_CMAC) { dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); memcpy(state->digest_buff, ctx->digest_buff, ctx->inter_digestsize); if (ctx->hash_mode == DRV_HASH_SHA512 || ctx->hash_mode == DRV_HASH_SHA384) memcpy(state->digest_bytes_len, cc_digest_len_sha512_init, ctx->hash_len); else memcpy(state->digest_bytes_len, cc_digest_len_init, ctx->hash_len); } if (ctx->hash_mode != DRV_HASH_NULL) { dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); memcpy(state->opad_digest_buff, ctx->opad_tmp_keys_buff, ctx->inter_digestsize); } } else { /*hash*/ /* Copy the initial digests if hash flow. 
*/ const void *larval = cc_larval_digest(dev, ctx->hash_mode); memcpy(state->digest_buff, larval, ctx->inter_digestsize); } } static int cc_map_req(struct device *dev, struct ahash_req_ctx *state, struct cc_hash_ctx *ctx) { bool is_hmac = ctx->is_hmac; state->digest_buff_dma_addr = dma_map_single(dev, state->digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, state->digest_buff_dma_addr)) { dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n", ctx->inter_digestsize, state->digest_buff); return -EINVAL; } dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n", ctx->inter_digestsize, state->digest_buff, &state->digest_buff_dma_addr); if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) { state->digest_bytes_len_dma_addr = dma_map_single(dev, state->digest_bytes_len, HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) { dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n", HASH_MAX_LEN_SIZE, state->digest_bytes_len); goto unmap_digest_buf; } dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n", HASH_MAX_LEN_SIZE, state->digest_bytes_len, &state->digest_bytes_len_dma_addr); } if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) { state->opad_digest_dma_addr = dma_map_single(dev, state->opad_digest_buff, ctx->inter_digestsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, state->opad_digest_dma_addr)) { dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n", ctx->inter_digestsize, state->opad_digest_buff); goto unmap_digest_len; } dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n", ctx->inter_digestsize, state->opad_digest_buff, &state->opad_digest_dma_addr); } return 0; unmap_digest_len: if (state->digest_bytes_len_dma_addr) { dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL); state->digest_bytes_len_dma_addr = 0; } unmap_digest_buf: if (state->digest_buff_dma_addr) { dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); state->digest_buff_dma_addr = 0; } return -EINVAL; } static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state, struct cc_hash_ctx *ctx) { if (state->digest_buff_dma_addr) { dma_unmap_single(dev, state->digest_buff_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n", &state->digest_buff_dma_addr); state->digest_buff_dma_addr = 0; } if (state->digest_bytes_len_dma_addr) { dma_unmap_single(dev, state->digest_bytes_len_dma_addr, HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL); dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n", &state->digest_bytes_len_dma_addr); state->digest_bytes_len_dma_addr = 0; } if (state->opad_digest_dma_addr) { dma_unmap_single(dev, state->opad_digest_dma_addr, ctx->inter_digestsize, DMA_BIDIRECTIONAL); dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n", &state->opad_digest_dma_addr); state->opad_digest_dma_addr = 0; } } static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state, unsigned int digestsize, u8 *result) { if (state->digest_result_dma_addr) { dma_unmap_single(dev, state->digest_result_dma_addr, digestsize, DMA_BIDIRECTIONAL); dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n", state->digest_result_buff, &state->digest_result_dma_addr, digestsize); memcpy(result, state->digest_result_buff, digestsize); } state->digest_result_dma_addr = 0; } static void cc_update_complete(struct device 
*dev, void *cc_req, int err) { struct ahash_request *req = (struct ahash_request *)cc_req; struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); dev_dbg(dev, "req=%pK\n", req); if (err != -EINPROGRESS) { /* Not a BACKLOG notification */ cc_unmap_hash_request(dev, state, req->src, false); cc_unmap_req(dev, state, ctx); } ahash_request_complete(req, err); } static void cc_digest_complete(struct device *dev, void *cc_req, int err) { struct ahash_request *req = (struct ahash_request *)cc_req; struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); dev_dbg(dev, "req=%pK\n", req); if (err != -EINPROGRESS) { /* Not a BACKLOG notification */ cc_unmap_hash_request(dev, state, req->src, false); cc_unmap_result(dev, state, digestsize, req->result); cc_unmap_req(dev, state, ctx); } ahash_request_complete(req, err); } static void cc_hash_complete(struct device *dev, void *cc_req, int err) { struct ahash_request *req = (struct ahash_request *)cc_req; struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); dev_dbg(dev, "req=%pK\n", req); if (err != -EINPROGRESS) { /* Not a BACKLOG notification */ cc_unmap_hash_request(dev, state, req->src, false); cc_unmap_result(dev, state, digestsize, req->result); cc_unmap_req(dev, state, ctx); } ahash_request_complete(req, err); } static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req, int idx) { struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); /* Get final MAC result */ hw_desc_init(&desc[idx]); set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); cc_set_endianity(ctx->hash_mode, &desc[idx]); idx++; return idx; } static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req, int idx) { struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); /* store the hash digest result in the context */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize, NS_BIT, 0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); cc_set_endianity(ctx->hash_mode, &desc[idx]); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); idx++; /* Loading hash opad xor key state */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr, ctx->inter_digestsize, NS_BIT); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); idx++; /* Load the hash current length */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, 
ctx->hash_mode), ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; /* Memory Barrier: wait for IPAD/OPAD axi write to complete */ hw_desc_init(&desc[idx]); set_din_no_dma(&desc[idx], 0, 0xfffff0); set_dout_no_dma(&desc[idx], 0, 0, 1); idx++; /* Perform HASH update */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, digestsize, NS_BIT); set_flow_mode(&desc[idx], DIN_HASH); idx++; return idx; } static int cc_hash_digest(struct ahash_request *req) { struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); struct scatterlist *src = req->src; unsigned int nbytes = req->nbytes; u8 *result = req->result; struct device *dev = drvdata_to_dev(ctx->drvdata); bool is_hmac = ctx->is_hmac; struct cc_crypto_req cc_req = {}; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; u32 larval_digest_addr; int idx = 0; int rc = 0; gfp_t flags = cc_gfp_flags(&req->base); dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash", nbytes); cc_init_req(dev, state, ctx); if (cc_map_req(dev, state, ctx)) { dev_err(dev, "map_ahash_source() failed\n"); return -ENOMEM; } if (cc_map_result(dev, state, digestsize)) { dev_err(dev, "map_ahash_digest() failed\n"); cc_unmap_req(dev, state, ctx); return -ENOMEM; } if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1, flags)) { dev_err(dev, "map_ahash_request_final() failed\n"); cc_unmap_result(dev, state, digestsize, result); cc_unmap_req(dev, state, ctx); return -ENOMEM; } /* Setup request structure */ cc_req.user_cb = cc_digest_complete; cc_req.user_arg = req; /* If HMAC then load hash IPAD xor key, if HASH then load initial * digest */ hw_desc_init(&desc[idx]); set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); if (is_hmac) { set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT); } else { larval_digest_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode); set_din_sram(&desc[idx], larval_digest_addr, ctx->inter_digestsize); } set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); idx++; /* Load the hash current length */ hw_desc_init(&desc[idx]); set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); if (is_hmac) { set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, ctx->hash_len, NS_BIT); } else { set_din_const(&desc[idx], 0, ctx->hash_len); if (nbytes) set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); else set_cipher_do(&desc[idx], DO_PAD); } set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx); if (is_hmac) { /* HW last hash block padding (aka. 
"DO_PAD") */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, ctx->hash_len, NS_BIT, 0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); set_cipher_do(&desc[idx], DO_PAD); idx++; idx = cc_fin_hmac(desc, req, idx); } idx = cc_fin_result(desc, req, idx); rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base); if (rc != -EINPROGRESS && rc != -EBUSY) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); cc_unmap_hash_request(dev, state, src, true); cc_unmap_result(dev, state, digestsize, result); cc_unmap_req(dev, state, ctx); } return rc; } static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx, struct ahash_req_ctx *state, unsigned int idx) { /* Restore hash digest */ hw_desc_init(&desc[idx]); set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); idx++; /* Restore hash current length */ hw_desc_init(&desc[idx]); set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr, ctx->hash_len, NS_BIT); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx); return idx; } static int cc_hash_update(struct ahash_request *req) { struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base); struct scatterlist *src = req->src; unsigned int nbytes = req->nbytes; struct device *dev = drvdata_to_dev(ctx->drvdata); struct cc_crypto_req cc_req = {}; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; u32 idx = 0; int rc; gfp_t flags = cc_gfp_flags(&req->base); dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ? 
"hmac" : "hash", nbytes); if (nbytes == 0) { /* no real updates required */ return 0; } rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes, block_size, flags); if (rc) { if (rc == 1) { dev_dbg(dev, " data size not require HW update %x\n", nbytes); /* No hardware updates are required */ return 0; } dev_err(dev, "map_ahash_request_update() failed\n"); return -ENOMEM; } if (cc_map_req(dev, state, ctx)) { dev_err(dev, "map_ahash_source() failed\n"); cc_unmap_hash_request(dev, state, src, true); return -EINVAL; } /* Setup request structure */ cc_req.user_cb = cc_update_complete; cc_req.user_arg = req; idx = cc_restore_hash(desc, ctx, state, idx); /* store the hash digest result in context */ hw_desc_init(&desc[idx]); set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); idx++; /* store current hash length in context */ hw_desc_init(&desc[idx]); set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr, ctx->hash_len, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); idx++; rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base); if (rc != -EINPROGRESS && rc != -EBUSY) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); cc_unmap_hash_request(dev, state, src, true); cc_unmap_req(dev, state, ctx); } return rc; } static int cc_do_finup(struct ahash_request *req, bool update) { struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); u32 digestsize = crypto_ahash_digestsize(tfm); struct scatterlist *src = req->src; unsigned int nbytes = req->nbytes; u8 *result = req->result; struct device *dev = drvdata_to_dev(ctx->drvdata); bool is_hmac = ctx->is_hmac; struct cc_crypto_req cc_req = {}; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; unsigned int idx = 0; int rc; gfp_t flags = cc_gfp_flags(&req->base); dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash", update ? 
"finup" : "final", nbytes); if (cc_map_req(dev, state, ctx)) { dev_err(dev, "map_ahash_source() failed\n"); return -EINVAL; } if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update, flags)) { dev_err(dev, "map_ahash_request_final() failed\n"); cc_unmap_req(dev, state, ctx); return -ENOMEM; } if (cc_map_result(dev, state, digestsize)) { dev_err(dev, "map_ahash_digest() failed\n"); cc_unmap_hash_request(dev, state, src, true); cc_unmap_req(dev, state, ctx); return -ENOMEM; } /* Setup request structure */ cc_req.user_cb = cc_hash_complete; cc_req.user_arg = req; idx = cc_restore_hash(desc, ctx, state, idx); /* Pad the hash */ hw_desc_init(&desc[idx]); set_cipher_do(&desc[idx], DO_PAD); set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode); set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr, ctx->hash_len, NS_BIT, 0); set_setup_mode(&desc[idx], SETUP_WRITE_STATE1); set_flow_mode(&desc[idx], S_HASH_to_DOUT); idx++; if (is_hmac) idx = cc_fin_hmac(desc, req, idx); idx = cc_fin_result(desc, req, idx); rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base); if (rc != -EINPROGRESS && rc != -EBUSY) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); cc_unmap_hash_request(dev, state, src, true); cc_unmap_result(dev, state, digestsize, result); cc_unmap_req(dev, state, ctx); } return rc; } static int cc_hash_finup(struct ahash_request *req) { return cc_do_finup(req, true); } static int cc_hash_final(struct ahash_request *req) { return cc_do_finup(req, false); } static int cc_hash_init(struct ahash_request *req) { struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "===== init (%d) ====\n", req->nbytes); cc_init_req(dev, state, ctx); return 0; } static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST }; struct cc_crypto_req cc_req = {}; struct cc_hash_ctx *ctx = NULL; int blocksize = 0; int digestsize = 0; int i, idx = 0, rc = 0; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; u32 larval_addr; struct device *dev; ctx = crypto_ahash_ctx_dma(ahash); dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "start keylen: %d", keylen); blocksize = crypto_tfm_alg_blocksize(&ahash->base); digestsize = crypto_ahash_digestsize(ahash); larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode); /* The keylen value distinguishes HASH in case keylen is ZERO bytes, * any NON-ZERO value utilizes HMAC flow */ ctx->key_params.keylen = keylen; ctx->key_params.key_dma_addr = 0; ctx->is_hmac = true; ctx->key_params.key = NULL; if (keylen) { ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL); if (!ctx->key_params.key) return -ENOMEM; ctx->key_params.key_dma_addr = dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE); if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", ctx->key_params.key, keylen); kfree_sensitive(ctx->key_params.key); return -ENOMEM; } dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", &ctx->key_params.key_dma_addr, ctx->key_params.keylen); if (keylen > blocksize) { /* Load hash initial state */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], 
SETUP_LOAD_STATE0); idx++; /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_din_const(&desc[idx], 0, ctx->hash_len); set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr, keylen, NS_BIT); set_flow_mode(&desc[idx], DIN_HASH); idx++; /* Get hashed key */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr, digestsize, NS_BIT, 0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED); cc_set_endianity(ctx->hash_mode, &desc[idx]); idx++; hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0, (blocksize - digestsize)); set_flow_mode(&desc[idx], BYPASS); set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr + digestsize), (blocksize - digestsize), NS_BIT, 0); idx++; } else { hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr, keylen, NS_BIT); set_flow_mode(&desc[idx], BYPASS); set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr, keylen, NS_BIT, 0); idx++; if ((blocksize - keylen)) { hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0, (blocksize - keylen)); set_flow_mode(&desc[idx], BYPASS); set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr + keylen), (blocksize - keylen), NS_BIT, 0); idx++; } } } else { hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0, blocksize); set_flow_mode(&desc[idx], BYPASS); set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr), blocksize, NS_BIT, 0); idx++; } rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx); if (rc) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); goto out; } /* calc derived HMAC key */ for (idx = 0, i = 0; i < 2; i++) { /* Load hash initial state */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); idx++; /* Load the hash current length*/ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_din_const(&desc[idx], 0, ctx->hash_len); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; /* Prepare ipad key */ hw_desc_init(&desc[idx]); set_xor_val(&desc[idx], hmac_pad_const[i]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_flow_mode(&desc[idx], S_DIN_to_HASH); set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); idx++; /* Perform HASH update */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr, blocksize, NS_BIT); set_cipher_mode(&desc[idx], ctx->hw_mode); set_xor_active(&desc[idx]); set_flow_mode(&desc[idx], DIN_HASH); idx++; /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest * of the first HASH "update" state) */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); if (i > 0) /* Not first iteration */ set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr, ctx->inter_digestsize, NS_BIT, 0); else /* First iteration */ set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 0); set_flow_mode(&desc[idx], S_HASH_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); idx++; } rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx); out: if (ctx->key_params.key_dma_addr) 
{ dma_unmap_single(dev, ctx->key_params.key_dma_addr, ctx->key_params.keylen, DMA_TO_DEVICE); dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", &ctx->key_params.key_dma_addr, ctx->key_params.keylen); } kfree_sensitive(ctx->key_params.key); return rc; } static int cc_xcbc_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct cc_crypto_req cc_req = {}; struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct device *dev = drvdata_to_dev(ctx->drvdata); int rc = 0; unsigned int idx = 0; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; dev_dbg(dev, "===== setkey (%d) ====\n", keylen); switch (keylen) { case AES_KEYSIZE_128: case AES_KEYSIZE_192: case AES_KEYSIZE_256: break; default: return -EINVAL; } ctx->key_params.keylen = keylen; ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL); if (!ctx->key_params.key) return -ENOMEM; ctx->key_params.key_dma_addr = dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE); if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) { dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n", key, keylen); kfree_sensitive(ctx->key_params.key); return -ENOMEM; } dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n", &ctx->key_params.key_dma_addr, ctx->key_params.keylen); ctx->is_hmac = true; /* 1. Load the AES key */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr, keylen, NS_BIT); set_cipher_mode(&desc[idx], DRV_CIPHER_ECB); set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT); set_key_size_aes(&desc[idx], keylen); set_flow_mode(&desc[idx], S_DIN_to_AES); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], DIN_AES_DOUT); set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET), CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0); idx++; hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], DIN_AES_DOUT); set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET), CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0); idx++; hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], DIN_AES_DOUT); set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET), CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0); idx++; rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx); dma_unmap_single(dev, ctx->key_params.key_dma_addr, ctx->key_params.keylen, DMA_TO_DEVICE); dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n", &ctx->key_params.key_dma_addr, ctx->key_params.keylen); kfree_sensitive(ctx->key_params.key); return rc; } static int cc_cmac_setkey(struct crypto_ahash *ahash, const u8 *key, unsigned int keylen) { struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "===== setkey (%d) ====\n", keylen); ctx->is_hmac = true; switch (keylen) { case AES_KEYSIZE_128: case AES_KEYSIZE_192: case AES_KEYSIZE_256: break; default: return -EINVAL; } ctx->key_params.keylen = keylen; /* STAT_PHASE_1: Copy key to ctx */ dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr, keylen, DMA_TO_DEVICE); memcpy(ctx->opad_tmp_keys_buff, key, keylen); if (keylen == 24) { memset(ctx->opad_tmp_keys_buff + 24, 0, CC_AES_KEY_SIZE_MAX - 24); } dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr, keylen, DMA_TO_DEVICE); ctx->key_params.keylen = 
keylen; return 0; } static void cc_free_ctx(struct cc_hash_ctx *ctx) { struct device *dev = drvdata_to_dev(ctx->drvdata); if (ctx->digest_buff_dma_addr) { dma_unmap_single(dev, ctx->digest_buff_dma_addr, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL); dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n", &ctx->digest_buff_dma_addr); ctx->digest_buff_dma_addr = 0; } if (ctx->opad_tmp_keys_dma_addr) { dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL); dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n", &ctx->opad_tmp_keys_dma_addr); ctx->opad_tmp_keys_dma_addr = 0; } ctx->key_params.keylen = 0; } static int cc_alloc_ctx(struct cc_hash_ctx *ctx) { struct device *dev = drvdata_to_dev(ctx->drvdata); ctx->key_params.keylen = 0; ctx->digest_buff_dma_addr = dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) { dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n", sizeof(ctx->digest_buff), ctx->digest_buff); goto fail; } dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n", sizeof(ctx->digest_buff), ctx->digest_buff, &ctx->digest_buff_dma_addr); ctx->opad_tmp_keys_dma_addr = dma_map_single(dev, ctx->opad_tmp_keys_buff, sizeof(ctx->opad_tmp_keys_buff), DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) { dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n", sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff); goto fail; } dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n", sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff, &ctx->opad_tmp_keys_dma_addr); ctx->is_hmac = false; return 0; fail: cc_free_ctx(ctx); return -ENOMEM; } static int cc_get_hash_len(struct crypto_tfm *tfm) { struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); if (ctx->hash_mode == DRV_HASH_SM3) return CC_SM3_HASH_LEN_SIZE; else return cc_get_default_hash_len(ctx->drvdata); } static int cc_cra_init(struct crypto_tfm *tfm) { struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); struct hash_alg_common *hash_alg_common = container_of(tfm->__crt_alg, struct hash_alg_common, base); struct ahash_alg *ahash_alg = container_of(hash_alg_common, struct ahash_alg, halg); struct cc_hash_alg *cc_alg = container_of(ahash_alg, struct cc_hash_alg, ahash_alg); crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm), sizeof(struct ahash_req_ctx)); ctx->hash_mode = cc_alg->hash_mode; ctx->hw_mode = cc_alg->hw_mode; ctx->inter_digestsize = cc_alg->inter_digestsize; ctx->drvdata = cc_alg->drvdata; ctx->hash_len = cc_get_hash_len(tfm); return cc_alloc_ctx(ctx); } static void cc_cra_exit(struct crypto_tfm *tfm) { struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); dev_dbg(dev, "cc_cra_exit"); cc_free_ctx(ctx); } static int cc_mac_update(struct ahash_request *req) { struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base); struct cc_crypto_req cc_req = {}; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; int rc; u32 idx = 0; gfp_t flags = cc_gfp_flags(&req->base); if (req->nbytes == 0) { /* no real updates required */ return 0; } state->xcbc_count++; rc = cc_map_hash_request_update(ctx->drvdata, state, req->src, req->nbytes, block_size, flags); 
if (rc) { if (rc == 1) { dev_dbg(dev, " data size not require HW update %x\n", req->nbytes); /* No hardware updates are required */ return 0; } dev_err(dev, "map_ahash_request_update() failed\n"); return -ENOMEM; } if (cc_map_req(dev, state, ctx)) { dev_err(dev, "map_ahash_source() failed\n"); return -EINVAL; } if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) cc_setup_xcbc(req, desc, &idx); else cc_setup_cmac(req, desc, &idx); cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx); /* store the hash digest result in context */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, ctx->inter_digestsize, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); set_flow_mode(&desc[idx], S_AES_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); idx++; /* Setup request structure */ cc_req.user_cb = cc_update_complete; cc_req.user_arg = req; rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base); if (rc != -EINPROGRESS && rc != -EBUSY) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); cc_unmap_hash_request(dev, state, req->src, true); cc_unmap_req(dev, state, ctx); } return rc; } static int cc_mac_final(struct ahash_request *req) { struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); struct cc_crypto_req cc_req = {}; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; int idx = 0; int rc = 0; u32 key_size, key_len; u32 digestsize = crypto_ahash_digestsize(tfm); gfp_t flags = cc_gfp_flags(&req->base); u32 rem_cnt = *cc_hash_buf_cnt(state); if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { key_size = CC_AES_128_BIT_KEY_SIZE; key_len = CC_AES_128_BIT_KEY_SIZE; } else { key_size = (ctx->key_params.keylen == 24) ? 
AES_MAX_KEY_SIZE : ctx->key_params.keylen; key_len = ctx->key_params.keylen; } dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt); if (cc_map_req(dev, state, ctx)) { dev_err(dev, "map_ahash_source() failed\n"); return -EINVAL; } if (cc_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 0, flags)) { dev_err(dev, "map_ahash_request_final() failed\n"); cc_unmap_req(dev, state, ctx); return -ENOMEM; } if (cc_map_result(dev, state, digestsize)) { dev_err(dev, "map_ahash_digest() failed\n"); cc_unmap_hash_request(dev, state, req->src, true); cc_unmap_req(dev, state, ctx); return -ENOMEM; } /* Setup request structure */ cc_req.user_cb = cc_hash_complete; cc_req.user_arg = req; if (state->xcbc_count && rem_cnt == 0) { /* Load key for ECB decryption */ hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], DRV_CIPHER_ECB); set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT); set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET), key_size, NS_BIT); set_key_size_aes(&desc[idx], key_len); set_flow_mode(&desc[idx], S_DIN_to_AES); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); idx++; /* Initiate decryption of block state to previous * block_state-XOR-M[n] */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT); set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT, 0); set_flow_mode(&desc[idx], DIN_AES_DOUT); idx++; /* Memory Barrier: wait for axi write to complete */ hw_desc_init(&desc[idx]); set_din_no_dma(&desc[idx], 0, 0xfffff0); set_dout_no_dma(&desc[idx], 0, 0, 1); idx++; } if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) cc_setup_xcbc(req, desc, &idx); else cc_setup_cmac(req, desc, &idx); if (state->xcbc_count == 0) { hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_key_size_aes(&desc[idx], key_len); set_cmac_size0_mode(&desc[idx]); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; } else if (rem_cnt > 0) { cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx); } else { hw_desc_init(&desc[idx]); set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE); set_flow_mode(&desc[idx], DIN_AES_DOUT); idx++; } /* Get final MAC result */ hw_desc_init(&desc[idx]); set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); set_flow_mode(&desc[idx], S_AES_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_cipher_mode(&desc[idx], ctx->hw_mode); idx++; rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base); if (rc != -EINPROGRESS && rc != -EBUSY) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); cc_unmap_hash_request(dev, state, req->src, true); cc_unmap_result(dev, state, digestsize, req->result); cc_unmap_req(dev, state, ctx); } return rc; } static int cc_mac_finup(struct ahash_request *req) { struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); struct cc_crypto_req cc_req = {}; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; int idx = 0; int rc = 0; u32 key_len = 0; u32 digestsize = crypto_ahash_digestsize(tfm); gfp_t flags = cc_gfp_flags(&req->base); dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes); if (state->xcbc_count > 0 && req->nbytes == 0) { dev_dbg(dev, "No data to update. 
Call to fdx_mac_final\n"); return cc_mac_final(req); } if (cc_map_req(dev, state, ctx)) { dev_err(dev, "map_ahash_source() failed\n"); return -EINVAL; } if (cc_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1, flags)) { dev_err(dev, "map_ahash_request_final() failed\n"); cc_unmap_req(dev, state, ctx); return -ENOMEM; } if (cc_map_result(dev, state, digestsize)) { dev_err(dev, "map_ahash_digest() failed\n"); cc_unmap_hash_request(dev, state, req->src, true); cc_unmap_req(dev, state, ctx); return -ENOMEM; } /* Setup request structure */ cc_req.user_cb = cc_hash_complete; cc_req.user_arg = req; if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { key_len = CC_AES_128_BIT_KEY_SIZE; cc_setup_xcbc(req, desc, &idx); } else { key_len = ctx->key_params.keylen; cc_setup_cmac(req, desc, &idx); } if (req->nbytes == 0) { hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_key_size_aes(&desc[idx], key_len); set_cmac_size0_mode(&desc[idx]); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; } else { cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx); } /* Get final MAC result */ hw_desc_init(&desc[idx]); set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); set_flow_mode(&desc[idx], S_AES_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); set_cipher_mode(&desc[idx], ctx->hw_mode); idx++; rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base); if (rc != -EINPROGRESS && rc != -EBUSY) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); cc_unmap_hash_request(dev, state, req->src, true); cc_unmap_result(dev, state, digestsize, req->result); cc_unmap_req(dev, state, ctx); } return rc; } static int cc_mac_digest(struct ahash_request *req) { struct ahash_req_ctx *state = ahash_request_ctx_dma(req); struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); struct device *dev = drvdata_to_dev(ctx->drvdata); u32 digestsize = crypto_ahash_digestsize(tfm); struct cc_crypto_req cc_req = {}; struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN]; u32 key_len; unsigned int idx = 0; int rc; gfp_t flags = cc_gfp_flags(&req->base); dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes); cc_init_req(dev, state, ctx); if (cc_map_req(dev, state, ctx)) { dev_err(dev, "map_ahash_source() failed\n"); return -ENOMEM; } if (cc_map_result(dev, state, digestsize)) { dev_err(dev, "map_ahash_digest() failed\n"); cc_unmap_req(dev, state, ctx); return -ENOMEM; } if (cc_map_hash_request_final(ctx->drvdata, state, req->src, req->nbytes, 1, flags)) { dev_err(dev, "map_ahash_request_final() failed\n"); cc_unmap_req(dev, state, ctx); return -ENOMEM; } /* Setup request structure */ cc_req.user_cb = cc_digest_complete; cc_req.user_arg = req; if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) { key_len = CC_AES_128_BIT_KEY_SIZE; cc_setup_xcbc(req, desc, &idx); } else { key_len = ctx->key_params.keylen; cc_setup_cmac(req, desc, &idx); } if (req->nbytes == 0) { hw_desc_init(&desc[idx]); set_cipher_mode(&desc[idx], ctx->hw_mode); set_key_size_aes(&desc[idx], key_len); set_cmac_size0_mode(&desc[idx]); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; } else { cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx); } /* Get final MAC result */ hw_desc_init(&desc[idx]); set_dout_dlli(&desc[idx], state->digest_result_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT, 1); set_queue_last_ind(ctx->drvdata, &desc[idx]); set_flow_mode(&desc[idx], S_AES_to_DOUT); set_setup_mode(&desc[idx], SETUP_WRITE_STATE0); 
set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_cipher_mode(&desc[idx], ctx->hw_mode); idx++; rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base); if (rc != -EINPROGRESS && rc != -EBUSY) { dev_err(dev, "send_request() failed (rc=%d)\n", rc); cc_unmap_hash_request(dev, state, req->src, true); cc_unmap_result(dev, state, digestsize, req->result); cc_unmap_req(dev, state, ctx); } return rc; } static int cc_hash_export(struct ahash_request *req, void *out) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct ahash_req_ctx *state = ahash_request_ctx_dma(req); u8 *curr_buff = cc_hash_buf(state); u32 curr_buff_cnt = *cc_hash_buf_cnt(state); const u32 tmp = CC_EXPORT_MAGIC; memcpy(out, &tmp, sizeof(u32)); out += sizeof(u32); memcpy(out, state->digest_buff, ctx->inter_digestsize); out += ctx->inter_digestsize; memcpy(out, state->digest_bytes_len, ctx->hash_len); out += ctx->hash_len; memcpy(out, &curr_buff_cnt, sizeof(u32)); out += sizeof(u32); memcpy(out, curr_buff, curr_buff_cnt); return 0; } static int cc_hash_import(struct ahash_request *req, const void *in) { struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash); struct device *dev = drvdata_to_dev(ctx->drvdata); struct ahash_req_ctx *state = ahash_request_ctx_dma(req); u32 tmp; memcpy(&tmp, in, sizeof(u32)); if (tmp != CC_EXPORT_MAGIC) return -EINVAL; in += sizeof(u32); cc_init_req(dev, state, ctx); memcpy(state->digest_buff, in, ctx->inter_digestsize); in += ctx->inter_digestsize; memcpy(state->digest_bytes_len, in, ctx->hash_len); in += ctx->hash_len; /* Sanity check the data as much as possible */ memcpy(&tmp, in, sizeof(u32)); if (tmp > CC_MAX_HASH_BLCK_SIZE) return -EINVAL; in += sizeof(u32); state->buf_cnt[0] = tmp; memcpy(state->buffers[0], in, tmp); return 0; } struct cc_hash_template { char name[CRYPTO_MAX_ALG_NAME]; char driver_name[CRYPTO_MAX_ALG_NAME]; char mac_name[CRYPTO_MAX_ALG_NAME]; char mac_driver_name[CRYPTO_MAX_ALG_NAME]; unsigned int blocksize; bool is_mac; bool synchronize; struct ahash_alg template_ahash; int hash_mode; int hw_mode; int inter_digestsize; struct cc_drvdata *drvdata; u32 min_hw_rev; enum cc_std_body std_body; }; #define CC_STATE_SIZE(_x) \ ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32))) /* hash descriptors */ static struct cc_hash_template driver_hash[] = { //Asynchronize hash template { .name = "sha1", .driver_name = "sha1-ccree", .mac_name = "hmac(sha1)", .mac_driver_name = "hmac-sha1-ccree", .blocksize = SHA1_BLOCK_SIZE, .is_mac = true, .synchronize = false, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, .final = cc_hash_final, .finup = cc_hash_finup, .digest = cc_hash_digest, .export = cc_hash_export, .import = cc_hash_import, .setkey = cc_hash_setkey, .halg = { .digestsize = SHA1_DIGEST_SIZE, .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE), }, }, .hash_mode = DRV_HASH_SHA1, .hw_mode = DRV_HASH_HW_SHA1, .inter_digestsize = SHA1_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "sha256", .driver_name = "sha256-ccree", .mac_name = "hmac(sha256)", .mac_driver_name = "hmac-sha256-ccree", .blocksize = SHA256_BLOCK_SIZE, .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, .final = cc_hash_final, .finup = cc_hash_finup, .digest = cc_hash_digest, .export = cc_hash_export, .import = cc_hash_import, .setkey = cc_hash_setkey, .halg = { .digestsize = 
SHA256_DIGEST_SIZE, .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE) }, }, .hash_mode = DRV_HASH_SHA256, .hw_mode = DRV_HASH_HW_SHA256, .inter_digestsize = SHA256_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "sha224", .driver_name = "sha224-ccree", .mac_name = "hmac(sha224)", .mac_driver_name = "hmac-sha224-ccree", .blocksize = SHA224_BLOCK_SIZE, .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, .final = cc_hash_final, .finup = cc_hash_finup, .digest = cc_hash_digest, .export = cc_hash_export, .import = cc_hash_import, .setkey = cc_hash_setkey, .halg = { .digestsize = SHA224_DIGEST_SIZE, .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE), }, }, .hash_mode = DRV_HASH_SHA224, .hw_mode = DRV_HASH_HW_SHA256, .inter_digestsize = SHA256_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "sha384", .driver_name = "sha384-ccree", .mac_name = "hmac(sha384)", .mac_driver_name = "hmac-sha384-ccree", .blocksize = SHA384_BLOCK_SIZE, .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, .final = cc_hash_final, .finup = cc_hash_finup, .digest = cc_hash_digest, .export = cc_hash_export, .import = cc_hash_import, .setkey = cc_hash_setkey, .halg = { .digestsize = SHA384_DIGEST_SIZE, .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE), }, }, .hash_mode = DRV_HASH_SHA384, .hw_mode = DRV_HASH_HW_SHA512, .inter_digestsize = SHA512_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_712, .std_body = CC_STD_NIST, }, { .name = "sha512", .driver_name = "sha512-ccree", .mac_name = "hmac(sha512)", .mac_driver_name = "hmac-sha512-ccree", .blocksize = SHA512_BLOCK_SIZE, .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, .final = cc_hash_final, .finup = cc_hash_finup, .digest = cc_hash_digest, .export = cc_hash_export, .import = cc_hash_import, .setkey = cc_hash_setkey, .halg = { .digestsize = SHA512_DIGEST_SIZE, .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE), }, }, .hash_mode = DRV_HASH_SHA512, .hw_mode = DRV_HASH_HW_SHA512, .inter_digestsize = SHA512_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_712, .std_body = CC_STD_NIST, }, { .name = "md5", .driver_name = "md5-ccree", .mac_name = "hmac(md5)", .mac_driver_name = "hmac-md5-ccree", .blocksize = MD5_HMAC_BLOCK_SIZE, .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, .final = cc_hash_final, .finup = cc_hash_finup, .digest = cc_hash_digest, .export = cc_hash_export, .import = cc_hash_import, .setkey = cc_hash_setkey, .halg = { .digestsize = MD5_DIGEST_SIZE, .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE), }, }, .hash_mode = DRV_HASH_MD5, .hw_mode = DRV_HASH_HW_MD5, .inter_digestsize = MD5_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "sm3", .driver_name = "sm3-ccree", .blocksize = SM3_BLOCK_SIZE, .is_mac = false, .template_ahash = { .init = cc_hash_init, .update = cc_hash_update, .final = cc_hash_final, .finup = cc_hash_finup, .digest = cc_hash_digest, .export = cc_hash_export, .import = cc_hash_import, .setkey = cc_hash_setkey, .halg = { .digestsize = SM3_DIGEST_SIZE, .statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE), }, }, .hash_mode = DRV_HASH_SM3, .hw_mode = DRV_HASH_HW_SM3, .inter_digestsize = SM3_DIGEST_SIZE, .min_hw_rev = CC_HW_REV_713, .std_body = CC_STD_OSCCA, }, { .mac_name = "xcbc(aes)", .mac_driver_name = "xcbc-aes-ccree", .blocksize = AES_BLOCK_SIZE, .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_mac_update, .final = cc_mac_final, .finup = cc_mac_finup, 
.digest = cc_mac_digest, .setkey = cc_xcbc_setkey, .export = cc_hash_export, .import = cc_hash_import, .halg = { .digestsize = AES_BLOCK_SIZE, .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE), }, }, .hash_mode = DRV_HASH_NULL, .hw_mode = DRV_CIPHER_XCBC_MAC, .inter_digestsize = AES_BLOCK_SIZE, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .mac_name = "cmac(aes)", .mac_driver_name = "cmac-aes-ccree", .blocksize = AES_BLOCK_SIZE, .is_mac = true, .template_ahash = { .init = cc_hash_init, .update = cc_mac_update, .final = cc_mac_final, .finup = cc_mac_finup, .digest = cc_mac_digest, .setkey = cc_cmac_setkey, .export = cc_hash_export, .import = cc_hash_import, .halg = { .digestsize = AES_BLOCK_SIZE, .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE), }, }, .hash_mode = DRV_HASH_NULL, .hw_mode = DRV_CIPHER_CMAC, .inter_digestsize = AES_BLOCK_SIZE, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, }; static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template, struct device *dev, bool keyed) { struct cc_hash_alg *t_crypto_alg; struct crypto_alg *alg; struct ahash_alg *halg; t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL); if (!t_crypto_alg) return ERR_PTR(-ENOMEM); t_crypto_alg->ahash_alg = template->template_ahash; halg = &t_crypto_alg->ahash_alg; alg = &halg->halg.base; if (keyed) { snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->mac_name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->mac_driver_name); } else { halg->setkey = NULL; snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", template->name); snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", template->driver_name); } alg->cra_module = THIS_MODULE; alg->cra_ctxsize = sizeof(struct cc_hash_ctx) + crypto_dma_padding(); alg->cra_priority = CC_CRA_PRIO; alg->cra_blocksize = template->blocksize; alg->cra_alignmask = 0; alg->cra_exit = cc_cra_exit; alg->cra_init = cc_cra_init; alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; t_crypto_alg->hash_mode = template->hash_mode; t_crypto_alg->hw_mode = template->hw_mode; t_crypto_alg->inter_digestsize = template->inter_digestsize; return t_crypto_alg; } static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data, unsigned int size, u32 *sram_buff_ofs) { struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)]; unsigned int larval_seq_len = 0; int rc; cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data), larval_seq, &larval_seq_len); rc = send_request_init(drvdata, larval_seq, larval_seq_len); if (rc) return rc; *sram_buff_ofs += size; return 0; } int cc_init_hash_sram(struct cc_drvdata *drvdata) { struct cc_hash_handle *hash_handle = drvdata->hash_handle; u32 sram_buff_ofs = hash_handle->digest_len_sram_addr; bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712); bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713); int rc = 0; /* Copy-to-sram digest-len */ rc = cc_init_copy_sram(drvdata, cc_digest_len_init, sizeof(cc_digest_len_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; if (large_sha_supported) { /* Copy-to-sram digest-len for sha384/512 */ rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init, sizeof(cc_digest_len_sha512_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; } /* The initial digests offset */ hash_handle->larval_digest_sram_addr = sram_buff_ofs; /* Copy-to-sram initial SHA* digests */ rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; rc = 
cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; if (sm3_supported) { rc = cc_init_copy_sram(drvdata, cc_sm3_init, sizeof(cc_sm3_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; } if (large_sha_supported) { rc = cc_init_copy_sram(drvdata, cc_sha384_init, sizeof(cc_sha384_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; rc = cc_init_copy_sram(drvdata, cc_sha512_init, sizeof(cc_sha512_init), &sram_buff_ofs); if (rc) goto init_digest_const_err; } init_digest_const_err: return rc; } int cc_hash_alloc(struct cc_drvdata *drvdata) { struct cc_hash_handle *hash_handle; u32 sram_buff; u32 sram_size_to_alloc; struct device *dev = drvdata_to_dev(drvdata); int rc = 0; int alg; hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL); if (!hash_handle) return -ENOMEM; INIT_LIST_HEAD(&hash_handle->hash_list); drvdata->hash_handle = hash_handle; sram_size_to_alloc = sizeof(cc_digest_len_init) + sizeof(cc_md5_init) + sizeof(cc_sha1_init) + sizeof(cc_sha224_init) + sizeof(cc_sha256_init); if (drvdata->hw_rev >= CC_HW_REV_713) sram_size_to_alloc += sizeof(cc_sm3_init); if (drvdata->hw_rev >= CC_HW_REV_712) sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) + sizeof(cc_sha384_init) + sizeof(cc_sha512_init); sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc); if (sram_buff == NULL_SRAM_ADDR) { rc = -ENOMEM; goto fail; } /* The initial digest-len offset */ hash_handle->digest_len_sram_addr = sram_buff; /*must be set before the alg registration as it is being used there*/ rc = cc_init_hash_sram(drvdata); if (rc) { dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc); goto fail; } /* ahash registration */ for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) { struct cc_hash_alg *t_alg; int hw_mode = driver_hash[alg].hw_mode; /* Check that the HW revision and variants are suitable */ if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) || !(drvdata->std_bodies & driver_hash[alg].std_body)) continue; if (driver_hash[alg].is_mac) { /* register hmac version */ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true); if (IS_ERR(t_alg)) { rc = PTR_ERR(t_alg); dev_err(dev, "%s alg allocation failed\n", driver_hash[alg].driver_name); goto fail; } t_alg->drvdata = drvdata; rc = crypto_register_ahash(&t_alg->ahash_alg); if (rc) { dev_err(dev, "%s alg registration failed\n", driver_hash[alg].driver_name); goto fail; } list_add_tail(&t_alg->entry, &hash_handle->hash_list); } if (hw_mode == DRV_CIPHER_XCBC_MAC || hw_mode == DRV_CIPHER_CMAC) continue; /* register hash version */ t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false); if (IS_ERR(t_alg)) { rc = PTR_ERR(t_alg); dev_err(dev, "%s alg allocation failed\n", driver_hash[alg].driver_name); goto fail; } t_alg->drvdata = drvdata; rc = crypto_register_ahash(&t_alg->ahash_alg); if (rc) { dev_err(dev, "%s alg registration failed\n", driver_hash[alg].driver_name); goto fail; } list_add_tail(&t_alg->entry, &hash_handle->hash_list); } return 0; fail: cc_hash_free(drvdata); return rc; } int cc_hash_free(struct cc_drvdata *drvdata) { struct cc_hash_alg *t_hash_alg, *hash_n; struct cc_hash_handle *hash_handle = drvdata->hash_handle; list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list, entry) { 
crypto_unregister_ahash(&t_hash_alg->ahash_alg); list_del(&t_hash_alg->entry); } return 0; } static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[], unsigned int *seq_size) { unsigned int idx = *seq_size; struct ahash_req_ctx *state = ahash_request_ctx_dma(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); /* Setup XCBC MAC K1 */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET), CC_AES_128_BIT_KEY_SIZE, NS_BIT); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; /* Setup XCBC MAC K2 */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET), CC_AES_128_BIT_KEY_SIZE, NS_BIT); set_setup_mode(&desc[idx], SETUP_LOAD_STATE1); set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; /* Setup XCBC MAC K3 */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET), CC_AES_128_BIT_KEY_SIZE, NS_BIT); set_setup_mode(&desc[idx], SETUP_LOAD_STATE2); set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; /* Loading MAC state */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; *seq_size = idx; } static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[], unsigned int *seq_size) { unsigned int idx = *seq_size; struct ahash_req_ctx *state = ahash_request_ctx_dma(areq); struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm); /* Setup CMAC Key */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr, ((ctx->key_params.keylen == 24) ? 
AES_MAX_KEY_SIZE : ctx->key_params.keylen), NS_BIT); set_setup_mode(&desc[idx], SETUP_LOAD_KEY0); set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], ctx->key_params.keylen); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; /* Load MAC state */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT); set_setup_mode(&desc[idx], SETUP_LOAD_STATE0); set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC); set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT); set_key_size_aes(&desc[idx], ctx->key_params.keylen); set_flow_mode(&desc[idx], S_DIN_to_AES); idx++; *seq_size = idx; } static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx, unsigned int flow_mode, struct cc_hw_desc desc[], bool is_not_last_data, unsigned int *seq_size) { unsigned int idx = *seq_size; struct device *dev = drvdata_to_dev(ctx->drvdata); if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) { hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq_ctx->curr_sg), areq_ctx->curr_sg->length, NS_BIT); set_flow_mode(&desc[idx], flow_mode); idx++; } else { if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { dev_dbg(dev, " NULL mode\n"); /* nothing to build */ return; } /* bypass */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_DLLI, areq_ctx->mlli_params.mlli_dma_addr, areq_ctx->mlli_params.mlli_len, NS_BIT); set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr, areq_ctx->mlli_params.mlli_len); set_flow_mode(&desc[idx], BYPASS); idx++; /* process */ hw_desc_init(&desc[idx]); set_din_type(&desc[idx], DMA_MLLI, ctx->drvdata->mlli_sram_addr, areq_ctx->mlli_nents, NS_BIT); set_flow_mode(&desc[idx], flow_mode); idx++; } if (is_not_last_data) set_din_not_last_indication(&desc[(idx - 1)]); /* return updated desc sequence size */ *seq_size = idx; } static const void *cc_larval_digest(struct device *dev, u32 mode) { switch (mode) { case DRV_HASH_MD5: return cc_md5_init; case DRV_HASH_SHA1: return cc_sha1_init; case DRV_HASH_SHA224: return cc_sha224_init; case DRV_HASH_SHA256: return cc_sha256_init; case DRV_HASH_SHA384: return cc_sha384_init; case DRV_HASH_SHA512: return cc_sha512_init; case DRV_HASH_SM3: return cc_sm3_init; default: dev_err(dev, "Invalid hash mode (%d)\n", mode); return cc_md5_init; } } /** * cc_larval_digest_addr() - Get the address of the initial digest in SRAM * according to the given hash mode * * @drvdata: Associated device driver context * @mode: The Hash mode. 
Supported modes: MD5/SHA1/SHA224/SHA256 * * Return: * The address of the initial digest in SRAM */ u32 cc_larval_digest_addr(void *drvdata, u32 mode) { struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata; struct cc_hash_handle *hash_handle = _drvdata->hash_handle; struct device *dev = drvdata_to_dev(_drvdata); bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713); u32 addr; switch (mode) { case DRV_HASH_NULL: break; /*Ignore*/ case DRV_HASH_MD5: return (hash_handle->larval_digest_sram_addr); case DRV_HASH_SHA1: return (hash_handle->larval_digest_sram_addr + sizeof(cc_md5_init)); case DRV_HASH_SHA224: return (hash_handle->larval_digest_sram_addr + sizeof(cc_md5_init) + sizeof(cc_sha1_init)); case DRV_HASH_SHA256: return (hash_handle->larval_digest_sram_addr + sizeof(cc_md5_init) + sizeof(cc_sha1_init) + sizeof(cc_sha224_init)); case DRV_HASH_SM3: return (hash_handle->larval_digest_sram_addr + sizeof(cc_md5_init) + sizeof(cc_sha1_init) + sizeof(cc_sha224_init) + sizeof(cc_sha256_init)); case DRV_HASH_SHA384: addr = (hash_handle->larval_digest_sram_addr + sizeof(cc_md5_init) + sizeof(cc_sha1_init) + sizeof(cc_sha224_init) + sizeof(cc_sha256_init)); if (sm3_supported) addr += sizeof(cc_sm3_init); return addr; case DRV_HASH_SHA512: addr = (hash_handle->larval_digest_sram_addr + sizeof(cc_md5_init) + sizeof(cc_sha1_init) + sizeof(cc_sha224_init) + sizeof(cc_sha256_init) + sizeof(cc_sha384_init)); if (sm3_supported) addr += sizeof(cc_sm3_init); return addr; default: dev_err(dev, "Invalid hash mode (%d)\n", mode); } /*This is valid wrong value to avoid kernel crash*/ return hash_handle->larval_digest_sram_addr; } u32 cc_digest_len_addr(void *drvdata, u32 mode) { struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata; struct cc_hash_handle *hash_handle = _drvdata->hash_handle; u32 digest_len_addr = hash_handle->digest_len_sram_addr; switch (mode) { case DRV_HASH_SHA1: case DRV_HASH_SHA224: case DRV_HASH_SHA256: case DRV_HASH_MD5: return digest_len_addr; case DRV_HASH_SHA384: case DRV_HASH_SHA512: return digest_len_addr + sizeof(cc_digest_len_init); default: return digest_len_addr; /*to avoid kernel crash*/ } }
linux-master
drivers/crypto/ccree/cc_hash.c
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ #include <linux/kernel.h> #include <linux/module.h> #include <crypto/algapi.h> #include <crypto/internal/skcipher.h> #include <crypto/internal/des.h> #include <crypto/xts.h> #include <crypto/sm4.h> #include <crypto/scatterwalk.h> #include "cc_driver.h" #include "cc_lli_defs.h" #include "cc_buffer_mgr.h" #include "cc_cipher.h" #include "cc_request_mgr.h" #define MAX_SKCIPHER_SEQ_LEN 6 #define template_skcipher template_u.skcipher struct cc_user_key_info { u8 *key; dma_addr_t key_dma_addr; }; struct cc_hw_key_info { enum cc_hw_crypto_key key1_slot; enum cc_hw_crypto_key key2_slot; }; struct cc_cpp_key_info { u8 slot; enum cc_cpp_alg alg; }; enum cc_key_type { CC_UNPROTECTED_KEY, /* User key */ CC_HW_PROTECTED_KEY, /* HW (FDE) key */ CC_POLICY_PROTECTED_KEY, /* CPP key */ CC_INVALID_PROTECTED_KEY /* Invalid key */ }; struct cc_cipher_ctx { struct cc_drvdata *drvdata; int keylen; int cipher_mode; int flow_mode; unsigned int flags; enum cc_key_type key_type; struct cc_user_key_info user; union { struct cc_hw_key_info hw; struct cc_cpp_key_info cpp; }; struct crypto_shash *shash_tfm; struct crypto_skcipher *fallback_tfm; bool fallback_on; }; static void cc_cipher_complete(struct device *dev, void *cc_req, int err); static inline enum cc_key_type cc_key_type(struct crypto_tfm *tfm) { struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); return ctx_p->key_type; } static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size) { switch (ctx_p->flow_mode) { case S_DIN_to_AES: switch (size) { case CC_AES_128_BIT_KEY_SIZE: case CC_AES_192_BIT_KEY_SIZE: if (ctx_p->cipher_mode != DRV_CIPHER_XTS) return 0; break; case CC_AES_256_BIT_KEY_SIZE: return 0; case (CC_AES_192_BIT_KEY_SIZE * 2): case (CC_AES_256_BIT_KEY_SIZE * 2): if (ctx_p->cipher_mode == DRV_CIPHER_XTS || ctx_p->cipher_mode == DRV_CIPHER_ESSIV) return 0; break; default: break; } break; case S_DIN_to_DES: if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE) return 0; break; case S_DIN_to_SM4: if (size == SM4_KEY_SIZE) return 0; break; default: break; } return -EINVAL; } static int validate_data_size(struct cc_cipher_ctx *ctx_p, unsigned int size) { switch (ctx_p->flow_mode) { case S_DIN_to_AES: switch (ctx_p->cipher_mode) { case DRV_CIPHER_XTS: case DRV_CIPHER_CBC_CTS: if (size >= AES_BLOCK_SIZE) return 0; break; case DRV_CIPHER_OFB: case DRV_CIPHER_CTR: return 0; case DRV_CIPHER_ECB: case DRV_CIPHER_CBC: case DRV_CIPHER_ESSIV: if (IS_ALIGNED(size, AES_BLOCK_SIZE)) return 0; break; default: break; } break; case S_DIN_to_DES: if (IS_ALIGNED(size, DES_BLOCK_SIZE)) return 0; break; case S_DIN_to_SM4: switch (ctx_p->cipher_mode) { case DRV_CIPHER_CTR: return 0; case DRV_CIPHER_ECB: case DRV_CIPHER_CBC: if (IS_ALIGNED(size, SM4_BLOCK_SIZE)) return 0; break; default: break; } break; default: break; } return -EINVAL; } static int cc_cipher_init(struct crypto_tfm *tfm) { struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct cc_crypto_alg *cc_alg = container_of(tfm->__crt_alg, struct cc_crypto_alg, skcipher_alg.base); struct device *dev = drvdata_to_dev(cc_alg->drvdata); unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; unsigned int fallback_req_size = 0; dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p, crypto_tfm_alg_name(tfm)); ctx_p->cipher_mode = cc_alg->cipher_mode; ctx_p->flow_mode = cc_alg->flow_mode; ctx_p->drvdata = cc_alg->drvdata; if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { const char *name = 
crypto_tfm_alg_name(tfm); /* Alloc hash tfm for essiv */ ctx_p->shash_tfm = crypto_alloc_shash("sha256", 0, 0); if (IS_ERR(ctx_p->shash_tfm)) { dev_err(dev, "Error allocating hash tfm for ESSIV.\n"); return PTR_ERR(ctx_p->shash_tfm); } max_key_buf_size <<= 1; /* Alloc fallabck tfm or essiv when key size != 256 bit */ ctx_p->fallback_tfm = crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC); if (IS_ERR(ctx_p->fallback_tfm)) { /* Note we're still allowing registration with no fallback since it's * better to have most modes supported than none at all. */ dev_warn(dev, "Error allocating fallback algo %s. Some modes may be available.\n", name); ctx_p->fallback_tfm = NULL; } else { fallback_req_size = crypto_skcipher_reqsize(ctx_p->fallback_tfm); } } crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm), sizeof(struct cipher_req_ctx) + fallback_req_size); /* Allocate key buffer, cache line aligned */ ctx_p->user.key = kzalloc(max_key_buf_size, GFP_KERNEL); if (!ctx_p->user.key) goto free_fallback; dev_dbg(dev, "Allocated key buffer in context. key=@%p\n", ctx_p->user.key); /* Map key buffer */ ctx_p->user.key_dma_addr = dma_map_single(dev, ctx_p->user.key, max_key_buf_size, DMA_TO_DEVICE); if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) { dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n", max_key_buf_size, ctx_p->user.key); goto free_key; } dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n", max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr); return 0; free_key: kfree(ctx_p->user.key); free_fallback: crypto_free_skcipher(ctx_p->fallback_tfm); crypto_free_shash(ctx_p->shash_tfm); return -ENOMEM; } static void cc_cipher_exit(struct crypto_tfm *tfm) { struct crypto_alg *alg = tfm->__crt_alg; struct cc_crypto_alg *cc_alg = container_of(alg, struct cc_crypto_alg, skcipher_alg.base); unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct device *dev = drvdata_to_dev(ctx_p->drvdata); dev_dbg(dev, "Clearing context @%p for %s\n", crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm)); if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { /* Free hash tfm for essiv */ crypto_free_shash(ctx_p->shash_tfm); ctx_p->shash_tfm = NULL; crypto_free_skcipher(ctx_p->fallback_tfm); ctx_p->fallback_tfm = NULL; } /* Unmap key buffer */ dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size, DMA_TO_DEVICE); dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n", &ctx_p->user.key_dma_addr); /* Free key buffer in context */ dev_dbg(dev, "Free key buffer in context. 
key=@%p\n", ctx_p->user.key); kfree_sensitive(ctx_p->user.key); } struct tdes_keys { u8 key1[DES_KEY_SIZE]; u8 key2[DES_KEY_SIZE]; u8 key3[DES_KEY_SIZE]; }; static enum cc_hw_crypto_key cc_slot_to_hw_key(u8 slot_num) { switch (slot_num) { case 0: return KFDE0_KEY; case 1: return KFDE1_KEY; case 2: return KFDE2_KEY; case 3: return KFDE3_KEY; } return END_OF_KEYS; } static u8 cc_slot_to_cpp_key(u8 slot_num) { return (slot_num - CC_FIRST_CPP_KEY_SLOT); } static inline enum cc_key_type cc_slot_to_key_type(u8 slot_num) { if (slot_num >= CC_FIRST_HW_KEY_SLOT && slot_num <= CC_LAST_HW_KEY_SLOT) return CC_HW_PROTECTED_KEY; else if (slot_num >= CC_FIRST_CPP_KEY_SLOT && slot_num <= CC_LAST_CPP_KEY_SLOT) return CC_POLICY_PROTECTED_KEY; else return CC_INVALID_PROTECTED_KEY; } static int cc_cipher_sethkey(struct crypto_skcipher *sktfm, const u8 *key, unsigned int keylen) { struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm); struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct device *dev = drvdata_to_dev(ctx_p->drvdata); struct cc_hkey_info hki; dev_dbg(dev, "Setting HW key in context @%p for %s. keylen=%u\n", ctx_p, crypto_tfm_alg_name(tfm), keylen); dump_byte_array("key", key, keylen); /* STAT_PHASE_0: Init and sanity checks */ /* This check the size of the protected key token */ if (keylen != sizeof(hki)) { dev_err(dev, "Unsupported protected key size %d.\n", keylen); return -EINVAL; } memcpy(&hki, key, keylen); /* The real key len for crypto op is the size of the HW key * referenced by the HW key slot, not the hardware key token */ keylen = hki.keylen; if (validate_keys_sizes(ctx_p, keylen)) { dev_dbg(dev, "Unsupported key size %d.\n", keylen); return -EINVAL; } ctx_p->keylen = keylen; ctx_p->fallback_on = false; switch (cc_slot_to_key_type(hki.hw_key1)) { case CC_HW_PROTECTED_KEY: if (ctx_p->flow_mode == S_DIN_to_SM4) { dev_err(dev, "Only AES HW protected keys are supported\n"); return -EINVAL; } ctx_p->hw.key1_slot = cc_slot_to_hw_key(hki.hw_key1); if (ctx_p->hw.key1_slot == END_OF_KEYS) { dev_err(dev, "Unsupported hw key1 number (%d)\n", hki.hw_key1); return -EINVAL; } if (ctx_p->cipher_mode == DRV_CIPHER_XTS || ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { if (hki.hw_key1 == hki.hw_key2) { dev_err(dev, "Illegal hw key numbers (%d,%d)\n", hki.hw_key1, hki.hw_key2); return -EINVAL; } ctx_p->hw.key2_slot = cc_slot_to_hw_key(hki.hw_key2); if (ctx_p->hw.key2_slot == END_OF_KEYS) { dev_err(dev, "Unsupported hw key2 number (%d)\n", hki.hw_key2); return -EINVAL; } } ctx_p->key_type = CC_HW_PROTECTED_KEY; dev_dbg(dev, "HW protected key %d/%d set\n.", ctx_p->hw.key1_slot, ctx_p->hw.key2_slot); break; case CC_POLICY_PROTECTED_KEY: if (ctx_p->drvdata->hw_rev < CC_HW_REV_713) { dev_err(dev, "CPP keys not supported in this hardware revision.\n"); return -EINVAL; } if (ctx_p->cipher_mode != DRV_CIPHER_CBC && ctx_p->cipher_mode != DRV_CIPHER_CTR) { dev_err(dev, "CPP keys only supported in CBC or CTR modes.\n"); return -EINVAL; } ctx_p->cpp.slot = cc_slot_to_cpp_key(hki.hw_key1); if (ctx_p->flow_mode == S_DIN_to_AES) ctx_p->cpp.alg = CC_CPP_AES; else /* Must be SM4 since due to sethkey registration */ ctx_p->cpp.alg = CC_CPP_SM4; ctx_p->key_type = CC_POLICY_PROTECTED_KEY; dev_dbg(dev, "policy protected key alg: %d slot: %d.\n", ctx_p->cpp.alg, ctx_p->cpp.slot); break; default: dev_err(dev, "Unsupported protected key (%d)\n", hki.hw_key1); return -EINVAL; } return 0; } static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key, unsigned int keylen) { struct crypto_tfm *tfm = 
crypto_skcipher_tfm(sktfm); struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct device *dev = drvdata_to_dev(ctx_p->drvdata); struct cc_crypto_alg *cc_alg = container_of(tfm->__crt_alg, struct cc_crypto_alg, skcipher_alg.base); unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize; dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n", ctx_p, crypto_tfm_alg_name(tfm), keylen); dump_byte_array("key", key, keylen); /* STAT_PHASE_0: Init and sanity checks */ if (validate_keys_sizes(ctx_p, keylen)) { dev_dbg(dev, "Invalid key size %d.\n", keylen); return -EINVAL; } if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { /* We only support 256 bit ESSIV-CBC-AES keys */ if (keylen != AES_KEYSIZE_256) { unsigned int flags = crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_MASK; if (likely(ctx_p->fallback_tfm)) { ctx_p->fallback_on = true; crypto_skcipher_clear_flags(ctx_p->fallback_tfm, CRYPTO_TFM_REQ_MASK); crypto_skcipher_clear_flags(ctx_p->fallback_tfm, flags); return crypto_skcipher_setkey(ctx_p->fallback_tfm, key, keylen); } dev_dbg(dev, "Unsupported key size %d and no fallback.\n", keylen); return -EINVAL; } /* Internal ESSIV key buffer is double sized */ max_key_buf_size <<= 1; } ctx_p->fallback_on = false; ctx_p->key_type = CC_UNPROTECTED_KEY; /* * Verify DES weak keys * Note that we're dropping the expanded key since the * HW does the expansion on its own. */ if (ctx_p->flow_mode == S_DIN_to_DES) { if ((keylen == DES3_EDE_KEY_SIZE && verify_skcipher_des3_key(sktfm, key)) || verify_skcipher_des_key(sktfm, key)) { dev_dbg(dev, "weak DES key"); return -EINVAL; } } if (ctx_p->cipher_mode == DRV_CIPHER_XTS && xts_verify_key(sktfm, key, keylen)) { dev_dbg(dev, "weak XTS key"); return -EINVAL; } /* STAT_PHASE_1: Copy key to ctx */ dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr, max_key_buf_size, DMA_TO_DEVICE); memcpy(ctx_p->user.key, key, keylen); if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) { /* sha256 for key2 - use sw implementation */ int err; err = crypto_shash_tfm_digest(ctx_p->shash_tfm, ctx_p->user.key, keylen, ctx_p->user.key + keylen); if (err) { dev_err(dev, "Failed to hash ESSIV key.\n"); return err; } keylen <<= 1; } dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr, max_key_buf_size, DMA_TO_DEVICE); ctx_p->keylen = keylen; dev_dbg(dev, "return safely"); return 0; } static int cc_out_setup_mode(struct cc_cipher_ctx *ctx_p) { switch (ctx_p->flow_mode) { case S_DIN_to_AES: return S_AES_to_DOUT; case S_DIN_to_DES: return S_DES_to_DOUT; case S_DIN_to_SM4: return S_SM4_to_DOUT; default: return ctx_p->flow_mode; } } static void cc_setup_readiv_desc(struct crypto_tfm *tfm, struct cipher_req_ctx *req_ctx, unsigned int ivsize, struct cc_hw_desc desc[], unsigned int *seq_size) { struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct device *dev = drvdata_to_dev(ctx_p->drvdata); int cipher_mode = ctx_p->cipher_mode; int flow_mode = cc_out_setup_mode(ctx_p); int direction = req_ctx->gen_ctx.op_type; dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr; if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) return; switch (cipher_mode) { case DRV_CIPHER_ECB: break; case DRV_CIPHER_CBC: case DRV_CIPHER_CBC_CTS: case DRV_CIPHER_CTR: case DRV_CIPHER_OFB: /* Read next IV */ hw_desc_init(&desc[*seq_size]); set_dout_dlli(&desc[*seq_size], iv_dma_addr, ivsize, NS_BIT, 1); set_cipher_config0(&desc[*seq_size], direction); set_flow_mode(&desc[*seq_size], flow_mode); set_cipher_mode(&desc[*seq_size], cipher_mode); if (cipher_mode == DRV_CIPHER_CTR || cipher_mode == DRV_CIPHER_OFB) { 
set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1); } else { set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE0); } set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]); (*seq_size)++; break; case DRV_CIPHER_XTS: case DRV_CIPHER_ESSIV: /* IV */ hw_desc_init(&desc[*seq_size]); set_setup_mode(&desc[*seq_size], SETUP_WRITE_STATE1); set_cipher_mode(&desc[*seq_size], cipher_mode); set_cipher_config0(&desc[*seq_size], direction); set_flow_mode(&desc[*seq_size], flow_mode); set_dout_dlli(&desc[*seq_size], iv_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT, 1); set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]); (*seq_size)++; break; default: dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode); } } static void cc_setup_state_desc(struct crypto_tfm *tfm, struct cipher_req_ctx *req_ctx, unsigned int ivsize, unsigned int nbytes, struct cc_hw_desc desc[], unsigned int *seq_size) { struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct device *dev = drvdata_to_dev(ctx_p->drvdata); int cipher_mode = ctx_p->cipher_mode; int flow_mode = ctx_p->flow_mode; int direction = req_ctx->gen_ctx.op_type; dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr; switch (cipher_mode) { case DRV_CIPHER_ECB: break; case DRV_CIPHER_CBC: case DRV_CIPHER_CBC_CTS: case DRV_CIPHER_CTR: case DRV_CIPHER_OFB: /* Load IV */ hw_desc_init(&desc[*seq_size]); set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize, NS_BIT); set_cipher_config0(&desc[*seq_size], direction); set_flow_mode(&desc[*seq_size], flow_mode); set_cipher_mode(&desc[*seq_size], cipher_mode); if (cipher_mode == DRV_CIPHER_CTR || cipher_mode == DRV_CIPHER_OFB) { set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1); } else { set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0); } (*seq_size)++; break; case DRV_CIPHER_XTS: case DRV_CIPHER_ESSIV: break; default: dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode); } } static void cc_setup_xex_state_desc(struct crypto_tfm *tfm, struct cipher_req_ctx *req_ctx, unsigned int ivsize, unsigned int nbytes, struct cc_hw_desc desc[], unsigned int *seq_size) { struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct device *dev = drvdata_to_dev(ctx_p->drvdata); int cipher_mode = ctx_p->cipher_mode; int flow_mode = ctx_p->flow_mode; int direction = req_ctx->gen_ctx.op_type; dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr; unsigned int key_len = (ctx_p->keylen / 2); dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr; unsigned int key_offset = key_len; switch (cipher_mode) { case DRV_CIPHER_ECB: break; case DRV_CIPHER_CBC: case DRV_CIPHER_CBC_CTS: case DRV_CIPHER_CTR: case DRV_CIPHER_OFB: break; case DRV_CIPHER_XTS: case DRV_CIPHER_ESSIV: if (cipher_mode == DRV_CIPHER_ESSIV) key_len = SHA256_DIGEST_SIZE; /* load XEX key */ hw_desc_init(&desc[*seq_size]); set_cipher_mode(&desc[*seq_size], cipher_mode); set_cipher_config0(&desc[*seq_size], direction); if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) { set_hw_crypto_key(&desc[*seq_size], ctx_p->hw.key2_slot); } else { set_din_type(&desc[*seq_size], DMA_DLLI, (key_dma_addr + key_offset), key_len, NS_BIT); } set_xex_data_unit_size(&desc[*seq_size], nbytes); set_flow_mode(&desc[*seq_size], S_DIN_to_AES2); set_key_size_aes(&desc[*seq_size], key_len); set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY); (*seq_size)++; /* Load IV */ hw_desc_init(&desc[*seq_size]); set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1); set_cipher_mode(&desc[*seq_size], cipher_mode); set_cipher_config0(&desc[*seq_size], direction); set_key_size_aes(&desc[*seq_size], key_len); 
set_flow_mode(&desc[*seq_size], flow_mode); set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, CC_AES_BLOCK_SIZE, NS_BIT); (*seq_size)++; break; default: dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode); } } static int cc_out_flow_mode(struct cc_cipher_ctx *ctx_p) { switch (ctx_p->flow_mode) { case S_DIN_to_AES: return DIN_AES_DOUT; case S_DIN_to_DES: return DIN_DES_DOUT; case S_DIN_to_SM4: return DIN_SM4_DOUT; default: return ctx_p->flow_mode; } } static void cc_setup_key_desc(struct crypto_tfm *tfm, struct cipher_req_ctx *req_ctx, unsigned int nbytes, struct cc_hw_desc desc[], unsigned int *seq_size) { struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct device *dev = drvdata_to_dev(ctx_p->drvdata); int cipher_mode = ctx_p->cipher_mode; int flow_mode = ctx_p->flow_mode; int direction = req_ctx->gen_ctx.op_type; dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr; unsigned int key_len = ctx_p->keylen; unsigned int din_size; switch (cipher_mode) { case DRV_CIPHER_CBC: case DRV_CIPHER_CBC_CTS: case DRV_CIPHER_CTR: case DRV_CIPHER_OFB: case DRV_CIPHER_ECB: /* Load key */ hw_desc_init(&desc[*seq_size]); set_cipher_mode(&desc[*seq_size], cipher_mode); set_cipher_config0(&desc[*seq_size], direction); if (cc_key_type(tfm) == CC_POLICY_PROTECTED_KEY) { /* We use the AES key size coding for all CPP algs */ set_key_size_aes(&desc[*seq_size], key_len); set_cpp_crypto_key(&desc[*seq_size], ctx_p->cpp.slot); flow_mode = cc_out_flow_mode(ctx_p); } else { if (flow_mode == S_DIN_to_AES) { if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) { set_hw_crypto_key(&desc[*seq_size], ctx_p->hw.key1_slot); } else { /* CC_POLICY_UNPROTECTED_KEY * Invalid keys are filtered out in * sethkey() */ din_size = (key_len == 24) ? AES_MAX_KEY_SIZE : key_len; set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr, din_size, NS_BIT); } set_key_size_aes(&desc[*seq_size], key_len); } else { /*des*/ set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr, key_len, NS_BIT); set_key_size_des(&desc[*seq_size], key_len); } set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0); } set_flow_mode(&desc[*seq_size], flow_mode); (*seq_size)++; break; case DRV_CIPHER_XTS: case DRV_CIPHER_ESSIV: /* Load AES key */ hw_desc_init(&desc[*seq_size]); set_cipher_mode(&desc[*seq_size], cipher_mode); set_cipher_config0(&desc[*seq_size], direction); if (cc_key_type(tfm) == CC_HW_PROTECTED_KEY) { set_hw_crypto_key(&desc[*seq_size], ctx_p->hw.key1_slot); } else { set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr, (key_len / 2), NS_BIT); } set_key_size_aes(&desc[*seq_size], (key_len / 2)); set_flow_mode(&desc[*seq_size], flow_mode); set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0); (*seq_size)++; break; default: dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode); } } static void cc_setup_mlli_desc(struct crypto_tfm *tfm, struct cipher_req_ctx *req_ctx, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes, void *areq, struct cc_hw_desc desc[], unsigned int *seq_size) { struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct device *dev = drvdata_to_dev(ctx_p->drvdata); if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { /* bypass */ dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n", &req_ctx->mlli_params.mlli_dma_addr, req_ctx->mlli_params.mlli_len, ctx_p->drvdata->mlli_sram_addr); hw_desc_init(&desc[*seq_size]); set_din_type(&desc[*seq_size], DMA_DLLI, req_ctx->mlli_params.mlli_dma_addr, req_ctx->mlli_params.mlli_len, NS_BIT); set_dout_sram(&desc[*seq_size], ctx_p->drvdata->mlli_sram_addr, 
req_ctx->mlli_params.mlli_len); set_flow_mode(&desc[*seq_size], BYPASS); (*seq_size)++; } } static void cc_setup_flow_desc(struct crypto_tfm *tfm, struct cipher_req_ctx *req_ctx, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes, struct cc_hw_desc desc[], unsigned int *seq_size) { struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct device *dev = drvdata_to_dev(ctx_p->drvdata); unsigned int flow_mode = cc_out_flow_mode(ctx_p); bool last_desc = (ctx_p->key_type == CC_POLICY_PROTECTED_KEY || ctx_p->cipher_mode == DRV_CIPHER_ECB); /* Process */ if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) { dev_dbg(dev, " data params addr %pad length 0x%X\n", &sg_dma_address(src), nbytes); dev_dbg(dev, " data params addr %pad length 0x%X\n", &sg_dma_address(dst), nbytes); hw_desc_init(&desc[*seq_size]); set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src), nbytes, NS_BIT); set_dout_dlli(&desc[*seq_size], sg_dma_address(dst), nbytes, NS_BIT, (!last_desc ? 0 : 1)); if (last_desc) set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]); set_flow_mode(&desc[*seq_size], flow_mode); (*seq_size)++; } else { hw_desc_init(&desc[*seq_size]); set_din_type(&desc[*seq_size], DMA_MLLI, ctx_p->drvdata->mlli_sram_addr, req_ctx->in_mlli_nents, NS_BIT); if (req_ctx->out_nents == 0) { dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n", ctx_p->drvdata->mlli_sram_addr, ctx_p->drvdata->mlli_sram_addr); set_dout_mlli(&desc[*seq_size], ctx_p->drvdata->mlli_sram_addr, req_ctx->in_mlli_nents, NS_BIT, (!last_desc ? 0 : 1)); } else { dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n", ctx_p->drvdata->mlli_sram_addr, ctx_p->drvdata->mlli_sram_addr + (u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents); set_dout_mlli(&desc[*seq_size], (ctx_p->drvdata->mlli_sram_addr + (LLI_ENTRY_BYTE_SIZE * req_ctx->in_mlli_nents)), req_ctx->out_mlli_nents, NS_BIT, (!last_desc ? 0 : 1)); } if (last_desc) set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]); set_flow_mode(&desc[*seq_size], flow_mode); (*seq_size)++; } } static void cc_cipher_complete(struct device *dev, void *cc_req, int err) { struct skcipher_request *req = (struct skcipher_request *)cc_req; struct scatterlist *dst = req->dst; struct scatterlist *src = req->src; struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req); struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req); unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm); if (err != -EINPROGRESS) { /* Not a BACKLOG notification */ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); memcpy(req->iv, req_ctx->iv, ivsize); kfree_sensitive(req_ctx->iv); } skcipher_request_complete(req, err); } static int cc_cipher_process(struct skcipher_request *req, enum drv_crypto_direction direction) { struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req); struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm); struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req); unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm); struct scatterlist *dst = req->dst; struct scatterlist *src = req->src; unsigned int nbytes = req->cryptlen; void *iv = req->iv; struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm); struct device *dev = drvdata_to_dev(ctx_p->drvdata); struct cc_hw_desc desc[MAX_SKCIPHER_SEQ_LEN]; struct cc_crypto_req cc_req = {}; int rc; unsigned int seq_len = 0; gfp_t flags = cc_gfp_flags(&req->base); dev_dbg(dev, "%s req=%p iv=%p nbytes=%d\n", ((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ? 
"Encrypt" : "Decrypt"), req, iv, nbytes); /* STAT_PHASE_0: Init and sanity checks */ if (validate_data_size(ctx_p, nbytes)) { dev_dbg(dev, "Unsupported data size %d.\n", nbytes); rc = -EINVAL; goto exit_process; } if (nbytes == 0) { /* No data to process is valid */ rc = 0; goto exit_process; } if (ctx_p->fallback_on) { struct skcipher_request *subreq = skcipher_request_ctx(req); *subreq = *req; skcipher_request_set_tfm(subreq, ctx_p->fallback_tfm); if (direction == DRV_CRYPTO_DIRECTION_ENCRYPT) return crypto_skcipher_encrypt(subreq); else return crypto_skcipher_decrypt(subreq); } /* The IV we are handed may be allocated from the stack so * we must copy it to a DMAable buffer before use. */ req_ctx->iv = kmemdup(iv, ivsize, flags); if (!req_ctx->iv) { rc = -ENOMEM; goto exit_process; } /* Setup request structure */ cc_req.user_cb = cc_cipher_complete; cc_req.user_arg = req; /* Setup CPP operation details */ if (ctx_p->key_type == CC_POLICY_PROTECTED_KEY) { cc_req.cpp.is_cpp = true; cc_req.cpp.alg = ctx_p->cpp.alg; cc_req.cpp.slot = ctx_p->cpp.slot; } /* Setup request context */ req_ctx->gen_ctx.op_type = direction; /* STAT_PHASE_1: Map buffers */ rc = cc_map_cipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes, req_ctx->iv, src, dst, flags); if (rc) { dev_err(dev, "map_request() failed\n"); goto exit_process; } /* STAT_PHASE_2: Create sequence */ /* Setup state (IV) */ cc_setup_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len); /* Setup MLLI line, if needed */ cc_setup_mlli_desc(tfm, req_ctx, dst, src, nbytes, req, desc, &seq_len); /* Setup key */ cc_setup_key_desc(tfm, req_ctx, nbytes, desc, &seq_len); /* Setup state (IV and XEX key) */ cc_setup_xex_state_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len); /* Data processing */ cc_setup_flow_desc(tfm, req_ctx, dst, src, nbytes, desc, &seq_len); /* Read next IV */ cc_setup_readiv_desc(tfm, req_ctx, ivsize, desc, &seq_len); /* STAT_PHASE_3: Lock HW and push sequence */ rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len, &req->base); if (rc != -EINPROGRESS && rc != -EBUSY) { /* Failed to send the request or request completed * synchronously */ cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); } exit_process: if (rc != -EINPROGRESS && rc != -EBUSY) { kfree_sensitive(req_ctx->iv); } return rc; } static int cc_cipher_encrypt(struct skcipher_request *req) { struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req); memset(req_ctx, 0, sizeof(*req_ctx)); return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT); } static int cc_cipher_decrypt(struct skcipher_request *req) { struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req); memset(req_ctx, 0, sizeof(*req_ctx)); return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT); } /* Block cipher alg */ static const struct cc_alg_template skcipher_algs[] = { { .name = "xts(paes)", .driver_name = "xts-paes-ccree", .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = CC_HW_KEY_SIZE, .max_keysize = CC_HW_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_XTS, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, .sec_func = true, }, { .name = "essiv(cbc(paes),sha256)", .driver_name = "essiv-paes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = CC_HW_KEY_SIZE, .max_keysize = CC_HW_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode 
= DRV_CIPHER_ESSIV, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, .std_body = CC_STD_NIST, .sec_func = true, }, { .name = "ecb(paes)", .driver_name = "ecb-paes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = CC_HW_KEY_SIZE, .max_keysize = CC_HW_KEY_SIZE, .ivsize = 0, }, .cipher_mode = DRV_CIPHER_ECB, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, .std_body = CC_STD_NIST, .sec_func = true, }, { .name = "cbc(paes)", .driver_name = "cbc-paes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = CC_HW_KEY_SIZE, .max_keysize = CC_HW_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, .std_body = CC_STD_NIST, .sec_func = true, }, { .name = "ofb(paes)", .driver_name = "ofb-paes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = CC_HW_KEY_SIZE, .max_keysize = CC_HW_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_OFB, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, .std_body = CC_STD_NIST, .sec_func = true, }, { .name = "cts(cbc(paes))", .driver_name = "cts-cbc-paes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = CC_HW_KEY_SIZE, .max_keysize = CC_HW_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CBC_CTS, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, .std_body = CC_STD_NIST, .sec_func = true, }, { .name = "ctr(paes)", .driver_name = "ctr-paes-ccree", .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = CC_HW_KEY_SIZE, .max_keysize = CC_HW_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CTR, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, .std_body = CC_STD_NIST, .sec_func = true, }, { /* See https://www.mail-archive.com/[email protected]/msg40576.html * for the reason why this differs from the generic * implementation. 
*/ .name = "xts(aes)", .driver_name = "xts-aes-ccree", .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE * 2, .max_keysize = AES_MAX_KEY_SIZE * 2, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_XTS, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "essiv(cbc(aes),sha256)", .driver_name = "essiv-aes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_ESSIV, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_712, .std_body = CC_STD_NIST, }, { .name = "ecb(aes)", .driver_name = "ecb-aes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = 0, }, .cipher_mode = DRV_CIPHER_ECB, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "cbc(aes)", .driver_name = "cbc-aes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "ofb(aes)", .driver_name = "ofb-aes-ccree", .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_OFB, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "cts(cbc(aes))", .driver_name = "cts-cbc-aes-ccree", .blocksize = AES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CBC_CTS, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "ctr(aes)", .driver_name = "ctr-aes-ccree", .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = AES_MIN_KEY_SIZE, .max_keysize = AES_MAX_KEY_SIZE, .ivsize = AES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CTR, .flow_mode = S_DIN_to_AES, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "cbc(des3_ede)", .driver_name = "cbc-3des-ccree", .blocksize = DES3_EDE_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = DES3_EDE_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_DES, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "ecb(des3_ede)", .driver_name = "ecb-3des-ccree", .blocksize = DES3_EDE_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = DES3_EDE_KEY_SIZE, .max_keysize = DES3_EDE_KEY_SIZE, .ivsize = 0, }, .cipher_mode = 
DRV_CIPHER_ECB, .flow_mode = S_DIN_to_DES, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "cbc(des)", .driver_name = "cbc-des-ccree", .blocksize = DES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = DES_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_DES, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "ecb(des)", .driver_name = "ecb-des-ccree", .blocksize = DES_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = DES_KEY_SIZE, .max_keysize = DES_KEY_SIZE, .ivsize = 0, }, .cipher_mode = DRV_CIPHER_ECB, .flow_mode = S_DIN_to_DES, .min_hw_rev = CC_HW_REV_630, .std_body = CC_STD_NIST, }, { .name = "cbc(sm4)", .driver_name = "cbc-sm4-ccree", .blocksize = SM4_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = SM4_KEY_SIZE, .max_keysize = SM4_KEY_SIZE, .ivsize = SM4_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_SM4, .min_hw_rev = CC_HW_REV_713, .std_body = CC_STD_OSCCA, }, { .name = "ecb(sm4)", .driver_name = "ecb-sm4-ccree", .blocksize = SM4_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = SM4_KEY_SIZE, .max_keysize = SM4_KEY_SIZE, .ivsize = 0, }, .cipher_mode = DRV_CIPHER_ECB, .flow_mode = S_DIN_to_SM4, .min_hw_rev = CC_HW_REV_713, .std_body = CC_STD_OSCCA, }, { .name = "ctr(sm4)", .driver_name = "ctr-sm4-ccree", .blocksize = 1, .template_skcipher = { .setkey = cc_cipher_setkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = SM4_KEY_SIZE, .max_keysize = SM4_KEY_SIZE, .ivsize = SM4_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CTR, .flow_mode = S_DIN_to_SM4, .min_hw_rev = CC_HW_REV_713, .std_body = CC_STD_OSCCA, }, { .name = "cbc(psm4)", .driver_name = "cbc-psm4-ccree", .blocksize = SM4_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = CC_HW_KEY_SIZE, .max_keysize = CC_HW_KEY_SIZE, .ivsize = SM4_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CBC, .flow_mode = S_DIN_to_SM4, .min_hw_rev = CC_HW_REV_713, .std_body = CC_STD_OSCCA, .sec_func = true, }, { .name = "ctr(psm4)", .driver_name = "ctr-psm4-ccree", .blocksize = SM4_BLOCK_SIZE, .template_skcipher = { .setkey = cc_cipher_sethkey, .encrypt = cc_cipher_encrypt, .decrypt = cc_cipher_decrypt, .min_keysize = CC_HW_KEY_SIZE, .max_keysize = CC_HW_KEY_SIZE, .ivsize = SM4_BLOCK_SIZE, }, .cipher_mode = DRV_CIPHER_CTR, .flow_mode = S_DIN_to_SM4, .min_hw_rev = CC_HW_REV_713, .std_body = CC_STD_OSCCA, .sec_func = true, }, }; static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl, struct device *dev) { struct cc_crypto_alg *t_alg; struct skcipher_alg *alg; t_alg = devm_kzalloc(dev, sizeof(*t_alg), GFP_KERNEL); if (!t_alg) return ERR_PTR(-ENOMEM); alg = &t_alg->skcipher_alg; memcpy(alg, &tmpl->template_skcipher, sizeof(*alg)); snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->driver_name); alg->base.cra_module = THIS_MODULE; alg->base.cra_priority = CC_CRA_PRIO; alg->base.cra_blocksize = tmpl->blocksize; alg->base.cra_alignmask = 0; alg->base.cra_ctxsize = 
sizeof(struct cc_cipher_ctx); alg->base.cra_init = cc_cipher_init; alg->base.cra_exit = cc_cipher_exit; alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY; t_alg->cipher_mode = tmpl->cipher_mode; t_alg->flow_mode = tmpl->flow_mode; return t_alg; } int cc_cipher_free(struct cc_drvdata *drvdata) { struct cc_crypto_alg *t_alg, *n; /* Remove registered algs */ list_for_each_entry_safe(t_alg, n, &drvdata->alg_list, entry) { crypto_unregister_skcipher(&t_alg->skcipher_alg); list_del(&t_alg->entry); } return 0; } int cc_cipher_alloc(struct cc_drvdata *drvdata) { struct cc_crypto_alg *t_alg; struct device *dev = drvdata_to_dev(drvdata); int rc = -ENOMEM; int alg; INIT_LIST_HEAD(&drvdata->alg_list); /* Linux crypto */ dev_dbg(dev, "Number of algorithms = %zu\n", ARRAY_SIZE(skcipher_algs)); for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) { if ((skcipher_algs[alg].min_hw_rev > drvdata->hw_rev) || !(drvdata->std_bodies & skcipher_algs[alg].std_body) || (drvdata->sec_disabled && skcipher_algs[alg].sec_func)) continue; dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name); t_alg = cc_create_alg(&skcipher_algs[alg], dev); if (IS_ERR(t_alg)) { rc = PTR_ERR(t_alg); dev_err(dev, "%s alg allocation failed\n", skcipher_algs[alg].driver_name); goto fail0; } t_alg->drvdata = drvdata; dev_dbg(dev, "registering %s\n", skcipher_algs[alg].driver_name); rc = crypto_register_skcipher(&t_alg->skcipher_alg); dev_dbg(dev, "%s alg registration rc = %x\n", t_alg->skcipher_alg.base.cra_driver_name, rc); if (rc) { dev_err(dev, "%s alg registration failed\n", t_alg->skcipher_alg.base.cra_driver_name); goto fail0; } list_add_tail(&t_alg->entry, &drvdata->alg_list); dev_dbg(dev, "Registered %s\n", t_alg->skcipher_alg.base.cra_driver_name); } return 0; fail0: cc_cipher_free(drvdata); return rc; }
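The algorithm table and registration loop above are only reachable through the generic kernel crypto API. As a rough, hypothetical sketch (not part of this file), an in-kernel caller could exercise the "cbc(aes)" implementation registered here as follows; the helper name, buffer handling and key are invented for illustration:

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical helper: CBC-encrypt 'len' bytes in place.
 * 'buf' must be DMA-able (e.g. kmalloc'ed, not on the stack) and 'len'
 * a multiple of AES_BLOCK_SIZE. If the ccree driver is loaded and wins
 * on priority, the request is served by "cbc-aes-ccree" registered above.
 */
static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	sg_init_one(&sg, buf, len);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					   CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	/* The driver completes asynchronously; wait for its completion callback */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}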
linux-master
drivers/crypto/ccree/cc_cipher.c
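For ESSIV, cc_cipher_setkey() above stores the user key followed by its SHA-256 digest in the DMA-mapped key buffer and doubles keylen, so the hardware sees key1 || key2 with key2 = SHA-256(key1). A minimal standalone sketch of that derivation, assuming the same crypto_shash_tfm_digest() helper; the function name is hypothetical:

#include <crypto/hash.h>
#include <crypto/aes.h>
#include <crypto/sha2.h>
#include <linux/err.h>
#include <linux/string.h>

/* Illustrative only: expand an ESSIV key the way cc_cipher_setkey() does.
 * 'out' must have room for keylen + SHA256_DIGEST_SIZE bytes.
 */
static int example_essiv_expand_key(const u8 *key, unsigned int keylen, u8 *out)
{
	struct crypto_shash *sha256;
	int err;

	/* The driver only accepts 256-bit ESSIV keys; smaller keys go to the fallback */
	if (keylen != AES_KEYSIZE_256)
		return -EINVAL;

	sha256 = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(sha256))
		return PTR_ERR(sha256);

	memcpy(out, key, keylen);
	err = crypto_shash_tfm_digest(sha256, key, keylen, out + keylen);

	crypto_free_shash(sha256);
	return err;
}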
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2012-2019 ARM Limited or its affiliates. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/crypto.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/of.h> #include <linux/clk.h> #include <linux/of_address.h> #include <linux/pm_runtime.h> #include "cc_driver.h" #include "cc_request_mgr.h" #include "cc_buffer_mgr.h" #include "cc_debugfs.h" #include "cc_cipher.h" #include "cc_aead.h" #include "cc_hash.h" #include "cc_sram_mgr.h" #include "cc_pm.h" #include "cc_fips.h" bool cc_dump_desc; module_param_named(dump_desc, cc_dump_desc, bool, 0600); MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid"); bool cc_dump_bytes; module_param_named(dump_bytes, cc_dump_bytes, bool, 0600); MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid"); static bool cc_sec_disable; module_param_named(sec_disable, cc_sec_disable, bool, 0600); MODULE_PARM_DESC(cc_sec_disable, "Disable security functions"); struct cc_hw_data { char *name; enum cc_hw_rev rev; u32 sig; u32 cidr_0123; u32 pidr_0124; int std_bodies; }; #define CC_NUM_IDRS 4 #define CC_HW_RESET_LOOP_COUNT 10 /* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */ static const u32 pidr_0124_offsets[CC_NUM_IDRS] = { CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1), CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4) }; static const u32 cidr_0123_offsets[CC_NUM_IDRS] = { CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1), CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3) }; /* Hardware revisions defs. */ /* The 703 is a OSCCA only variant of the 713 */ static const struct cc_hw_data cc703_hw = { .name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU, .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA }; static const struct cc_hw_data cc713_hw = { .name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU, .pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL }; static const struct cc_hw_data cc712_hw = { .name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U, .std_bodies = CC_STD_ALL }; static const struct cc_hw_data cc710_hw = { .name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U, .std_bodies = CC_STD_ALL }; static const struct cc_hw_data cc630p_hw = { .name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U, .std_bodies = CC_STD_ALL }; static const struct of_device_id arm_ccree_dev_of_match[] = { { .compatible = "arm,cryptocell-703-ree", .data = &cc703_hw }, { .compatible = "arm,cryptocell-713-ree", .data = &cc713_hw }, { .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw }, { .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw }, { .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw }, {} }; MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match); static void init_cc_cache_params(struct cc_drvdata *drvdata) { struct device *dev = drvdata_to_dev(drvdata); u32 cache_params, ace_const, val; u64 mask; /* compute CC_AXIM_CACHE_PARAMS */ cache_params = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS)); dev_dbg(dev, "Cache params previous: 0x%08X\n", cache_params); /* non cached or write-back, write allocate */ val = drvdata->coherent ? 
0xb : 0x2; mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_AWCACHE); cache_params &= ~mask; cache_params |= FIELD_PREP(mask, val); mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_AWCACHE_LAST); cache_params &= ~mask; cache_params |= FIELD_PREP(mask, val); mask = CC_GENMASK(CC_AXIM_CACHE_PARAMS_ARCACHE); cache_params &= ~mask; cache_params |= FIELD_PREP(mask, val); drvdata->cache_params = cache_params; dev_dbg(dev, "Cache params current: 0x%08X\n", cache_params); if (drvdata->hw_rev <= CC_HW_REV_710) return; /* compute CC_AXIM_ACE_CONST */ ace_const = cc_ioread(drvdata, CC_REG(AXIM_ACE_CONST)); dev_dbg(dev, "ACE-const previous: 0x%08X\n", ace_const); /* system or outer-sharable */ val = drvdata->coherent ? 0x2 : 0x3; mask = CC_GENMASK(CC_AXIM_ACE_CONST_ARDOMAIN); ace_const &= ~mask; ace_const |= FIELD_PREP(mask, val); mask = CC_GENMASK(CC_AXIM_ACE_CONST_AWDOMAIN); ace_const &= ~mask; ace_const |= FIELD_PREP(mask, val); dev_dbg(dev, "ACE-const current: 0x%08X\n", ace_const); drvdata->ace_const = ace_const; } static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets) { int i; union { u8 regs[CC_NUM_IDRS]; __le32 val; } idr; for (i = 0; i < CC_NUM_IDRS; ++i) idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]); return le32_to_cpu(idr.val); } void __dump_byte_array(const char *name, const u8 *buf, size_t len) { char prefix[64]; if (!buf) return; snprintf(prefix, sizeof(prefix), "%s[%zu]: ", name, len); print_hex_dump(KERN_DEBUG, prefix, DUMP_PREFIX_ADDRESS, 16, 1, buf, len, false); } static irqreturn_t cc_isr(int irq, void *dev_id) { struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id; struct device *dev = drvdata_to_dev(drvdata); u32 irr; u32 imr; /* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */ /* if driver suspended return, probably shared interrupt */ if (pm_runtime_suspended(dev)) return IRQ_NONE; /* read the interrupt status */ irr = cc_ioread(drvdata, CC_REG(HOST_IRR)); dev_dbg(dev, "Got IRR=0x%08X\n", irr); if (irr == 0) /* Probably shared interrupt line */ return IRQ_NONE; imr = cc_ioread(drvdata, CC_REG(HOST_IMR)); /* clear interrupt - must be before processing events */ cc_iowrite(drvdata, CC_REG(HOST_ICR), irr); drvdata->irq = irr; /* Completion interrupt - most probable */ if (irr & drvdata->comp_mask) { /* Mask all completion interrupts - will be unmasked in * deferred service handler */ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask); irr &= ~drvdata->comp_mask; complete_request(drvdata); } #ifdef CONFIG_CRYPTO_FIPS /* TEE FIPS interrupt */ if (irr & CC_GPR0_IRQ_MASK) { /* Mask interrupt - will be unmasked in Deferred service * handler */ cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK); irr &= ~CC_GPR0_IRQ_MASK; fips_handler(drvdata); } #endif /* AXI error interrupt */ if (irr & CC_AXI_ERR_IRQ_MASK) { u32 axi_err; /* Read the AXI error ID */ axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR)); dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n", axi_err); irr &= ~CC_AXI_ERR_IRQ_MASK; } if (irr) { dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n", irr); /* Just warning */ } return IRQ_HANDLED; } bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata) { unsigned int val; unsigned int i; /* 712/710/63 has no reset completion indication, always return true */ if (drvdata->hw_rev <= CC_HW_REV_712) return true; for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) { /* in cc7x3 NVM_IS_IDLE indicates that CC reset is * completed and device is fully functional */ val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE)); if (val & 
CC_NVM_IS_IDLE_MASK) { /* hw indicate reset completed */ return true; } /* allow scheduling other process on the processor */ schedule(); } /* reset not completed */ return false; } int init_cc_regs(struct cc_drvdata *drvdata) { unsigned int val; struct device *dev = drvdata_to_dev(drvdata); /* Unmask all AXI interrupt sources AXI_CFG1 register */ /* AXI interrupt config are obsoleted startign at cc7x3 */ if (drvdata->hw_rev <= CC_HW_REV_712) { val = cc_ioread(drvdata, CC_REG(AXIM_CFG)); cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK); dev_dbg(dev, "AXIM_CFG=0x%08X\n", cc_ioread(drvdata, CC_REG(AXIM_CFG))); } /* Clear all pending interrupts */ val = cc_ioread(drvdata, CC_REG(HOST_IRR)); dev_dbg(dev, "IRR=0x%08X\n", val); cc_iowrite(drvdata, CC_REG(HOST_ICR), val); /* Unmask relevant interrupt cause */ val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK; if (drvdata->hw_rev >= CC_HW_REV_712) val |= CC_GPR0_IRQ_MASK; cc_iowrite(drvdata, CC_REG(HOST_IMR), ~val); cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), drvdata->cache_params); if (drvdata->hw_rev >= CC_HW_REV_712) cc_iowrite(drvdata, CC_REG(AXIM_ACE_CONST), drvdata->ace_const); return 0; } static int init_cc_resources(struct platform_device *plat_dev) { struct resource *req_mem_cc_regs = NULL; struct cc_drvdata *new_drvdata; struct device *dev = &plat_dev->dev; struct device_node *np = dev->of_node; u32 val, hw_rev_pidr, sig_cidr; u64 dma_mask; const struct cc_hw_data *hw_rev; struct clk *clk; int irq; int rc = 0; new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL); if (!new_drvdata) return -ENOMEM; hw_rev = of_device_get_match_data(dev); new_drvdata->hw_rev_name = hw_rev->name; new_drvdata->hw_rev = hw_rev->rev; new_drvdata->std_bodies = hw_rev->std_bodies; if (hw_rev->rev >= CC_HW_REV_712) { new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP); new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712); new_drvdata->ver_offset = CC_REG(HOST_VERSION_712); } else { new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8); new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630); new_drvdata->ver_offset = CC_REG(HOST_VERSION_630); } new_drvdata->comp_mask = CC_COMP_IRQ_MASK; platform_set_drvdata(plat_dev, new_drvdata); new_drvdata->plat_dev = plat_dev; clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(clk)) return dev_err_probe(dev, PTR_ERR(clk), "Error getting clock\n"); new_drvdata->clk = clk; new_drvdata->coherent = of_dma_is_coherent(np); /* Get device resources */ /* First CC registers space */ /* Map registers space */ new_drvdata->cc_base = devm_platform_get_and_ioremap_resource(plat_dev, 0, &req_mem_cc_regs); if (IS_ERR(new_drvdata->cc_base)) return PTR_ERR(new_drvdata->cc_base); dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name, req_mem_cc_regs); dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n", &req_mem_cc_regs->start, new_drvdata->cc_base); /* Then IRQ */ irq = platform_get_irq(plat_dev, 0); if (irq < 0) return irq; init_completion(&new_drvdata->hw_queue_avail); if (!dev->dma_mask) dev->dma_mask = &dev->coherent_dma_mask; dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN); rc = dma_set_coherent_mask(dev, dma_mask); if (rc) { dev_err(dev, "Failed in dma_set_coherent_mask, mask=%llx\n", dma_mask); return rc; } rc = clk_prepare_enable(new_drvdata->clk); if (rc) { dev_err(dev, "Failed to enable clock"); return rc; } new_drvdata->sec_disabled = cc_sec_disable; pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); rc = 
pm_runtime_get_sync(dev); if (rc < 0) { dev_err(dev, "pm_runtime_get_sync() failed: %d\n", rc); goto post_pm_err; } /* Wait for Cryptocell reset completion */ if (!cc_wait_for_reset_completion(new_drvdata)) { dev_err(dev, "Cryptocell reset not completed"); } if (hw_rev->rev <= CC_HW_REV_712) { /* Verify correct mapping */ val = cc_ioread(new_drvdata, new_drvdata->sig_offset); if (val != hw_rev->sig) { dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n", val, hw_rev->sig); rc = -EINVAL; goto post_pm_err; } sig_cidr = val; hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset); } else { /* Verify correct mapping */ val = cc_read_idr(new_drvdata, pidr_0124_offsets); if (val != hw_rev->pidr_0124) { dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n", val, hw_rev->pidr_0124); rc = -EINVAL; goto post_pm_err; } hw_rev_pidr = val; val = cc_read_idr(new_drvdata, cidr_0123_offsets); if (val != hw_rev->cidr_0123) { dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n", val, hw_rev->cidr_0123); rc = -EINVAL; goto post_pm_err; } sig_cidr = val; /* Check HW engine configuration */ val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS)); switch (val) { case CC_PINS_FULL: /* This is fine */ break; case CC_PINS_SLIM: if (new_drvdata->std_bodies & CC_STD_NIST) { dev_warn(dev, "703 mode forced due to HW configuration.\n"); new_drvdata->std_bodies = CC_STD_OSCCA; } break; default: dev_err(dev, "Unsupported engines configuration.\n"); rc = -EINVAL; goto post_pm_err; } /* Check security disable state */ val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED)); val &= CC_SECURITY_DISABLED_MASK; new_drvdata->sec_disabled |= !!val; if (!new_drvdata->sec_disabled) { new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK; if (new_drvdata->std_bodies & CC_STD_NIST) new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK; } } if (new_drvdata->sec_disabled) dev_info(dev, "Security Disabled mode is in effect. 
Security functions disabled.\n"); /* Display HW versions */ dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n", hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION); /* register the driver isr function */ rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree", new_drvdata); if (rc) { dev_err(dev, "Could not register to interrupt %d\n", irq); goto post_pm_err; } dev_dbg(dev, "Registered to IRQ: %d\n", irq); init_cc_cache_params(new_drvdata); rc = init_cc_regs(new_drvdata); if (rc) { dev_err(dev, "init_cc_regs failed\n"); goto post_pm_err; } rc = cc_debugfs_init(new_drvdata); if (rc) { dev_err(dev, "Failed registering debugfs interface\n"); goto post_regs_err; } rc = cc_fips_init(new_drvdata); if (rc) { dev_err(dev, "cc_fips_init failed 0x%x\n", rc); goto post_debugfs_err; } rc = cc_sram_mgr_init(new_drvdata); if (rc) { dev_err(dev, "cc_sram_mgr_init failed\n"); goto post_fips_init_err; } new_drvdata->mlli_sram_addr = cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE); if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) { rc = -ENOMEM; goto post_fips_init_err; } rc = cc_req_mgr_init(new_drvdata); if (rc) { dev_err(dev, "cc_req_mgr_init failed\n"); goto post_fips_init_err; } rc = cc_buffer_mgr_init(new_drvdata); if (rc) { dev_err(dev, "cc_buffer_mgr_init failed\n"); goto post_req_mgr_err; } /* hash must be allocated first due to use of send_request_init() * and dependency of AEAD on it */ rc = cc_hash_alloc(new_drvdata); if (rc) { dev_err(dev, "cc_hash_alloc failed\n"); goto post_buf_mgr_err; } /* Allocate crypto algs */ rc = cc_cipher_alloc(new_drvdata); if (rc) { dev_err(dev, "cc_cipher_alloc failed\n"); goto post_hash_err; } rc = cc_aead_alloc(new_drvdata); if (rc) { dev_err(dev, "cc_aead_alloc failed\n"); goto post_cipher_err; } /* If we got here and FIPS mode is enabled * it means all FIPS test passed, so let TEE * know we're good. 
*/ cc_set_ree_fips_status(new_drvdata, true); pm_runtime_put(dev); return 0; post_cipher_err: cc_cipher_free(new_drvdata); post_hash_err: cc_hash_free(new_drvdata); post_buf_mgr_err: cc_buffer_mgr_fini(new_drvdata); post_req_mgr_err: cc_req_mgr_fini(new_drvdata); post_fips_init_err: cc_fips_fini(new_drvdata); post_debugfs_err: cc_debugfs_fini(new_drvdata); post_regs_err: fini_cc_regs(new_drvdata); post_pm_err: pm_runtime_put_noidle(dev); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); clk_disable_unprepare(new_drvdata->clk); return rc; } void fini_cc_regs(struct cc_drvdata *drvdata) { /* Mask all interrupts */ cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF); } static void cleanup_cc_resources(struct platform_device *plat_dev) { struct device *dev = &plat_dev->dev; struct cc_drvdata *drvdata = (struct cc_drvdata *)platform_get_drvdata(plat_dev); cc_aead_free(drvdata); cc_cipher_free(drvdata); cc_hash_free(drvdata); cc_buffer_mgr_fini(drvdata); cc_req_mgr_fini(drvdata); cc_fips_fini(drvdata); cc_debugfs_fini(drvdata); fini_cc_regs(drvdata); pm_runtime_put_noidle(dev); pm_runtime_disable(dev); pm_runtime_set_suspended(dev); clk_disable_unprepare(drvdata->clk); } unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata) { if (drvdata->hw_rev >= CC_HW_REV_712) return HASH_LEN_SIZE_712; else return HASH_LEN_SIZE_630; } static int ccree_probe(struct platform_device *plat_dev) { int rc; struct device *dev = &plat_dev->dev; /* Map registers space */ rc = init_cc_resources(plat_dev); if (rc) return rc; dev_info(dev, "ARM ccree device initialized\n"); return 0; } static int ccree_remove(struct platform_device *plat_dev) { struct device *dev = &plat_dev->dev; dev_dbg(dev, "Releasing ccree resources...\n"); cleanup_cc_resources(plat_dev); dev_info(dev, "ARM ccree device terminated\n"); return 0; } static struct platform_driver ccree_driver = { .driver = { .name = "ccree", .of_match_table = arm_ccree_dev_of_match, #ifdef CONFIG_PM .pm = &ccree_pm, #endif }, .probe = ccree_probe, .remove = ccree_remove, }; static int __init ccree_init(void) { int rc; cc_debugfs_global_init(); rc = platform_driver_register(&ccree_driver); if (rc) { cc_debugfs_global_fini(); return rc; } return 0; } module_init(ccree_init); static void __exit ccree_exit(void) { platform_driver_unregister(&ccree_driver); cc_debugfs_global_fini(); } module_exit(ccree_exit); /* Module description */ MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver"); MODULE_VERSION(DRV_MODULE_VERSION); MODULE_AUTHOR("ARM"); MODULE_LICENSE("GPL v2");
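init_cc_cache_params() above updates each AXI cache field with the same clear-then-FIELD_PREP read-modify-write pattern. A small self-contained sketch of that pattern, using an invented mask purely for illustration:

#include <linux/bitfield.h>
#include <linux/bits.h>

/* Hypothetical field layout, for the sketch only */
#define EXAMPLE_AWCACHE_MASK	GENMASK(7, 4)

/* Clear the field, then OR in the new value positioned by FIELD_PREP(),
 * exactly as init_cc_cache_params() does for AWCACHE/ARCACHE.
 */
static u32 example_update_awcache(u32 cache_params, u32 val)
{
	cache_params &= ~EXAMPLE_AWCACHE_MASK;
	cache_params |= FIELD_PREP(EXAMPLE_AWCACHE_MASK, val);
	return cache_params;
}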
linux-master
drivers/crypto/ccree/cc_driver.c
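cc_read_idr() above packs the low byte of four ID registers, least significant register first, into one 32-bit word. A hedged sketch of the equivalent arithmetic; the standard Arm PrimeCell component ID bytes 0x0D, 0xF0, 0x05, 0xB1 reproduce the 0xB105F00D value compared against cc713_hw.cidr_0123 in init_cc_resources():

/* Sketch only: each COMPONENT_ID_n / PERIPHERAL_ID_n register holds one
 * identification byte in its low 8 bits; cc_read_idr() packs them
 * little-endian into a single word.
 */
static u32 example_pack_idr(u8 id0, u8 id1, u8 id2, u8 id3)
{
	return (u32)id0 | ((u32)id1 << 8) | ((u32)id2 << 16) | ((u32)id3 << 24);
}

/* example_pack_idr(0x0D, 0xF0, 0x05, 0xB1) == 0xB105F00D */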
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2012-2019 ARM Limited or its affiliates. */ #include <linux/kernel.h> #include <linux/debugfs.h> #include <linux/stringify.h> #include "cc_driver.h" #include "cc_crypto_ctx.h" #include "cc_debugfs.h" #define CC_DEBUG_REG(_X) { \ .name = __stringify(_X),\ .offset = CC_REG(_X) \ } /* * This is a global var for the dentry of the * debugfs ccree/ dir. It is not tied down to * a specific instance of ccree, hence it is * global. */ static struct dentry *cc_debugfs_dir; static struct debugfs_reg32 ver_sig_regs[] = { { .name = "SIGNATURE" }, /* Must be 0th */ { .name = "VERSION" }, /* Must be 1st */ }; static const struct debugfs_reg32 pid_cid_regs[] = { CC_DEBUG_REG(PERIPHERAL_ID_0), CC_DEBUG_REG(PERIPHERAL_ID_1), CC_DEBUG_REG(PERIPHERAL_ID_2), CC_DEBUG_REG(PERIPHERAL_ID_3), CC_DEBUG_REG(PERIPHERAL_ID_4), CC_DEBUG_REG(COMPONENT_ID_0), CC_DEBUG_REG(COMPONENT_ID_1), CC_DEBUG_REG(COMPONENT_ID_2), CC_DEBUG_REG(COMPONENT_ID_3), }; static const struct debugfs_reg32 debug_regs[] = { CC_DEBUG_REG(HOST_IRR), CC_DEBUG_REG(HOST_POWER_DOWN_EN), CC_DEBUG_REG(AXIM_MON_ERR), CC_DEBUG_REG(DSCRPTR_QUEUE_CONTENT), CC_DEBUG_REG(HOST_IMR), CC_DEBUG_REG(AXIM_CFG), CC_DEBUG_REG(AXIM_CACHE_PARAMS), CC_DEBUG_REG(GPR_HOST), CC_DEBUG_REG(AXIM_MON_COMP), }; void __init cc_debugfs_global_init(void) { cc_debugfs_dir = debugfs_create_dir("ccree", NULL); } void cc_debugfs_global_fini(void) { debugfs_remove(cc_debugfs_dir); } int cc_debugfs_init(struct cc_drvdata *drvdata) { struct device *dev = drvdata_to_dev(drvdata); struct debugfs_regset32 *regset, *verset; regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); if (!regset) return -ENOMEM; regset->regs = debug_regs; regset->nregs = ARRAY_SIZE(debug_regs); regset->base = drvdata->cc_base; regset->dev = dev; drvdata->dir = debugfs_create_dir(drvdata->plat_dev->name, cc_debugfs_dir); debugfs_create_regset32("regs", 0400, drvdata->dir, regset); debugfs_create_bool("coherent", 0400, drvdata->dir, &drvdata->coherent); verset = devm_kzalloc(dev, sizeof(*verset), GFP_KERNEL); /* Failing here is not important enough to fail the module load */ if (!verset) return 0; if (drvdata->hw_rev <= CC_HW_REV_712) { ver_sig_regs[0].offset = drvdata->sig_offset; ver_sig_regs[1].offset = drvdata->ver_offset; verset->regs = ver_sig_regs; verset->nregs = ARRAY_SIZE(ver_sig_regs); } else { verset->regs = pid_cid_regs; verset->nregs = ARRAY_SIZE(pid_cid_regs); } verset->base = drvdata->cc_base; verset->dev = dev; debugfs_create_regset32("version", 0400, drvdata->dir, verset); return 0; } void cc_debugfs_fini(struct cc_drvdata *drvdata) { debugfs_remove_recursive(drvdata->dir); }
linux-master
drivers/crypto/ccree/cc_debugfs.c
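cc_debugfs_init() above exposes register windows through debugfs_create_regset32(). The following is a rough, self-contained illustration of the same pattern with hypothetical register names; it is not taken from the driver:

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/slab.h>

/* Hypothetical register offsets, for the sketch only */
#define EXAMPLE_REG_STATUS	0x00
#define EXAMPLE_REG_IRQ_MASK	0x04

static const struct debugfs_reg32 example_regs[] = {
	{ .name = "STATUS",   .offset = EXAMPLE_REG_STATUS },
	{ .name = "IRQ_MASK", .offset = EXAMPLE_REG_IRQ_MASK },
};

/* Create <parent>/regs, readable as a text dump of the listed registers */
static int example_debugfs_init(struct device *dev, void __iomem *base,
				struct dentry *parent)
{
	struct debugfs_regset32 *regset;

	regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL);
	if (!regset)
		return -ENOMEM;

	regset->regs = example_regs;
	regset->nregs = ARRAY_SIZE(example_regs);
	regset->base = base;
	regset->dev = dev;

	debugfs_create_regset32("regs", 0400, parent, regset);
	return 0;
}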
// SPDX-License-Identifier: MIT /* * vgaarb.c: Implements VGA arbitration. For details refer to * Documentation/gpu/vgaarbiter.rst * * (C) Copyright 2005 Benjamin Herrenschmidt <[email protected]> * (C) Copyright 2007 Paulo R. Zanoni <[email protected]> * (C) Copyright 2007, 2009 Tiago Vignatti <[email protected]> */ #define pr_fmt(fmt) "vgaarb: " fmt #define vgaarb_dbg(dev, fmt, arg...) dev_dbg(dev, "vgaarb: " fmt, ##arg) #define vgaarb_info(dev, fmt, arg...) dev_info(dev, "vgaarb: " fmt, ##arg) #define vgaarb_err(dev, fmt, arg...) dev_err(dev, "vgaarb: " fmt, ##arg) #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/list.h> #include <linux/sched/signal.h> #include <linux/wait.h> #include <linux/spinlock.h> #include <linux/poll.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/screen_info.h> #include <linux/vt.h> #include <linux/console.h> #include <linux/acpi.h> #include <linux/uaccess.h> #include <linux/vgaarb.h> static void vga_arbiter_notify_clients(void); /* * We keep a list of all VGA devices in the system to speed * up the various operations of the arbiter */ struct vga_device { struct list_head list; struct pci_dev *pdev; unsigned int decodes; /* what it decodes */ unsigned int owns; /* what it owns */ unsigned int locks; /* what it locks */ unsigned int io_lock_cnt; /* legacy IO lock count */ unsigned int mem_lock_cnt; /* legacy MEM lock count */ unsigned int io_norm_cnt; /* normal IO count */ unsigned int mem_norm_cnt; /* normal MEM count */ bool bridge_has_one_vga; bool is_firmware_default; /* device selected by firmware */ unsigned int (*set_decode)(struct pci_dev *pdev, bool decode); }; static LIST_HEAD(vga_list); static int vga_count, vga_decode_count; static bool vga_arbiter_used; static DEFINE_SPINLOCK(vga_lock); static DECLARE_WAIT_QUEUE_HEAD(vga_wait_queue); static const char *vga_iostate_to_str(unsigned int iostate) { /* Ignore VGA_RSRC_IO and VGA_RSRC_MEM */ iostate &= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; switch (iostate) { case VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM: return "io+mem"; case VGA_RSRC_LEGACY_IO: return "io"; case VGA_RSRC_LEGACY_MEM: return "mem"; } return "none"; } static int vga_str_to_iostate(char *buf, int str_size, unsigned int *io_state) { /* * In theory, we could hand out locks on IO and MEM separately to * userspace, but this can cause deadlocks. */ if (strncmp(buf, "none", 4) == 0) { *io_state = VGA_RSRC_NONE; return 1; } /* XXX We're not checking the str_size! */ if (strncmp(buf, "io+mem", 6) == 0) goto both; else if (strncmp(buf, "io", 2) == 0) goto both; else if (strncmp(buf, "mem", 3) == 0) goto both; return 0; both: *io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; return 1; } /* This is only used as a cookie, it should not be dereferenced */ static struct pci_dev *vga_default; /* Find somebody in our list */ static struct vga_device *vgadev_find(struct pci_dev *pdev) { struct vga_device *vgadev; list_for_each_entry(vgadev, &vga_list, list) if (pdev == vgadev->pdev) return vgadev; return NULL; } /** * vga_default_device - return the default VGA device, for vgacon * * This can be defined by the platform. The default implementation is * rather dumb and will probably only work properly on single VGA card * setups and/or x86 platforms. * * If your VGA default device is not PCI, you'll have to return NULL here. * In this case, I assume it will not conflict with any PCI card. 
If this * is not true, I'll have to define two arch hooks for enabling/disabling * the VGA default device if that is possible. This may be a problem with * real _ISA_ VGA cards, in addition to a PCI one. I don't know at this * point how to deal with that card. Can their IOs be disabled at all? If * not, then I suppose it's a matter of having the proper arch hook telling * us about it, so we basically never allow anybody to succeed a vga_get(). */ struct pci_dev *vga_default_device(void) { return vga_default; } EXPORT_SYMBOL_GPL(vga_default_device); void vga_set_default_device(struct pci_dev *pdev) { if (vga_default == pdev) return; pci_dev_put(vga_default); vga_default = pci_dev_get(pdev); } /** * vga_remove_vgacon - deactivate VGA console * * Unbind and unregister vgacon in case pdev is the default VGA device. * Can be called by GPU drivers on initialization to make sure VGA register * access done by vgacon will not disturb the device. * * @pdev: PCI device. */ #if !defined(CONFIG_VGA_CONSOLE) int vga_remove_vgacon(struct pci_dev *pdev) { return 0; } #elif !defined(CONFIG_DUMMY_CONSOLE) int vga_remove_vgacon(struct pci_dev *pdev) { return -ENODEV; } #else int vga_remove_vgacon(struct pci_dev *pdev) { int ret = 0; if (pdev != vga_default) return 0; vgaarb_info(&pdev->dev, "deactivate vga console\n"); console_lock(); if (con_is_bound(&vga_con)) ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); if (ret == 0) { ret = do_unregister_con_driver(&vga_con); /* Ignore "already unregistered". */ if (ret == -ENODEV) ret = 0; } console_unlock(); return ret; } #endif EXPORT_SYMBOL(vga_remove_vgacon); /* * If we don't ever use VGA arbitration, we should avoid turning off * anything anywhere due to old X servers getting confused about the boot * device not being VGA. */ static void vga_check_first_use(void) { /* * Inform all GPUs in the system that VGA arbitration has occurred * so they can disable resources if possible. */ if (!vga_arbiter_used) { vga_arbiter_used = true; vga_arbiter_notify_clients(); } } static struct vga_device *__vga_tryget(struct vga_device *vgadev, unsigned int rsrc) { struct device *dev = &vgadev->pdev->dev; unsigned int wants, legacy_wants, match; struct vga_device *conflict; unsigned int pci_bits; u32 flags = 0; /* * Account for "normal" resources to lock. If we decode the legacy, * counterpart, we need to request it as well */ if ((rsrc & VGA_RSRC_NORMAL_IO) && (vgadev->decodes & VGA_RSRC_LEGACY_IO)) rsrc |= VGA_RSRC_LEGACY_IO; if ((rsrc & VGA_RSRC_NORMAL_MEM) && (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) rsrc |= VGA_RSRC_LEGACY_MEM; vgaarb_dbg(dev, "%s: %d\n", __func__, rsrc); vgaarb_dbg(dev, "%s: owns: %d\n", __func__, vgadev->owns); /* Check what resources we need to acquire */ wants = rsrc & ~vgadev->owns; /* We already own everything, just mark locked & bye bye */ if (wants == 0) goto lock_them; /* * We don't need to request a legacy resource, we just enable * appropriate decoding and go. */ legacy_wants = wants & VGA_RSRC_LEGACY_MASK; if (legacy_wants == 0) goto enable_them; /* Ok, we don't, let's find out who we need to kick off */ list_for_each_entry(conflict, &vga_list, list) { unsigned int lwants = legacy_wants; unsigned int change_bridge = 0; /* Don't conflict with myself */ if (vgadev == conflict) continue; /* * We have a possible conflict. Before we go further, we must * check if we sit on the same bus as the conflicting device. 
* If we don't, then we must tie both IO and MEM resources * together since there is only a single bit controlling * VGA forwarding on P2P bridges. */ if (vgadev->pdev->bus != conflict->pdev->bus) { change_bridge = 1; lwants = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; } /* * Check if the guy has a lock on the resource. If he does, * return the conflicting entry. */ if (conflict->locks & lwants) return conflict; /* * Ok, now check if it owns the resource we want. We can * lock resources that are not decoded; therefore a device * can own resources it doesn't decode. */ match = lwants & conflict->owns; if (!match) continue; /* * Looks like he doesn't have a lock, we can steal them * from him. */ flags = 0; pci_bits = 0; /* * If we can't control legacy resources via the bridge, we * also need to disable normal decoding. */ if (!conflict->bridge_has_one_vga) { if ((match & conflict->decodes) & VGA_RSRC_LEGACY_MEM) pci_bits |= PCI_COMMAND_MEMORY; if ((match & conflict->decodes) & VGA_RSRC_LEGACY_IO) pci_bits |= PCI_COMMAND_IO; if (pci_bits) flags |= PCI_VGA_STATE_CHANGE_DECODES; } if (change_bridge) flags |= PCI_VGA_STATE_CHANGE_BRIDGE; pci_set_vga_state(conflict->pdev, false, pci_bits, flags); conflict->owns &= ~match; /* If we disabled normal decoding, reflect it in owns */ if (pci_bits & PCI_COMMAND_MEMORY) conflict->owns &= ~VGA_RSRC_NORMAL_MEM; if (pci_bits & PCI_COMMAND_IO) conflict->owns &= ~VGA_RSRC_NORMAL_IO; } enable_them: /* * Ok, we got it, everybody conflicting has been disabled, let's * enable us. Mark any bits in "owns" regardless of whether we * decoded them. We can lock resources we don't decode, therefore * we must track them via "owns". */ flags = 0; pci_bits = 0; if (!vgadev->bridge_has_one_vga) { flags |= PCI_VGA_STATE_CHANGE_DECODES; if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) pci_bits |= PCI_COMMAND_MEMORY; if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) pci_bits |= PCI_COMMAND_IO; } if (wants & VGA_RSRC_LEGACY_MASK) flags |= PCI_VGA_STATE_CHANGE_BRIDGE; pci_set_vga_state(vgadev->pdev, true, pci_bits, flags); vgadev->owns |= wants; lock_them: vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); if (rsrc & VGA_RSRC_LEGACY_IO) vgadev->io_lock_cnt++; if (rsrc & VGA_RSRC_LEGACY_MEM) vgadev->mem_lock_cnt++; if (rsrc & VGA_RSRC_NORMAL_IO) vgadev->io_norm_cnt++; if (rsrc & VGA_RSRC_NORMAL_MEM) vgadev->mem_norm_cnt++; return NULL; } static void __vga_put(struct vga_device *vgadev, unsigned int rsrc) { struct device *dev = &vgadev->pdev->dev; unsigned int old_locks = vgadev->locks; vgaarb_dbg(dev, "%s\n", __func__); /* * Update our counters and account for equivalent legacy resources * if we decode them. */ if ((rsrc & VGA_RSRC_NORMAL_IO) && vgadev->io_norm_cnt > 0) { vgadev->io_norm_cnt--; if (vgadev->decodes & VGA_RSRC_LEGACY_IO) rsrc |= VGA_RSRC_LEGACY_IO; } if ((rsrc & VGA_RSRC_NORMAL_MEM) && vgadev->mem_norm_cnt > 0) { vgadev->mem_norm_cnt--; if (vgadev->decodes & VGA_RSRC_LEGACY_MEM) rsrc |= VGA_RSRC_LEGACY_MEM; } if ((rsrc & VGA_RSRC_LEGACY_IO) && vgadev->io_lock_cnt > 0) vgadev->io_lock_cnt--; if ((rsrc & VGA_RSRC_LEGACY_MEM) && vgadev->mem_lock_cnt > 0) vgadev->mem_lock_cnt--; /* * Just clear lock bits, we do lazy operations so we don't really * have to bother about anything else at this point. */ if (vgadev->io_lock_cnt == 0) vgadev->locks &= ~VGA_RSRC_LEGACY_IO; if (vgadev->mem_lock_cnt == 0) vgadev->locks &= ~VGA_RSRC_LEGACY_MEM; /* * Kick the wait queue in case somebody was waiting if we actually * released something. 
*/ if (old_locks != vgadev->locks) wake_up_all(&vga_wait_queue); } /** * vga_get - acquire & lock VGA resources * @pdev: PCI device of the VGA card or NULL for the system default * @rsrc: bit mask of resources to acquire and lock * @interruptible: blocking should be interruptible by signals ? * * Acquire VGA resources for the given card and mark those resources * locked. If the resources requested are "normal" (and not legacy) * resources, the arbiter will first check whether the card is doing legacy * decoding for that type of resource. If yes, the lock is "converted" into * a legacy resource lock. * * The arbiter will first look for all VGA cards that might conflict and disable * their IOs and/or Memory access, including VGA forwarding on P2P bridges if * necessary, so that the requested resources can be used. Then, the card is * marked as locking these resources and the IO and/or Memory accesses are * enabled on the card (including VGA forwarding on parent P2P bridges if any). * * This function will block if some conflicting card is already locking one of * the required resources (or any resource on a different bus segment, since P2P * bridges don't differentiate VGA memory and IO afaik). You can indicate * whether this blocking should be interruptible by a signal (for userland * interface) or not. * * Must not be called at interrupt time or in atomic context. If the card * already owns the resources, the function succeeds. Nested calls are * supported (a per-resource counter is maintained) * * On success, release the VGA resource again with vga_put(). * * Returns: * * 0 on success, negative error code on failure. */ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { struct vga_device *vgadev, *conflict; unsigned long flags; wait_queue_entry_t wait; int rc = 0; vga_check_first_use(); /* The caller should check for this, but let's be sure */ if (pdev == NULL) pdev = vga_default_device(); if (pdev == NULL) return 0; for (;;) { spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev == NULL) { spin_unlock_irqrestore(&vga_lock, flags); rc = -ENODEV; break; } conflict = __vga_tryget(vgadev, rsrc); spin_unlock_irqrestore(&vga_lock, flags); if (conflict == NULL) break; /* * We have a conflict; we wait until somebody kicks the * work queue. Currently we have one work queue that we * kick each time some resources are released, but it would * be fairly easy to have a per-device one so that we only * need to attach to the conflicting device. */ init_waitqueue_entry(&wait, current); add_wait_queue(&vga_wait_queue, &wait); set_current_state(interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (interruptible && signal_pending(current)) { __set_current_state(TASK_RUNNING); remove_wait_queue(&vga_wait_queue, &wait); rc = -ERESTARTSYS; break; } schedule(); remove_wait_queue(&vga_wait_queue, &wait); } return rc; } EXPORT_SYMBOL(vga_get); /** * vga_tryget - try to acquire & lock legacy VGA resources * @pdev: PCI device of VGA card or NULL for system default * @rsrc: bit mask of resources to acquire and lock * * Perform the same operation as vga_get(), but return an error (-EBUSY) * instead of blocking if the resources are already locked by another card. * Can be called in any context. * * On success, release the VGA resource again with vga_put(). * * Returns: * * 0 on success, negative error code on failure. 
*/ static int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { struct vga_device *vgadev; unsigned long flags; int rc = 0; vga_check_first_use(); /* The caller should check for this, but let's be sure */ if (pdev == NULL) pdev = vga_default_device(); if (pdev == NULL) return 0; spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev == NULL) { rc = -ENODEV; goto bail; } if (__vga_tryget(vgadev, rsrc)) rc = -EBUSY; bail: spin_unlock_irqrestore(&vga_lock, flags); return rc; } /** * vga_put - release lock on legacy VGA resources * @pdev: PCI device of VGA card or NULL for system default * @rsrc: bit mask of resource to release * * Release resources previously locked by vga_get() or vga_tryget(). The * resources aren't disabled right away, so that a subsequent vga_get() on * the same card will succeed immediately. Resources have a counter, so * locks are only released if the counter reaches 0. */ void vga_put(struct pci_dev *pdev, unsigned int rsrc) { struct vga_device *vgadev; unsigned long flags; /* The caller should check for this, but let's be sure */ if (pdev == NULL) pdev = vga_default_device(); if (pdev == NULL) return; spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev == NULL) goto bail; __vga_put(vgadev, rsrc); bail: spin_unlock_irqrestore(&vga_lock, flags); } EXPORT_SYMBOL(vga_put); static bool vga_is_firmware_default(struct pci_dev *pdev) { #if defined(CONFIG_X86) || defined(CONFIG_IA64) u64 base = screen_info.lfb_base; u64 size = screen_info.lfb_size; struct resource *r; u64 limit; /* Select the device owning the boot framebuffer if there is one */ if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE) base |= (u64)screen_info.ext_lfb_base << 32; limit = base + size; /* Does firmware framebuffer belong to us? */ pci_dev_for_each_resource(pdev, r) { if (resource_type(r) != IORESOURCE_MEM) continue; if (!r->start || !r->end) continue; if (base < r->start || limit >= r->end) continue; return true; } #endif return false; } static bool vga_arb_integrated_gpu(struct device *dev) { #if defined(CONFIG_ACPI) struct acpi_device *adev = ACPI_COMPANION(dev); return adev && !strcmp(acpi_device_hid(adev), ACPI_VIDEO_HID); #else return false; #endif } /* * Return true if vgadev is a better default VGA device than the best one * we've seen so far. */ static bool vga_is_boot_device(struct vga_device *vgadev) { struct vga_device *boot_vga = vgadev_find(vga_default_device()); struct pci_dev *pdev = vgadev->pdev; u16 cmd, boot_cmd; /* * We select the default VGA device in this order: * Firmware framebuffer (see vga_arb_select_default_device()) * Legacy VGA device (owns VGA_RSRC_LEGACY_MASK) * Non-legacy integrated device (see vga_arb_select_default_device()) * Non-legacy discrete device (see vga_arb_select_default_device()) * Other device (see vga_arb_select_default_device()) */ /* * We always prefer a firmware default device, so if we've already * found one, there's no need to consider vgadev. */ if (boot_vga && boot_vga->is_firmware_default) return false; if (vga_is_firmware_default(pdev)) { vgadev->is_firmware_default = true; return true; } /* * A legacy VGA device has MEM and IO enabled and any bridges * leading to it have PCI_BRIDGE_CTL_VGA enabled so the legacy * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], etc) are * routed to it. * * We use the first one we find, so if we've already found one, * vgadev is no better. 
*/ if (boot_vga && (boot_vga->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK) return false; if ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK) return true; /* * If we haven't found a legacy VGA device, accept a non-legacy * device. It may have either IO or MEM enabled, and bridges may * not have PCI_BRIDGE_CTL_VGA enabled, so it may not be able to * use legacy VGA resources. Prefer an integrated GPU over others. */ pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { /* * An integrated GPU overrides a previous non-legacy * device. We expect only a single integrated GPU, but if * there are more, we use the *last* because that was the * previous behavior. */ if (vga_arb_integrated_gpu(&pdev->dev)) return true; /* * We prefer the first non-legacy discrete device we find. * If we already found one, vgadev is no better. */ if (boot_vga) { pci_read_config_word(boot_vga->pdev, PCI_COMMAND, &boot_cmd); if (boot_cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) return false; } return true; } /* * Vgadev has neither IO nor MEM enabled. If we haven't found any * other VGA devices, it is the best candidate so far. */ if (!boot_vga) return true; return false; } /* * Rules for using a bridge to control a VGA descendant decoding: if a bridge * has only one VGA descendant then it can be used to control the VGA routing * for that device. It should always use the bridge closest to the device to * control it. If a bridge has a direct VGA descendant, but also have a sub- * bridge VGA descendant then we cannot use that bridge to control the direct * VGA descendant. So for every device we register, we need to iterate all * its parent bridges so we can invalidate any devices using them properly. */ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev) { struct vga_device *same_bridge_vgadev; struct pci_bus *new_bus, *bus; struct pci_dev *new_bridge, *bridge; vgadev->bridge_has_one_vga = true; if (list_empty(&vga_list)) { vgaarb_info(&vgadev->pdev->dev, "bridge control possible\n"); return; } /* Iterate the new device's bridge hierarchy */ new_bus = vgadev->pdev->bus; while (new_bus) { new_bridge = new_bus->self; /* Go through list of devices already registered */ list_for_each_entry(same_bridge_vgadev, &vga_list, list) { bus = same_bridge_vgadev->pdev->bus; bridge = bus->self; /* See if it shares a bridge with this device */ if (new_bridge == bridge) { /* * If its direct parent bridge is the same * as any bridge of this device then it can't * be used for that device. */ same_bridge_vgadev->bridge_has_one_vga = false; } /* * Now iterate the previous device's bridge hierarchy. * If the new device's parent bridge is in the other * device's hierarchy, we can't use it to control this * device. */ while (bus) { bridge = bus->self; if (bridge && bridge == vgadev->pdev->bus->self) vgadev->bridge_has_one_vga = false; bus = bus->parent; } } new_bus = new_bus->parent; } if (vgadev->bridge_has_one_vga) vgaarb_info(&vgadev->pdev->dev, "bridge control possible\n"); else vgaarb_info(&vgadev->pdev->dev, "no bridge control possible\n"); } /* * Currently, we assume that the "initial" setup of the system is not sane, * that is, we come up with conflicting devices and let the arbiter's * client decide if devices decodes legacy things or not. 
*/ static bool vga_arbiter_add_pci_device(struct pci_dev *pdev) { struct vga_device *vgadev; unsigned long flags; struct pci_bus *bus; struct pci_dev *bridge; u16 cmd; /* Only deal with VGA class devices */ if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) return false; /* Allocate structure */ vgadev = kzalloc(sizeof(struct vga_device), GFP_KERNEL); if (vgadev == NULL) { vgaarb_err(&pdev->dev, "failed to allocate VGA arbiter data\n"); /* * What to do on allocation failure? For now, let's just do * nothing, I'm not sure there is anything saner to be done. */ return false; } /* Take lock & check for duplicates */ spin_lock_irqsave(&vga_lock, flags); if (vgadev_find(pdev) != NULL) { BUG_ON(1); goto fail; } vgadev->pdev = pdev; /* By default, assume we decode everything */ vgadev->decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; /* By default, mark it as decoding */ vga_decode_count++; /* * Mark that we "own" resources based on our enables, we will * clear that below if the bridge isn't forwarding. */ pci_read_config_word(pdev, PCI_COMMAND, &cmd); if (cmd & PCI_COMMAND_IO) vgadev->owns |= VGA_RSRC_LEGACY_IO; if (cmd & PCI_COMMAND_MEMORY) vgadev->owns |= VGA_RSRC_LEGACY_MEM; /* Check if VGA cycles can get down to us */ bus = pdev->bus; while (bus) { bridge = bus->self; if (bridge) { u16 l; pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &l); if (!(l & PCI_BRIDGE_CTL_VGA)) { vgadev->owns = 0; break; } } bus = bus->parent; } if (vga_is_boot_device(vgadev)) { vgaarb_info(&pdev->dev, "setting as boot VGA device%s\n", vga_default_device() ? " (overriding previous)" : ""); vga_set_default_device(pdev); } vga_arbiter_check_bridge_sharing(vgadev); /* Add to the list */ list_add_tail(&vgadev->list, &vga_list); vga_count++; vgaarb_info(&pdev->dev, "VGA device added: decodes=%s,owns=%s,locks=%s\n", vga_iostate_to_str(vgadev->decodes), vga_iostate_to_str(vgadev->owns), vga_iostate_to_str(vgadev->locks)); spin_unlock_irqrestore(&vga_lock, flags); return true; fail: spin_unlock_irqrestore(&vga_lock, flags); kfree(vgadev); return false; } static bool vga_arbiter_del_pci_device(struct pci_dev *pdev) { struct vga_device *vgadev; unsigned long flags; bool ret = true; spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev == NULL) { ret = false; goto bail; } if (vga_default == pdev) vga_set_default_device(NULL); if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)) vga_decode_count--; /* Remove entry from list */ list_del(&vgadev->list); vga_count--; /* Wake up all possible waiters */ wake_up_all(&vga_wait_queue); bail: spin_unlock_irqrestore(&vga_lock, flags); kfree(vgadev); return ret; } /* Called with the lock */ static void vga_update_device_decodes(struct vga_device *vgadev, unsigned int new_decodes) { struct device *dev = &vgadev->pdev->dev; unsigned int old_decodes = vgadev->decodes; unsigned int decodes_removed = ~new_decodes & old_decodes; unsigned int decodes_unlocked = vgadev->locks & decodes_removed; vgadev->decodes = new_decodes; vgaarb_info(dev, "VGA decodes changed: olddecodes=%s,decodes=%s:owns=%s\n", vga_iostate_to_str(old_decodes), vga_iostate_to_str(vgadev->decodes), vga_iostate_to_str(vgadev->owns)); /* If we removed locked decodes, lock count goes to zero, and release */ if (decodes_unlocked) { if (decodes_unlocked & VGA_RSRC_LEGACY_IO) vgadev->io_lock_cnt = 0; if (decodes_unlocked & VGA_RSRC_LEGACY_MEM) vgadev->mem_lock_cnt = 0; __vga_put(vgadev, decodes_unlocked); } /* Change decodes counter */ if (old_decodes & 
VGA_RSRC_LEGACY_MASK && !(new_decodes & VGA_RSRC_LEGACY_MASK)) vga_decode_count--; if (!(old_decodes & VGA_RSRC_LEGACY_MASK) && new_decodes & VGA_RSRC_LEGACY_MASK) vga_decode_count++; vgaarb_dbg(dev, "decoding count now is: %d\n", vga_decode_count); } static void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) { struct vga_device *vgadev; unsigned long flags; decodes &= VGA_RSRC_LEGACY_MASK; spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev == NULL) goto bail; /* Don't let userspace futz with kernel driver decodes */ if (userspace && vgadev->set_decode) goto bail; /* Update the device decodes + counter */ vga_update_device_decodes(vgadev, decodes); /* * XXX If somebody is going from "doesn't decode" to "decodes" * state here, additional care must be taken as we may have pending * ownership of non-legacy region. */ bail: spin_unlock_irqrestore(&vga_lock, flags); } /** * vga_set_legacy_decoding * @pdev: PCI device of the VGA card * @decodes: bit mask of what legacy regions the card decodes * * Indicate to the arbiter if the card decodes legacy VGA IOs, legacy VGA * Memory, both, or none. All cards default to both, the card driver (fbdev for * example) should tell the arbiter if it has disabled legacy decoding, so the * card can be left out of the arbitration process (and can be safe to take * interrupts at any time. */ void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes) { __vga_set_legacy_decoding(pdev, decodes, false); } EXPORT_SYMBOL(vga_set_legacy_decoding); /** * vga_client_register - register or unregister a VGA arbitration client * @pdev: PCI device of the VGA client * @set_decode: VGA decode change callback * * Clients have two callback mechanisms they can use. * * @set_decode callback: If a client can disable its GPU VGA resource, it * will get a callback from this to set the encode/decode state. * * Rationale: we cannot disable VGA decode resources unconditionally * because some single GPU laptops seem to require ACPI or BIOS access to * the VGA registers to control things like backlights etc. Hopefully newer * multi-GPU laptops do something saner, and desktops won't have any * special ACPI for this. The driver will get a callback when VGA * arbitration is first used by userspace since some older X servers have * issues. * * Does not check whether a client for @pdev has been registered already. * * To unregister, call vga_client_unregister(). * * Returns: 0 on success, -ENODEV on failure */ int vga_client_register(struct pci_dev *pdev, unsigned int (*set_decode)(struct pci_dev *pdev, bool decode)) { unsigned long flags; struct vga_device *vgadev; spin_lock_irqsave(&vga_lock, flags); vgadev = vgadev_find(pdev); if (vgadev) vgadev->set_decode = set_decode; spin_unlock_irqrestore(&vga_lock, flags); if (!vgadev) return -ENODEV; return 0; } EXPORT_SYMBOL(vga_client_register); /* * Char driver implementation * * Semantics is: * * open : Open user instance of the arbiter. By default, it's * attached to the default VGA device of the system. * * close : Close user instance, release locks * * read : Return a string indicating the status of the target. * An IO state string is of the form {io,mem,io+mem,none}, * mc and ic are respectively mem and io lock counts (for * debugging/diagnostic only). "decodes" indicate what the * card currently decodes, "owns" indicates what is currently * enabled on it, and "locks" indicates what is locked by this * card. 
If the card is unplugged, we get "invalid" then for * card_ID and an -ENODEV error is returned for any command * until a new card is targeted * * "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)" * * write : write a command to the arbiter. List of commands is: * * target <card_ID> : switch target to card <card_ID> (see below) * lock <io_state> : acquire locks on target ("none" is invalid io_state) * trylock <io_state> : non-blocking acquire locks on target * unlock <io_state> : release locks on target * unlock all : release all locks on target held by this user * decodes <io_state> : set the legacy decoding attributes for the card * * poll : event if something change on any card (not just the target) * * card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default" * to go back to the system default card (TODO: not implemented yet). * Currently, only PCI is supported as a prefix, but the userland API may * support other bus types in the future, even if the current kernel * implementation doesn't. * * Note about locks: * * The driver keeps track of which user has what locks on which card. It * supports stacking, like the kernel one. This complicates the implementation * a bit, but makes the arbiter more tolerant to userspace problems and able * to properly cleanup in all cases when a process dies. * Currently, a max of 16 cards simultaneously can have locks issued from * userspace for a given user (file descriptor instance) of the arbiter. * * If the device is hot-unplugged, there is a hook inside the module to notify * it being added/removed in the system and automatically added/removed in * the arbiter. */ #define MAX_USER_CARDS CONFIG_VGA_ARB_MAX_GPUS #define PCI_INVALID_CARD ((struct pci_dev *)-1UL) /* Each user has an array of these, tracking which cards have locks */ struct vga_arb_user_card { struct pci_dev *pdev; unsigned int mem_cnt; unsigned int io_cnt; }; struct vga_arb_private { struct list_head list; struct pci_dev *target; struct vga_arb_user_card cards[MAX_USER_CARDS]; spinlock_t lock; }; static LIST_HEAD(vga_user_list); static DEFINE_SPINLOCK(vga_user_lock); /* * Take a string in the format: "PCI:domain:bus:dev.fn" and return the * respective values. If the string is not in this format, return 0. */ static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain, unsigned int *bus, unsigned int *devfn) { int n; unsigned int slot, func; n = sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func); if (n != 4) return 0; *devfn = PCI_DEVFN(slot, func); return 1; } static ssize_t vga_arb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct vga_arb_private *priv = file->private_data; struct vga_device *vgadev; struct pci_dev *pdev; unsigned long flags; size_t len; int rc; char *lbuf; lbuf = kmalloc(1024, GFP_KERNEL); if (lbuf == NULL) return -ENOMEM; /* Protect vga_list */ spin_lock_irqsave(&vga_lock, flags); /* If we are targeting the default, use it */ pdev = priv->target; if (pdev == NULL || pdev == PCI_INVALID_CARD) { spin_unlock_irqrestore(&vga_lock, flags); len = sprintf(lbuf, "invalid"); goto done; } /* Find card vgadev structure */ vgadev = vgadev_find(pdev); if (vgadev == NULL) { /* * Wow, it's not in the list, that shouldn't happen, let's * fix us up and return invalid card. 
*/ spin_unlock_irqrestore(&vga_lock, flags); len = sprintf(lbuf, "invalid"); goto done; } /* Fill the buffer with info */ len = snprintf(lbuf, 1024, "count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%u:%u)\n", vga_decode_count, pci_name(pdev), vga_iostate_to_str(vgadev->decodes), vga_iostate_to_str(vgadev->owns), vga_iostate_to_str(vgadev->locks), vgadev->io_lock_cnt, vgadev->mem_lock_cnt); spin_unlock_irqrestore(&vga_lock, flags); done: /* Copy that to user */ if (len > count) len = count; rc = copy_to_user(buf, lbuf, len); kfree(lbuf); if (rc) return -EFAULT; return len; } /* * TODO: To avoid parsing inside kernel and to improve the speed we may * consider use ioctl here */ static ssize_t vga_arb_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct vga_arb_private *priv = file->private_data; struct vga_arb_user_card *uc = NULL; struct pci_dev *pdev; unsigned int io_state; char kbuf[64], *curr_pos; size_t remaining = count; int ret_val; int i; if (count >= sizeof(kbuf)) return -EINVAL; if (copy_from_user(kbuf, buf, count)) return -EFAULT; curr_pos = kbuf; kbuf[count] = '\0'; if (strncmp(curr_pos, "lock ", 5) == 0) { curr_pos += 5; remaining -= 5; pr_debug("client 0x%p called 'lock'\n", priv); if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { ret_val = -EPROTO; goto done; } if (io_state == VGA_RSRC_NONE) { ret_val = -EPROTO; goto done; } pdev = priv->target; if (priv->target == NULL) { ret_val = -ENODEV; goto done; } vga_get_uninterruptible(pdev, io_state); /* Update the client's locks lists */ for (i = 0; i < MAX_USER_CARDS; i++) { if (priv->cards[i].pdev == pdev) { if (io_state & VGA_RSRC_LEGACY_IO) priv->cards[i].io_cnt++; if (io_state & VGA_RSRC_LEGACY_MEM) priv->cards[i].mem_cnt++; break; } } ret_val = count; goto done; } else if (strncmp(curr_pos, "unlock ", 7) == 0) { curr_pos += 7; remaining -= 7; pr_debug("client 0x%p called 'unlock'\n", priv); if (strncmp(curr_pos, "all", 3) == 0) io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; else { if (!vga_str_to_iostate (curr_pos, remaining, &io_state)) { ret_val = -EPROTO; goto done; } /* TODO: Add this? if (io_state == VGA_RSRC_NONE) { ret_val = -EPROTO; goto done; } */ } pdev = priv->target; if (priv->target == NULL) { ret_val = -ENODEV; goto done; } for (i = 0; i < MAX_USER_CARDS; i++) { if (priv->cards[i].pdev == pdev) uc = &priv->cards[i]; } if (!uc) { ret_val = -EINVAL; goto done; } if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) { ret_val = -EINVAL; goto done; } if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) { ret_val = -EINVAL; goto done; } vga_put(pdev, io_state); if (io_state & VGA_RSRC_LEGACY_IO) uc->io_cnt--; if (io_state & VGA_RSRC_LEGACY_MEM) uc->mem_cnt--; ret_val = count; goto done; } else if (strncmp(curr_pos, "trylock ", 8) == 0) { curr_pos += 8; remaining -= 8; pr_debug("client 0x%p called 'trylock'\n", priv); if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { ret_val = -EPROTO; goto done; } /* TODO: Add this? if (io_state == VGA_RSRC_NONE) { ret_val = -EPROTO; goto done; } */ pdev = priv->target; if (priv->target == NULL) { ret_val = -ENODEV; goto done; } if (vga_tryget(pdev, io_state)) { /* Update the client's locks lists... 
*/ for (i = 0; i < MAX_USER_CARDS; i++) { if (priv->cards[i].pdev == pdev) { if (io_state & VGA_RSRC_LEGACY_IO) priv->cards[i].io_cnt++; if (io_state & VGA_RSRC_LEGACY_MEM) priv->cards[i].mem_cnt++; break; } } ret_val = count; goto done; } else { ret_val = -EBUSY; goto done; } } else if (strncmp(curr_pos, "target ", 7) == 0) { unsigned int domain, bus, devfn; struct vga_device *vgadev; curr_pos += 7; remaining -= 7; pr_debug("client 0x%p called 'target'\n", priv); /* If target is default */ if (!strncmp(curr_pos, "default", 7)) pdev = pci_dev_get(vga_default_device()); else { if (!vga_pci_str_to_vars(curr_pos, remaining, &domain, &bus, &devfn)) { ret_val = -EPROTO; goto done; } pdev = pci_get_domain_bus_and_slot(domain, bus, devfn); if (!pdev) { pr_debug("invalid PCI address %04x:%02x:%02x.%x\n", domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); ret_val = -ENODEV; goto done; } pr_debug("%s ==> %04x:%02x:%02x.%x pdev %p\n", curr_pos, domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn), pdev); } vgadev = vgadev_find(pdev); pr_debug("vgadev %p\n", vgadev); if (vgadev == NULL) { if (pdev) { vgaarb_dbg(&pdev->dev, "not a VGA device\n"); pci_dev_put(pdev); } ret_val = -ENODEV; goto done; } priv->target = pdev; for (i = 0; i < MAX_USER_CARDS; i++) { if (priv->cards[i].pdev == pdev) break; if (priv->cards[i].pdev == NULL) { priv->cards[i].pdev = pdev; priv->cards[i].io_cnt = 0; priv->cards[i].mem_cnt = 0; break; } } if (i == MAX_USER_CARDS) { vgaarb_dbg(&pdev->dev, "maximum user cards (%d) number reached, ignoring this one!\n", MAX_USER_CARDS); pci_dev_put(pdev); /* XXX: Which value to return? */ ret_val = -ENOMEM; goto done; } ret_val = count; pci_dev_put(pdev); goto done; } else if (strncmp(curr_pos, "decodes ", 8) == 0) { curr_pos += 8; remaining -= 8; pr_debug("client 0x%p called 'decodes'\n", priv); if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { ret_val = -EPROTO; goto done; } pdev = priv->target; if (priv->target == NULL) { ret_val = -ENODEV; goto done; } __vga_set_legacy_decoding(pdev, io_state, true); ret_val = count; goto done; } /* If we got here, the message written is not part of the protocol! */ return -EPROTO; done: return ret_val; } static __poll_t vga_arb_fpoll(struct file *file, poll_table *wait) { pr_debug("%s\n", __func__); poll_wait(file, &vga_wait_queue, wait); return EPOLLIN; } static int vga_arb_open(struct inode *inode, struct file *file) { struct vga_arb_private *priv; unsigned long flags; pr_debug("%s\n", __func__); priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (priv == NULL) return -ENOMEM; spin_lock_init(&priv->lock); file->private_data = priv; spin_lock_irqsave(&vga_user_lock, flags); list_add(&priv->list, &vga_user_list); spin_unlock_irqrestore(&vga_user_lock, flags); /* Set the client's lists of locks */ priv->target = vga_default_device(); /* Maybe this is still null! 
*/ priv->cards[0].pdev = priv->target; priv->cards[0].io_cnt = 0; priv->cards[0].mem_cnt = 0; return 0; } static int vga_arb_release(struct inode *inode, struct file *file) { struct vga_arb_private *priv = file->private_data; struct vga_arb_user_card *uc; unsigned long flags; int i; pr_debug("%s\n", __func__); spin_lock_irqsave(&vga_user_lock, flags); list_del(&priv->list); for (i = 0; i < MAX_USER_CARDS; i++) { uc = &priv->cards[i]; if (uc->pdev == NULL) continue; vgaarb_dbg(&uc->pdev->dev, "uc->io_cnt == %d, uc->mem_cnt == %d\n", uc->io_cnt, uc->mem_cnt); while (uc->io_cnt--) vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); while (uc->mem_cnt--) vga_put(uc->pdev, VGA_RSRC_LEGACY_MEM); } spin_unlock_irqrestore(&vga_user_lock, flags); kfree(priv); return 0; } /* * Callback any registered clients to let them know we have a change in VGA * cards. */ static void vga_arbiter_notify_clients(void) { struct vga_device *vgadev; unsigned long flags; unsigned int new_decodes; bool new_state; if (!vga_arbiter_used) return; new_state = (vga_count > 1) ? false : true; spin_lock_irqsave(&vga_lock, flags); list_for_each_entry(vgadev, &vga_list, list) { if (vgadev->set_decode) { new_decodes = vgadev->set_decode(vgadev->pdev, new_state); vga_update_device_decodes(vgadev, new_decodes); } } spin_unlock_irqrestore(&vga_lock, flags); } static int pci_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct pci_dev *pdev = to_pci_dev(dev); bool notify = false; vgaarb_dbg(dev, "%s\n", __func__); /* * For now, we're only interested in devices added and removed. * I didn't test this thing here, so someone needs to double check * for the cases of hot-pluggable VGA cards. */ if (action == BUS_NOTIFY_ADD_DEVICE) notify = vga_arbiter_add_pci_device(pdev); else if (action == BUS_NOTIFY_DEL_DEVICE) notify = vga_arbiter_del_pci_device(pdev); if (notify) vga_arbiter_notify_clients(); return 0; } static struct notifier_block pci_notifier = { .notifier_call = pci_notify, }; static const struct file_operations vga_arb_device_fops = { .read = vga_arb_read, .write = vga_arb_write, .poll = vga_arb_fpoll, .open = vga_arb_open, .release = vga_arb_release, .llseek = noop_llseek, }; static struct miscdevice vga_arb_device = { MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops }; static int __init vga_arb_device_init(void) { int rc; struct pci_dev *pdev; rc = misc_register(&vga_arb_device); if (rc < 0) pr_err("error %d registering device\n", rc); bus_register_notifier(&pci_bus_type, &pci_notifier); /* Add all VGA class PCI devices by default */ pdev = NULL; while ((pdev = pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) vga_arbiter_add_pci_device(pdev); pr_info("loaded\n"); return rc; } subsys_initcall_sync(vga_arb_device_init);
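/*
 * Illustrative only -- a minimal sketch, not part of vgaarb.c, of how a GPU
 * driver might use the exported arbiter API documented above (vga_get(),
 * vga_put(), vga_client_register()).  The function names and the decode
 * policy below are hypothetical placeholders; see real GPU drivers for the
 * production pattern.  Kept under #if 0 so it is never built.
 */
#if 0
static unsigned int example_set_decode(struct pci_dev *pdev, bool decode)
{
	/*
	 * Called when VGA arbitration is first used from userspace: report
	 * which resources this card will keep decoding.  Dropping the legacy
	 * ranges lets the card be left out of legacy arbitration entirely.
	 */
	if (decode)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int example_touch_legacy_vga(struct pci_dev *pdev)
{
	int ret;

	/* Ask for a decode callback; returns -ENODEV if pdev is unknown. */
	ret = vga_client_register(pdev, example_set_decode);
	if (ret)
		return ret;

	/* Sleep (interruptibly) until the legacy IO and MEM ranges are ours. */
	ret = vga_get(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM, 1);
	if (ret)
		return ret;

	/* ...safe to poke the legacy VGA registers/framebuffer here... */

	/* Locks are counted, so each vga_get() needs a matching vga_put(). */
	vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	return 0;
}
#endif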
linux-master
drivers/pci/vgaarb.c
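/*
 * Illustrative only -- a small user-space sketch of the /dev/vga_arbiter
 * text protocol documented in the comments of drivers/pci/vgaarb.c above.
 * The device path and the PCI address "0000:01:00.0" are assumptions, and
 * error handling is trimmed; this is not part of the kernel sources.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void arb_cmd(int fd, const char *cmd)
{
	/* Commands are plain text; the arbiter answers -EPROTO to unknown ones. */
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror(cmd);
}

int main(void)
{
	char status[256];
	ssize_t n;
	int fd;

	fd = open("/dev/vga_arbiter", O_RDWR);
	if (fd < 0) {
		perror("/dev/vga_arbiter");
		return 1;
	}

	/* Point this file instance at one card; card_ID is PCI:domain:bus:dev.fn. */
	arb_cmd(fd, "target PCI:0000:01:00.0");

	/*
	 * Block until legacy IO+MEM are locked for us, then read the status line,
	 * e.g. "count:1,PCI:0000:01:00.0,decodes=io+mem,owns=io+mem,locks=io+mem(1:1)".
	 */
	arb_cmd(fd, "lock io+mem");
	n = read(fd, status, sizeof(status) - 1);
	if (n > 0) {
		status[n] = '\0';
		printf("%s", status);
	}

	/* Drop every lock this descriptor holds on the target before exiting. */
	arb_cmd(fd, "unlock all");
	close(fd);
	return 0;
}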
// SPDX-License-Identifier: GPL-2.0 /* * (C) Copyright 2002-2004 Greg Kroah-Hartman <[email protected]> * (C) Copyright 2002-2004 IBM Corp. * (C) Copyright 2003 Matthew Wilcox * (C) Copyright 2003 Hewlett-Packard * (C) Copyright 2004 Jon Smirl <[email protected]> * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <[email protected]> * * File attributes for PCI devices * * Modeled after usb's driverfs.c */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/pci.h> #include <linux/stat.h> #include <linux/export.h> #include <linux/topology.h> #include <linux/mm.h> #include <linux/fs.h> #include <linux/capability.h> #include <linux/security.h> #include <linux/slab.h> #include <linux/vgaarb.h> #include <linux/pm_runtime.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/aperture.h> #include "pci.h" static int sysfs_initialized; /* = 0 */ /* show configuration fields */ #define pci_config_attr(field, format_string) \ static ssize_t \ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct pci_dev *pdev; \ \ pdev = to_pci_dev(dev); \ return sysfs_emit(buf, format_string, pdev->field); \ } \ static DEVICE_ATTR_RO(field) pci_config_attr(vendor, "0x%04x\n"); pci_config_attr(device, "0x%04x\n"); pci_config_attr(subsystem_vendor, "0x%04x\n"); pci_config_attr(subsystem_device, "0x%04x\n"); pci_config_attr(revision, "0x%02x\n"); pci_config_attr(class, "0x%06x\n"); static ssize_t irq_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); #ifdef CONFIG_PCI_MSI /* * For MSI, show the first MSI IRQ; for all other cases including * MSI-X, show the legacy INTx IRQ. */ if (pdev->msi_enabled) return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0)); #endif return sysfs_emit(buf, "%u\n", pdev->irq); } static DEVICE_ATTR_RO(irq); static ssize_t broken_parity_status_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return sysfs_emit(buf, "%u\n", pdev->broken_parity_status); } static ssize_t broken_parity_status_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); unsigned long val; if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; pdev->broken_parity_status = !!val; return count; } static DEVICE_ATTR_RW(broken_parity_status); static ssize_t pci_dev_show_local_cpu(struct device *dev, bool list, struct device_attribute *attr, char *buf) { const struct cpumask *mask; #ifdef CONFIG_NUMA if (dev_to_node(dev) == NUMA_NO_NODE) mask = cpu_online_mask; else mask = cpumask_of_node(dev_to_node(dev)); #else mask = cpumask_of_pcibus(to_pci_dev(dev)->bus); #endif return cpumap_print_to_pagebuf(list, buf, mask); } static ssize_t local_cpus_show(struct device *dev, struct device_attribute *attr, char *buf) { return pci_dev_show_local_cpu(dev, false, attr, buf); } static DEVICE_ATTR_RO(local_cpus); static ssize_t local_cpulist_show(struct device *dev, struct device_attribute *attr, char *buf) { return pci_dev_show_local_cpu(dev, true, attr, buf); } static DEVICE_ATTR_RO(local_cpulist); /* * PCI Bus Class Devices */ static ssize_t cpuaffinity_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev)); return cpumap_print_to_pagebuf(false, buf, cpumask); } static DEVICE_ATTR_RO(cpuaffinity); static ssize_t cpulistaffinity_show(struct device *dev, struct device_attribute *attr, char *buf) { const struct 
cpumask *cpumask = cpumask_of_pcibus(to_pci_bus(dev)); return cpumap_print_to_pagebuf(true, buf, cpumask); } static DEVICE_ATTR_RO(cpulistaffinity); static ssize_t power_state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return sysfs_emit(buf, "%s\n", pci_power_name(pdev->current_state)); } static DEVICE_ATTR_RO(power_state); /* show resources */ static ssize_t resource_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pci_dev = to_pci_dev(dev); int i; int max; resource_size_t start, end; size_t len = 0; if (pci_dev->subordinate) max = DEVICE_COUNT_RESOURCE; else max = PCI_BRIDGE_RESOURCES; for (i = 0; i < max; i++) { struct resource *res = &pci_dev->resource[i]; pci_resource_to_user(pci_dev, i, res, &start, &end); len += sysfs_emit_at(buf, len, "0x%016llx 0x%016llx 0x%016llx\n", (unsigned long long)start, (unsigned long long)end, (unsigned long long)res->flags); } return len; } static DEVICE_ATTR_RO(resource); static ssize_t max_link_speed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return sysfs_emit(buf, "%s\n", pci_speed_string(pcie_get_speed_cap(pdev))); } static DEVICE_ATTR_RO(max_link_speed); static ssize_t max_link_width_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return sysfs_emit(buf, "%u\n", pcie_get_width_cap(pdev)); } static DEVICE_ATTR_RO(max_link_width); static ssize_t current_link_speed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pci_dev = to_pci_dev(dev); u16 linkstat; int err; enum pci_bus_speed speed; err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); if (err) return -EINVAL; speed = pcie_link_speed[linkstat & PCI_EXP_LNKSTA_CLS]; return sysfs_emit(buf, "%s\n", pci_speed_string(speed)); } static DEVICE_ATTR_RO(current_link_speed); static ssize_t current_link_width_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pci_dev = to_pci_dev(dev); u16 linkstat; int err; err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); if (err) return -EINVAL; return sysfs_emit(buf, "%u\n", (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT); } static DEVICE_ATTR_RO(current_link_width); static ssize_t secondary_bus_number_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pci_dev = to_pci_dev(dev); u8 sec_bus; int err; err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus); if (err) return -EINVAL; return sysfs_emit(buf, "%u\n", sec_bus); } static DEVICE_ATTR_RO(secondary_bus_number); static ssize_t subordinate_bus_number_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pci_dev = to_pci_dev(dev); u8 sub_bus; int err; err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus); if (err) return -EINVAL; return sysfs_emit(buf, "%u\n", sub_bus); } static DEVICE_ATTR_RO(subordinate_bus_number); static ssize_t ari_enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pci_dev = to_pci_dev(dev); return sysfs_emit(buf, "%u\n", pci_ari_enabled(pci_dev->bus)); } static DEVICE_ATTR_RO(ari_enabled); static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pci_dev = to_pci_dev(dev); return sysfs_emit(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n", pci_dev->vendor, pci_dev->device, 
pci_dev->subsystem_vendor, pci_dev->subsystem_device, (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8), (u8)(pci_dev->class)); } static DEVICE_ATTR_RO(modalias); static ssize_t enable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); unsigned long val; ssize_t result = 0; /* this can crash the machine when done on the "wrong" device */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; device_lock(dev); if (dev->driver) result = -EBUSY; else if (val) result = pci_enable_device(pdev); else if (pci_is_enabled(pdev)) pci_disable_device(pdev); else result = -EIO; device_unlock(dev); return result < 0 ? result : count; } static ssize_t enable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev; pdev = to_pci_dev(dev); return sysfs_emit(buf, "%u\n", atomic_read(&pdev->enable_cnt)); } static DEVICE_ATTR_RW(enable); #ifdef CONFIG_NUMA static ssize_t numa_node_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); int node; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (kstrtoint(buf, 0, &node) < 0) return -EINVAL; if ((node < 0 && node != NUMA_NO_NODE) || node >= MAX_NUMNODES) return -EINVAL; if (node != NUMA_NO_NODE && !node_online(node)) return -EINVAL; add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); pci_alert(pdev, FW_BUG "Overriding NUMA node to %d. Contact your vendor for updates.", node); dev->numa_node = node; return count; } static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", dev->numa_node); } static DEVICE_ATTR_RW(numa_node); #endif static ssize_t dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return sysfs_emit(buf, "%d\n", fls64(pdev->dma_mask)); } static DEVICE_ATTR_RO(dma_mask_bits); static ssize_t consistent_dma_mask_bits_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", fls64(dev->coherent_dma_mask)); } static DEVICE_ATTR_RO(consistent_dma_mask_bits); static ssize_t msi_bus_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); struct pci_bus *subordinate = pdev->subordinate; return sysfs_emit(buf, "%u\n", subordinate ? !(subordinate->bus_flags & PCI_BUS_FLAGS_NO_MSI) : !pdev->no_msi); } static ssize_t msi_bus_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); struct pci_bus *subordinate = pdev->subordinate; unsigned long val; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; /* * "no_msi" and "bus_flags" only affect what happens when a driver * requests MSI or MSI-X. They don't affect any drivers that have * already requested MSI or MSI-X. */ if (!subordinate) { pdev->no_msi = !val; pci_info(pdev, "MSI/MSI-X %s for future drivers\n", val ? "allowed" : "disallowed"); return count; } if (val) subordinate->bus_flags &= ~PCI_BUS_FLAGS_NO_MSI; else subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; dev_info(&subordinate->dev, "MSI/MSI-X %s for future drivers of devices on this bus\n", val ? 
"allowed" : "disallowed"); return count; } static DEVICE_ATTR_RW(msi_bus); static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count) { unsigned long val; struct pci_bus *b = NULL; if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; if (val) { pci_lock_rescan_remove(); while ((b = pci_find_next_bus(b)) != NULL) pci_rescan_bus(b); pci_unlock_rescan_remove(); } return count; } static BUS_ATTR_WO(rescan); static struct attribute *pci_bus_attrs[] = { &bus_attr_rescan.attr, NULL, }; static const struct attribute_group pci_bus_group = { .attrs = pci_bus_attrs, }; const struct attribute_group *pci_bus_groups[] = { &pci_bus_group, NULL, }; static ssize_t dev_rescan_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; struct pci_dev *pdev = to_pci_dev(dev); if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; if (val) { pci_lock_rescan_remove(); pci_rescan_bus(pdev->bus); pci_unlock_rescan_remove(); } return count; } static struct device_attribute dev_attr_dev_rescan = __ATTR(rescan, 0200, NULL, dev_rescan_store); static ssize_t remove_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; if (val && device_remove_file_self(dev, attr)) pci_stop_and_remove_bus_device_locked(to_pci_dev(dev)); return count; } static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0220, NULL, remove_store); static ssize_t bus_rescan_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; struct pci_bus *bus = to_pci_bus(dev); if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; if (val) { pci_lock_rescan_remove(); if (!pci_is_root_bus(bus) && list_empty(&bus->devices)) pci_rescan_bus_bridge_resize(bus->self); else pci_rescan_bus(bus); pci_unlock_rescan_remove(); } return count; } static struct device_attribute dev_attr_bus_rescan = __ATTR(rescan, 0200, NULL, bus_rescan_store); #if defined(CONFIG_PM) && defined(CONFIG_ACPI) static ssize_t d3cold_allowed_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); unsigned long val; if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; pdev->d3cold_allowed = !!val; if (pdev->d3cold_allowed) pci_d3cold_enable(pdev); else pci_d3cold_disable(pdev); pm_runtime_resume(dev); return count; } static ssize_t d3cold_allowed_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return sysfs_emit(buf, "%u\n", pdev->d3cold_allowed); } static DEVICE_ATTR_RW(d3cold_allowed); #endif #ifdef CONFIG_OF static ssize_t devspec_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); struct device_node *np = pci_device_to_OF_node(pdev); if (np == NULL) return 0; return sysfs_emit(buf, "%pOF\n", np); } static DEVICE_ATTR_RO(devspec); #endif static ssize_t driver_override_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); int ret; ret = driver_set_override(dev, &pdev->driver_override, buf, count); if (ret) return ret; return count; } static ssize_t driver_override_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); ssize_t len; device_lock(dev); len = sysfs_emit(buf, "%s\n", pdev->driver_override); device_unlock(dev); return len; } static DEVICE_ATTR_RW(driver_override); static struct 
attribute *pci_dev_attrs[] = { &dev_attr_power_state.attr, &dev_attr_resource.attr, &dev_attr_vendor.attr, &dev_attr_device.attr, &dev_attr_subsystem_vendor.attr, &dev_attr_subsystem_device.attr, &dev_attr_revision.attr, &dev_attr_class.attr, &dev_attr_irq.attr, &dev_attr_local_cpus.attr, &dev_attr_local_cpulist.attr, &dev_attr_modalias.attr, #ifdef CONFIG_NUMA &dev_attr_numa_node.attr, #endif &dev_attr_dma_mask_bits.attr, &dev_attr_consistent_dma_mask_bits.attr, &dev_attr_enable.attr, &dev_attr_broken_parity_status.attr, &dev_attr_msi_bus.attr, #if defined(CONFIG_PM) && defined(CONFIG_ACPI) &dev_attr_d3cold_allowed.attr, #endif #ifdef CONFIG_OF &dev_attr_devspec.attr, #endif &dev_attr_driver_override.attr, &dev_attr_ari_enabled.attr, NULL, }; static struct attribute *pci_bridge_attrs[] = { &dev_attr_subordinate_bus_number.attr, &dev_attr_secondary_bus_number.attr, NULL, }; static struct attribute *pcie_dev_attrs[] = { &dev_attr_current_link_speed.attr, &dev_attr_current_link_width.attr, &dev_attr_max_link_width.attr, &dev_attr_max_link_speed.attr, NULL, }; static struct attribute *pcibus_attrs[] = { &dev_attr_bus_rescan.attr, &dev_attr_cpuaffinity.attr, &dev_attr_cpulistaffinity.attr, NULL, }; static const struct attribute_group pcibus_group = { .attrs = pcibus_attrs, }; const struct attribute_group *pcibus_groups[] = { &pcibus_group, NULL, }; static ssize_t boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); struct pci_dev *vga_dev = vga_default_device(); if (vga_dev) return sysfs_emit(buf, "%u\n", (pdev == vga_dev)); return sysfs_emit(buf, "%u\n", !!(pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)); } static DEVICE_ATTR_RO(boot_vga); static ssize_t pci_read_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj)); unsigned int size = 64; loff_t init_off = off; u8 *data = (u8 *) buf; /* Several chips lock up trying to read undefined config space */ if (file_ns_capable(filp, &init_user_ns, CAP_SYS_ADMIN)) size = dev->cfg_size; else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) size = 128; if (off > size) return 0; if (off + count > size) { size -= off; count = size; } else { size = count; } pci_config_pm_runtime_get(dev); if ((off & 1) && size) { u8 val; pci_user_read_config_byte(dev, off, &val); data[off - init_off] = val; off++; size--; } if ((off & 3) && size > 2) { u16 val; pci_user_read_config_word(dev, off, &val); data[off - init_off] = val & 0xff; data[off - init_off + 1] = (val >> 8) & 0xff; off += 2; size -= 2; } while (size > 3) { u32 val; pci_user_read_config_dword(dev, off, &val); data[off - init_off] = val & 0xff; data[off - init_off + 1] = (val >> 8) & 0xff; data[off - init_off + 2] = (val >> 16) & 0xff; data[off - init_off + 3] = (val >> 24) & 0xff; off += 4; size -= 4; cond_resched(); } if (size >= 2) { u16 val; pci_user_read_config_word(dev, off, &val); data[off - init_off] = val & 0xff; data[off - init_off + 1] = (val >> 8) & 0xff; off += 2; size -= 2; } if (size > 0) { u8 val; pci_user_read_config_byte(dev, off, &val); data[off - init_off] = val; } pci_config_pm_runtime_put(dev); return count; } static ssize_t pci_write_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj)); unsigned int size = count; loff_t init_off = off; u8 *data = (u8 *) buf; int ret; ret = 
security_locked_down(LOCKDOWN_PCI_ACCESS); if (ret) return ret; if (resource_is_exclusive(&dev->driver_exclusive_resource, off, count)) { pci_warn_once(dev, "%s: Unexpected write to kernel-exclusive config offset %llx", current->comm, off); add_taint(TAINT_USER, LOCKDEP_STILL_OK); } if (off > dev->cfg_size) return 0; if (off + count > dev->cfg_size) { size = dev->cfg_size - off; count = size; } pci_config_pm_runtime_get(dev); if ((off & 1) && size) { pci_user_write_config_byte(dev, off, data[off - init_off]); off++; size--; } if ((off & 3) && size > 2) { u16 val = data[off - init_off]; val |= (u16) data[off - init_off + 1] << 8; pci_user_write_config_word(dev, off, val); off += 2; size -= 2; } while (size > 3) { u32 val = data[off - init_off]; val |= (u32) data[off - init_off + 1] << 8; val |= (u32) data[off - init_off + 2] << 16; val |= (u32) data[off - init_off + 3] << 24; pci_user_write_config_dword(dev, off, val); off += 4; size -= 4; } if (size >= 2) { u16 val = data[off - init_off]; val |= (u16) data[off - init_off + 1] << 8; pci_user_write_config_word(dev, off, val); off += 2; size -= 2; } if (size) pci_user_write_config_byte(dev, off, data[off - init_off]); pci_config_pm_runtime_put(dev); return count; } static BIN_ATTR(config, 0644, pci_read_config, pci_write_config, 0); static struct bin_attribute *pci_dev_config_attrs[] = { &bin_attr_config, NULL, }; static umode_t pci_dev_config_attr_is_visible(struct kobject *kobj, struct bin_attribute *a, int n) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); a->size = PCI_CFG_SPACE_SIZE; if (pdev->cfg_size > PCI_CFG_SPACE_SIZE) a->size = PCI_CFG_SPACE_EXP_SIZE; return a->attr.mode; } static const struct attribute_group pci_dev_config_attr_group = { .bin_attrs = pci_dev_config_attrs, .is_bin_visible = pci_dev_config_attr_is_visible, }; #ifdef HAVE_PCI_LEGACY /** * pci_read_legacy_io - read byte(s) from legacy I/O port space * @filp: open sysfs file * @kobj: kobject corresponding to file to read from * @bin_attr: struct bin_attribute for this file * @buf: buffer to store results * @off: offset into legacy I/O port space * @count: number of bytes to read * * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific * callback routine (pci_legacy_read). */ static ssize_t pci_read_legacy_io(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj)); /* Only support 1, 2 or 4 byte accesses */ if (count != 1 && count != 2 && count != 4) return -EINVAL; return pci_legacy_read(bus, off, (u32 *)buf, count); } /** * pci_write_legacy_io - write byte(s) to legacy I/O port space * @filp: open sysfs file * @kobj: kobject corresponding to file to read from * @bin_attr: struct bin_attribute for this file * @buf: buffer containing value to be written * @off: offset into legacy I/O port space * @count: number of bytes to write * * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific * callback routine (pci_legacy_write). 
*/ static ssize_t pci_write_legacy_io(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj)); /* Only support 1, 2 or 4 byte accesses */ if (count != 1 && count != 2 && count != 4) return -EINVAL; return pci_legacy_write(bus, off, *(u32 *)buf, count); } /** * pci_mmap_legacy_mem - map legacy PCI memory into user memory space * @filp: open sysfs file * @kobj: kobject corresponding to device to be mapped * @attr: struct bin_attribute for this file * @vma: struct vm_area_struct passed to mmap * * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap * legacy memory space (first meg of bus space) into application virtual * memory space. */ static int pci_mmap_legacy_mem(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj)); return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem); } /** * pci_mmap_legacy_io - map legacy PCI IO into user memory space * @filp: open sysfs file * @kobj: kobject corresponding to device to be mapped * @attr: struct bin_attribute for this file * @vma: struct vm_area_struct passed to mmap * * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap * legacy IO space (first meg of bus space) into application virtual * memory space. Returns -ENOSYS if the operation isn't supported */ static int pci_mmap_legacy_io(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { struct pci_bus *bus = to_pci_bus(kobj_to_dev(kobj)); return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io); } /** * pci_adjust_legacy_attr - adjustment of legacy file attributes * @b: bus to create files under * @mmap_type: I/O port or memory * * Stub implementation. Can be overridden by arch if necessary. */ void __weak pci_adjust_legacy_attr(struct pci_bus *b, enum pci_mmap_state mmap_type) { } /** * pci_create_legacy_files - create legacy I/O port and memory files * @b: bus to create files under * * Some platforms allow access to legacy I/O port and ISA memory space on * a per-bus basis. This routine creates the files and ties them into * their associated read, write and mmap files from pci-sysfs.c * * On error unwind, but don't propagate the error to the caller * as it is ok to set up the PCI bus without these files. 
*/ void pci_create_legacy_files(struct pci_bus *b) { int error; if (!sysfs_initialized) return; b->legacy_io = kcalloc(2, sizeof(struct bin_attribute), GFP_ATOMIC); if (!b->legacy_io) goto kzalloc_err; sysfs_bin_attr_init(b->legacy_io); b->legacy_io->attr.name = "legacy_io"; b->legacy_io->size = 0xffff; b->legacy_io->attr.mode = 0600; b->legacy_io->read = pci_read_legacy_io; b->legacy_io->write = pci_write_legacy_io; b->legacy_io->mmap = pci_mmap_legacy_io; b->legacy_io->f_mapping = iomem_get_mapping; pci_adjust_legacy_attr(b, pci_mmap_io); error = device_create_bin_file(&b->dev, b->legacy_io); if (error) goto legacy_io_err; /* Allocated above after the legacy_io struct */ b->legacy_mem = b->legacy_io + 1; sysfs_bin_attr_init(b->legacy_mem); b->legacy_mem->attr.name = "legacy_mem"; b->legacy_mem->size = 1024*1024; b->legacy_mem->attr.mode = 0600; b->legacy_mem->mmap = pci_mmap_legacy_mem; b->legacy_mem->f_mapping = iomem_get_mapping; pci_adjust_legacy_attr(b, pci_mmap_mem); error = device_create_bin_file(&b->dev, b->legacy_mem); if (error) goto legacy_mem_err; return; legacy_mem_err: device_remove_bin_file(&b->dev, b->legacy_io); legacy_io_err: kfree(b->legacy_io); b->legacy_io = NULL; kzalloc_err: dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n"); } void pci_remove_legacy_files(struct pci_bus *b) { if (b->legacy_io) { device_remove_bin_file(&b->dev, b->legacy_io); device_remove_bin_file(&b->dev, b->legacy_mem); kfree(b->legacy_io); /* both are allocated here */ } } #endif /* HAVE_PCI_LEGACY */ #if defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE) int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma, enum pci_mmap_api mmap_api) { unsigned long nr, start, size; resource_size_t pci_start = 0, pci_end; if (pci_resource_len(pdev, resno) == 0) return 0; nr = vma_pages(vma); start = vma->vm_pgoff; size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1; if (mmap_api == PCI_MMAP_PROCFS) { pci_resource_to_user(pdev, resno, &pdev->resource[resno], &pci_start, &pci_end); pci_start >>= PAGE_SHIFT; } if (start >= pci_start && start < pci_start + size && start + nr <= pci_start + size) return 1; return 0; } /** * pci_mmap_resource - map a PCI resource into user memory space * @kobj: kobject for mapping * @attr: struct bin_attribute for the file being mapped * @vma: struct vm_area_struct passed into the mmap * @write_combine: 1 for write_combine mapping * * Use the regular PCI mapping routines to map a PCI resource into userspace. */ static int pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma, int write_combine) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); int bar = (unsigned long)attr->private; enum pci_mmap_state mmap_type; struct resource *res = &pdev->resource[bar]; int ret; ret = security_locked_down(LOCKDOWN_PCI_ACCESS); if (ret) return ret; if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) return -EINVAL; if (!pci_mmap_fits(pdev, bar, vma, PCI_MMAP_SYSFS)) return -EINVAL; mmap_type = res->flags & IORESOURCE_MEM ? 
pci_mmap_mem : pci_mmap_io; return pci_mmap_resource_range(pdev, bar, vma, mmap_type, write_combine); } static int pci_mmap_resource_uc(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { return pci_mmap_resource(kobj, attr, vma, 0); } static int pci_mmap_resource_wc(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { return pci_mmap_resource(kobj, attr, vma, 1); } static ssize_t pci_resource_io(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count, bool write) { #ifdef CONFIG_HAS_IOPORT struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); int bar = (unsigned long)attr->private; unsigned long port = off; port += pci_resource_start(pdev, bar); if (port > pci_resource_end(pdev, bar)) return 0; if (port + count - 1 > pci_resource_end(pdev, bar)) return -EINVAL; switch (count) { case 1: if (write) outb(*(u8 *)buf, port); else *(u8 *)buf = inb(port); return 1; case 2: if (write) outw(*(u16 *)buf, port); else *(u16 *)buf = inw(port); return 2; case 4: if (write) outl(*(u32 *)buf, port); else *(u32 *)buf = inl(port); return 4; } return -EINVAL; #else return -ENXIO; #endif } static ssize_t pci_read_resource_io(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { return pci_resource_io(filp, kobj, attr, buf, off, count, false); } static ssize_t pci_write_resource_io(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { int ret; ret = security_locked_down(LOCKDOWN_PCI_ACCESS); if (ret) return ret; return pci_resource_io(filp, kobj, attr, buf, off, count, true); } /** * pci_remove_resource_files - cleanup resource files * @pdev: dev to cleanup * * If we created resource files for @pdev, remove them from sysfs and * free their resources. */ static void pci_remove_resource_files(struct pci_dev *pdev) { int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) { struct bin_attribute *res_attr; res_attr = pdev->res_attr[i]; if (res_attr) { sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); kfree(res_attr); } res_attr = pdev->res_attr_wc[i]; if (res_attr) { sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); kfree(res_attr); } } } static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine) { /* allocate attribute structure, piggyback attribute name */ int name_len = write_combine ? 
13 : 10; struct bin_attribute *res_attr; char *res_attr_name; int retval; res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC); if (!res_attr) return -ENOMEM; res_attr_name = (char *)(res_attr + 1); sysfs_bin_attr_init(res_attr); if (write_combine) { sprintf(res_attr_name, "resource%d_wc", num); res_attr->mmap = pci_mmap_resource_wc; } else { sprintf(res_attr_name, "resource%d", num); if (pci_resource_flags(pdev, num) & IORESOURCE_IO) { res_attr->read = pci_read_resource_io; res_attr->write = pci_write_resource_io; if (arch_can_pci_mmap_io()) res_attr->mmap = pci_mmap_resource_uc; } else { res_attr->mmap = pci_mmap_resource_uc; } } if (res_attr->mmap) res_attr->f_mapping = iomem_get_mapping; res_attr->attr.name = res_attr_name; res_attr->attr.mode = 0600; res_attr->size = pci_resource_len(pdev, num); res_attr->private = (void *)(unsigned long)num; retval = sysfs_create_bin_file(&pdev->dev.kobj, res_attr); if (retval) { kfree(res_attr); return retval; } if (write_combine) pdev->res_attr_wc[num] = res_attr; else pdev->res_attr[num] = res_attr; return 0; } /** * pci_create_resource_files - create resource files in sysfs for @dev * @pdev: dev in question * * Walk the resources in @pdev creating files for each resource available. */ static int pci_create_resource_files(struct pci_dev *pdev) { int i; int retval; /* Expose the PCI resources from this device as files */ for (i = 0; i < PCI_STD_NUM_BARS; i++) { /* skip empty resources */ if (!pci_resource_len(pdev, i)) continue; retval = pci_create_attr(pdev, i, 0); /* for prefetchable resources, create a WC mappable file */ if (!retval && arch_can_pci_mmap_wc() && pdev->resource[i].flags & IORESOURCE_PREFETCH) retval = pci_create_attr(pdev, i, 1); if (retval) { pci_remove_resource_files(pdev); return retval; } } return 0; } #else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */ int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; } void __weak pci_remove_resource_files(struct pci_dev *dev) { return; } #endif /** * pci_write_rom - used to enable access to the PCI ROM display * @filp: sysfs file * @kobj: kernel object handle * @bin_attr: struct bin_attribute for this file * @buf: user input * @off: file offset * @count: number of byte in input * * writing anything except 0 enables it */ static ssize_t pci_write_rom(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); if ((off == 0) && (*buf == '0') && (count == 2)) pdev->rom_attr_enabled = 0; else pdev->rom_attr_enabled = 1; return count; } /** * pci_read_rom - read a PCI ROM * @filp: sysfs file * @kobj: kernel object handle * @bin_attr: struct bin_attribute for this file * @buf: where to put the data we read from the ROM * @off: file offset * @count: number of bytes to read * * Put @count bytes starting at @off into @buf from the ROM in the PCI * device corresponding to @kobj. 
*/ static ssize_t pci_read_rom(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); void __iomem *rom; size_t size; if (!pdev->rom_attr_enabled) return -EINVAL; rom = pci_map_rom(pdev, &size); /* size starts out as PCI window size */ if (!rom || !size) return -EIO; if (off >= size) count = 0; else { if (off + count > size) count = size - off; memcpy_fromio(buf, rom + off, count); } pci_unmap_rom(pdev, rom); return count; } static BIN_ATTR(rom, 0600, pci_read_rom, pci_write_rom, 0); static struct bin_attribute *pci_dev_rom_attrs[] = { &bin_attr_rom, NULL, }; static umode_t pci_dev_rom_attr_is_visible(struct kobject *kobj, struct bin_attribute *a, int n) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); size_t rom_size; /* If the device has a ROM, try to expose it in sysfs. */ rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE); if (!rom_size) return 0; a->size = rom_size; return a->attr.mode; } static const struct attribute_group pci_dev_rom_attr_group = { .bin_attrs = pci_dev_rom_attrs, .is_bin_visible = pci_dev_rom_attr_is_visible, }; static ssize_t reset_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); unsigned long val; ssize_t result; if (kstrtoul(buf, 0, &val) < 0) return -EINVAL; if (val != 1) return -EINVAL; pm_runtime_get_sync(dev); result = pci_reset_function(pdev); pm_runtime_put(dev); if (result < 0) return result; return count; } static DEVICE_ATTR_WO(reset); static struct attribute *pci_dev_reset_attrs[] = { &dev_attr_reset.attr, NULL, }; static umode_t pci_dev_reset_attr_is_visible(struct kobject *kobj, struct attribute *a, int n) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); if (!pci_reset_supported(pdev)) return 0; return a->mode; } static const struct attribute_group pci_dev_reset_attr_group = { .attrs = pci_dev_reset_attrs, .is_visible = pci_dev_reset_attr_is_visible, }; #define pci_dev_resource_resize_attr(n) \ static ssize_t resource##n##_resize_show(struct device *dev, \ struct device_attribute *attr, \ char * buf) \ { \ struct pci_dev *pdev = to_pci_dev(dev); \ ssize_t ret; \ \ pci_config_pm_runtime_get(pdev); \ \ ret = sysfs_emit(buf, "%016llx\n", \ (u64)pci_rebar_get_possible_sizes(pdev, n)); \ \ pci_config_pm_runtime_put(pdev); \ \ return ret; \ } \ \ static ssize_t resource##n##_resize_store(struct device *dev, \ struct device_attribute *attr,\ const char *buf, size_t count)\ { \ struct pci_dev *pdev = to_pci_dev(dev); \ unsigned long size, flags; \ int ret, i; \ u16 cmd; \ \ if (kstrtoul(buf, 0, &size) < 0) \ return -EINVAL; \ \ device_lock(dev); \ if (dev->driver) { \ ret = -EBUSY; \ goto unlock; \ } \ \ pci_config_pm_runtime_get(pdev); \ \ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA) { \ ret = aperture_remove_conflicting_pci_devices(pdev, \ "resourceN_resize"); \ if (ret) \ goto pm_put; \ } \ \ pci_read_config_word(pdev, PCI_COMMAND, &cmd); \ pci_write_config_word(pdev, PCI_COMMAND, \ cmd & ~PCI_COMMAND_MEMORY); \ \ flags = pci_resource_flags(pdev, n); \ \ pci_remove_resource_files(pdev); \ \ for (i = 0; i < PCI_STD_NUM_BARS; i++) { \ if (pci_resource_len(pdev, i) && \ pci_resource_flags(pdev, i) == flags) \ pci_release_resource(pdev, i); \ } \ \ ret = pci_resize_resource(pdev, n, size); \ \ pci_assign_unassigned_bus_resources(pdev->bus); \ \ if (pci_create_resource_files(pdev)) \ pci_warn(pdev, "Failed to recreate resource files after BAR 
resizing\n");\ \ pci_write_config_word(pdev, PCI_COMMAND, cmd); \ pm_put: \ pci_config_pm_runtime_put(pdev); \ unlock: \ device_unlock(dev); \ \ return ret ? ret : count; \ } \ static DEVICE_ATTR_RW(resource##n##_resize) pci_dev_resource_resize_attr(0); pci_dev_resource_resize_attr(1); pci_dev_resource_resize_attr(2); pci_dev_resource_resize_attr(3); pci_dev_resource_resize_attr(4); pci_dev_resource_resize_attr(5); static struct attribute *resource_resize_attrs[] = { &dev_attr_resource0_resize.attr, &dev_attr_resource1_resize.attr, &dev_attr_resource2_resize.attr, &dev_attr_resource3_resize.attr, &dev_attr_resource4_resize.attr, &dev_attr_resource5_resize.attr, NULL, }; static umode_t resource_resize_is_visible(struct kobject *kobj, struct attribute *a, int n) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); return pci_rebar_get_current_size(pdev, n) < 0 ? 0 : a->mode; } static const struct attribute_group pci_dev_resource_resize_group = { .attrs = resource_resize_attrs, .is_visible = resource_resize_is_visible, }; int __must_check pci_create_sysfs_dev_files(struct pci_dev *pdev) { if (!sysfs_initialized) return -EACCES; return pci_create_resource_files(pdev); } /** * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files * @pdev: device whose entries we should free * * Cleanup when @pdev is removed from sysfs. */ void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { if (!sysfs_initialized) return; pci_remove_resource_files(pdev); } static int __init pci_sysfs_init(void) { struct pci_dev *pdev = NULL; struct pci_bus *pbus = NULL; int retval; sysfs_initialized = 1; for_each_pci_dev(pdev) { retval = pci_create_sysfs_dev_files(pdev); if (retval) { pci_dev_put(pdev); return retval; } } while ((pbus = pci_find_next_bus(pbus))) pci_create_legacy_files(pbus); return 0; } late_initcall(pci_sysfs_init); static struct attribute *pci_dev_dev_attrs[] = { &dev_attr_boot_vga.attr, NULL, }; static umode_t pci_dev_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); struct pci_dev *pdev = to_pci_dev(dev); if (a == &dev_attr_boot_vga.attr) if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) return 0; return a->mode; } static struct attribute *pci_dev_hp_attrs[] = { &dev_attr_remove.attr, &dev_attr_dev_rescan.attr, NULL, }; static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); struct pci_dev *pdev = to_pci_dev(dev); if (pdev->is_virtfn) return 0; return a->mode; } static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); struct pci_dev *pdev = to_pci_dev(dev); if (pci_is_bridge(pdev)) return a->mode; return 0; } static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); struct pci_dev *pdev = to_pci_dev(dev); if (pci_is_pcie(pdev)) return a->mode; return 0; } static const struct attribute_group pci_dev_group = { .attrs = pci_dev_attrs, }; const struct attribute_group *pci_dev_groups[] = { &pci_dev_group, &pci_dev_config_attr_group, &pci_dev_rom_attr_group, &pci_dev_reset_attr_group, &pci_dev_reset_method_attr_group, &pci_dev_vpd_attr_group, #ifdef CONFIG_DMI &pci_dev_smbios_attr_group, #endif #ifdef CONFIG_ACPI &pci_dev_acpi_attr_group, #endif &pci_dev_resource_resize_group, NULL, }; static const struct attribute_group pci_dev_hp_attr_group = { .attrs = pci_dev_hp_attrs, .is_visible = 
pci_dev_hp_attrs_are_visible, }; static const struct attribute_group pci_dev_attr_group = { .attrs = pci_dev_dev_attrs, .is_visible = pci_dev_attrs_are_visible, }; static const struct attribute_group pci_bridge_attr_group = { .attrs = pci_bridge_attrs, .is_visible = pci_bridge_attrs_are_visible, }; static const struct attribute_group pcie_dev_attr_group = { .attrs = pcie_dev_attrs, .is_visible = pcie_dev_attrs_are_visible, }; static const struct attribute_group *pci_dev_attr_groups[] = { &pci_dev_attr_group, &pci_dev_hp_attr_group, #ifdef CONFIG_PCI_IOV &sriov_pf_dev_attr_group, &sriov_vf_dev_attr_group, #endif &pci_bridge_attr_group, &pcie_dev_attr_group, #ifdef CONFIG_PCIEAER &aer_stats_attr_group, #endif #ifdef CONFIG_PCIEASPM &aspm_ctrl_attr_group, #endif NULL, }; const struct device_type pci_dev_type = { .groups = pci_dev_attr_groups, };
linux-master
drivers/pci/pci-sysfs.c
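A minimal userspace sketch of how the resource%d attributes created above by pci_create_attr() are typically consumed, assuming a hypothetical device at 0000:00:00.0 whose BAR0 is a memory BAR at least one page long. Opening the file needs root (the attributes are mode 0600), and the mmap path can be refused under kernel lockdown (LOCKDOWN_PCI_ACCESS). This is an illustration, not part of the kernel sources.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* Hypothetical BDF; assumes BAR0 is a memory BAR >= one page */
        const char *path = "/sys/bus/pci/devices/0000:00:00.0/resource0";
        volatile uint32_t *bar;
        void *map;
        int fd;

        fd = open(path, O_RDWR | O_SYNC);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /*
         * pci_mmap_fits() rejects offsets/lengths outside the BAR, so
         * map a single page at offset 0.
         */
        map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (map == MAP_FAILED) {
                perror("mmap");
                close(fd);
                return 1;
        }
        bar = map;

        printf("BAR0[0x0] = 0x%08x\n", (unsigned int)bar[0]);

        munmap(map, 4096);
        close(fd);
        return 0;
}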
// SPDX-License-Identifier: GPL-2.0
/*
 * Intel MID platform PM support
 *
 * Copyright (C) 2016, Intel Corporation
 *
 * Author: Andy Shevchenko <[email protected]>
 */

#include <linux/init.h>
#include <linux/pci.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/intel-mid.h>

#include "pci.h"

static bool pci_mid_pm_enabled __read_mostly;

bool pci_use_mid_pm(void)
{
        return pci_mid_pm_enabled;
}

int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
{
        return intel_mid_pci_set_power_state(pdev, state);
}

pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
{
        return intel_mid_pci_get_power_state(pdev);
}

/*
 * This table should be in sync with the one in
 * arch/x86/platform/intel-mid/pwr.c.
 */
static const struct x86_cpu_id lpss_cpu_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_SALTWELL_MID, NULL),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, NULL),
        {}
};

static int __init mid_pci_init(void)
{
        const struct x86_cpu_id *id;

        id = x86_match_cpu(lpss_cpu_ids);
        if (id)
                pci_mid_pm_enabled = true;

        return 0;
}
arch_initcall(mid_pci_init);
linux-master
drivers/pci/pci-mid.c
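An illustrative sketch, not the PCI core's actual dispatch code, of how a caller can gate on pci_use_mid_pm() before using the MID power-state hooks defined above. The function name example_platform_set_power() is a placeholder introduced only for this example.

#include <linux/pci.h>

#include "pci.h"

static int __maybe_unused example_platform_set_power(struct pci_dev *pdev,
                                                     pci_power_t state)
{
        /* Only Intel MID platforms (matched in mid_pci_init()) take this path */
        if (!pci_use_mid_pm())
                return -ENODEV; /* caller falls back to ACPI or native PM */

        return mid_pci_set_power_state(pdev, state);
}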
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI IRQ handling code
 *
 * Copyright (c) 2008 James Bottomley <[email protected]>
 * Copyright (C) 2017 Christoph Hellwig.
 */

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/pci.h>

/**
 * pci_request_irq - allocate an interrupt line for a PCI device
 * @dev: PCI device to operate on
 * @nr: device-relative interrupt vector index (0-based).
 * @handler: Function to be called when the IRQ occurs.
 *	Primary handler for threaded interrupts.
 *	If NULL and thread_fn != NULL the default primary handler is
 *	installed.
 * @thread_fn: Function called from the IRQ handler thread
 *	If NULL, no IRQ thread is created
 * @dev_id: Cookie passed back to the handler function
 * @fmt: Printf-like format string naming the handler
 *
 * This call allocates interrupt resources and enables the interrupt line and
 * IRQ handling. From the point this call is made @handler and @thread_fn may
 * be invoked. All interrupts requested using this function might be shared.
 *
 * @dev_id must not be NULL and must be globally unique.
 */
int pci_request_irq(struct pci_dev *dev, unsigned int nr, irq_handler_t handler,
		irq_handler_t thread_fn, void *dev_id, const char *fmt, ...)
{
        va_list ap;
        int ret;
        char *devname;
        unsigned long irqflags = IRQF_SHARED;

        if (!handler)
                irqflags |= IRQF_ONESHOT;

        va_start(ap, fmt);
        devname = kvasprintf(GFP_KERNEL, fmt, ap);
        va_end(ap);
        if (!devname)
                return -ENOMEM;

        ret = request_threaded_irq(pci_irq_vector(dev, nr), handler, thread_fn,
                                   irqflags, devname, dev_id);
        if (ret)
                kfree(devname);
        return ret;
}
EXPORT_SYMBOL(pci_request_irq);

/**
 * pci_free_irq - free an interrupt allocated with pci_request_irq
 * @dev: PCI device to operate on
 * @nr: device-relative interrupt vector index (0-based).
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the interrupt
 * line is no longer in use by any driver it is disabled. The caller must
 * ensure the interrupt is disabled on the device before calling this function.
 * The function does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id)
{
        kfree(free_irq(pci_irq_vector(dev, nr), dev_id));
}
EXPORT_SYMBOL(pci_free_irq);
linux-master
drivers/pci/irq.c
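A usage sketch for the helpers above, assuming a hypothetical driver: pci_alloc_irq_vectors() is commonly paired with pci_request_irq()/pci_free_irq(). struct my_dev, struct my_queue, my_irq_handler() and MY_MAX_VECS are illustrative placeholders; a per-queue cookie keeps @dev_id distinct for each registration.

#include <linux/interrupt.h>
#include <linux/pci.h>

#define MY_MAX_VECS 4

struct my_queue {
        struct my_dev *mydev;
        int index;
};

struct my_dev {
        struct pci_dev *pdev;
        int nvec;
        struct my_queue queues[MY_MAX_VECS];
};

static irqreturn_t my_irq_handler(int irq, void *data)
{
        struct my_queue *q = data;

        /* A real handler would acknowledge the device for queue q->index */
        pr_debug("IRQ %d fired for queue %d\n", irq, q->index);
        return IRQ_HANDLED;
}

static int my_setup_irqs(struct my_dev *mydev)
{
        struct pci_dev *pdev = mydev->pdev;
        int i, ret;

        /* Fall back from MSI-X/MSI to INTx as needed */
        ret = pci_alloc_irq_vectors(pdev, 1, MY_MAX_VECS, PCI_IRQ_ALL_TYPES);
        if (ret < 0)
                return ret;
        mydev->nvec = ret;

        for (i = 0; i < mydev->nvec; i++) {
                mydev->queues[i].mydev = mydev;
                mydev->queues[i].index = i;
                /* Per-queue cookie keeps @dev_id unique per registration */
                ret = pci_request_irq(pdev, i, my_irq_handler, NULL,
                                      &mydev->queues[i], "my_dev-q%d", i);
                if (ret)
                        goto err_free;
        }
        return 0;

err_free:
        while (--i >= 0)
                pci_free_irq(pdev, i, &mydev->queues[i]);
        pci_free_irq_vectors(pdev);
        return ret;
}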
// SPDX-License-Identifier: GPL-2.0 /* * Support routines for initializing a PCI subsystem * * Extruded from code written by * Dave Rusling ([email protected]) * David Mosberger ([email protected]) * David Miller ([email protected]) * * Nov 2000, Ivan Kokshaysky <[email protected]> * PCI-PCI bridges cleanup, sorted resource allocation. * Feb 2002, Ivan Kokshaysky <[email protected]> * Converted to allocation in 3 passes, which gives * tighter packing. Prefetchable range support. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/cache.h> #include <linux/slab.h> #include <linux/acpi.h> #include "pci.h" unsigned int pci_flags; EXPORT_SYMBOL_GPL(pci_flags); struct pci_dev_resource { struct list_head list; struct resource *res; struct pci_dev *dev; resource_size_t start; resource_size_t end; resource_size_t add_size; resource_size_t min_align; unsigned long flags; }; static void free_list(struct list_head *head) { struct pci_dev_resource *dev_res, *tmp; list_for_each_entry_safe(dev_res, tmp, head, list) { list_del(&dev_res->list); kfree(dev_res); } } /** * add_to_list() - Add a new resource tracker to the list * @head: Head of the list * @dev: Device to which the resource belongs * @res: Resource to be tracked * @add_size: Additional size to be optionally added to the resource * @min_align: Minimum memory window alignment */ static int add_to_list(struct list_head *head, struct pci_dev *dev, struct resource *res, resource_size_t add_size, resource_size_t min_align) { struct pci_dev_resource *tmp; tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) return -ENOMEM; tmp->res = res; tmp->dev = dev; tmp->start = res->start; tmp->end = res->end; tmp->flags = res->flags; tmp->add_size = add_size; tmp->min_align = min_align; list_add(&tmp->list, head); return 0; } static void remove_from_list(struct list_head *head, struct resource *res) { struct pci_dev_resource *dev_res, *tmp; list_for_each_entry_safe(dev_res, tmp, head, list) { if (dev_res->res == res) { list_del(&dev_res->list); kfree(dev_res); break; } } } static struct pci_dev_resource *res_to_dev_res(struct list_head *head, struct resource *res) { struct pci_dev_resource *dev_res; list_for_each_entry(dev_res, head, list) { if (dev_res->res == res) return dev_res; } return NULL; } static resource_size_t get_res_add_size(struct list_head *head, struct resource *res) { struct pci_dev_resource *dev_res; dev_res = res_to_dev_res(head, res); return dev_res ? dev_res->add_size : 0; } static resource_size_t get_res_add_align(struct list_head *head, struct resource *res) { struct pci_dev_resource *dev_res; dev_res = res_to_dev_res(head, res); return dev_res ? 
dev_res->min_align : 0; } /* Sort resources by alignment */ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) { struct resource *r; int i; pci_dev_for_each_resource(dev, r, i) { struct pci_dev_resource *dev_res, *tmp; resource_size_t r_align; struct list_head *n; if (r->flags & IORESOURCE_PCI_FIXED) continue; if (!(r->flags) || r->parent) continue; r_align = pci_resource_alignment(dev, r); if (!r_align) { pci_warn(dev, "BAR %d: %pR has bogus alignment\n", i, r); continue; } tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) panic("%s: kzalloc() failed!\n", __func__); tmp->res = r; tmp->dev = dev; /* Fallback is smallest one or list is empty */ n = head; list_for_each_entry(dev_res, head, list) { resource_size_t align; align = pci_resource_alignment(dev_res->dev, dev_res->res); if (r_align > align) { n = &dev_res->list; break; } } /* Insert it just before n */ list_add_tail(&tmp->list, n); } } static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head) { u16 class = dev->class >> 8; /* Don't touch classless devices or host bridges or IOAPICs */ if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST) return; /* Don't touch IOAPIC devices already enabled by firmware */ if (class == PCI_CLASS_SYSTEM_PIC) { u16 command; pci_read_config_word(dev, PCI_COMMAND, &command); if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) return; } pdev_sort_resources(dev, head); } static inline void reset_resource(struct resource *res) { res->start = 0; res->end = 0; res->flags = 0; } /** * reassign_resources_sorted() - Satisfy any additional resource requests * * @realloc_head: Head of the list tracking requests requiring * additional resources * @head: Head of the list tracking requests with allocated * resources * * Walk through each element of the realloc_head and try to procure additional * resources for the element, provided the element is in the head list. */ static void reassign_resources_sorted(struct list_head *realloc_head, struct list_head *head) { struct resource *res; struct pci_dev_resource *add_res, *tmp; struct pci_dev_resource *dev_res; resource_size_t add_size, align; int idx; list_for_each_entry_safe(add_res, tmp, realloc_head, list) { bool found_match = false; res = add_res->res; /* Skip resource that has been reset */ if (!res->flags) goto out; /* Skip this resource if not found in head list */ list_for_each_entry(dev_res, head, list) { if (dev_res->res == res) { found_match = true; break; } } if (!found_match) /* Just skip */ continue; idx = res - &add_res->dev->resource[0]; add_size = add_res->add_size; align = add_res->min_align; if (!resource_size(res)) { res->start = align; res->end = res->start + add_size - 1; if (pci_assign_resource(add_res->dev, idx)) reset_resource(res); } else { res->flags |= add_res->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); if (pci_reassign_resource(add_res->dev, idx, add_size, align)) pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n", (unsigned long long) add_size, idx, res); } out: list_del(&add_res->list); kfree(add_res); } } /** * assign_requested_resources_sorted() - Satisfy resource requests * * @head: Head of the list tracking requests for resources * @fail_head: Head of the list tracking requests that could not be * allocated * * Satisfy resource requests of each element in the list. Add requests that * could not be satisfied to the failed_list. 
*/ static void assign_requested_resources_sorted(struct list_head *head, struct list_head *fail_head) { struct resource *res; struct pci_dev_resource *dev_res; int idx; list_for_each_entry(dev_res, head, list) { res = dev_res->res; idx = res - &dev_res->dev->resource[0]; if (resource_size(res) && pci_assign_resource(dev_res->dev, idx)) { if (fail_head) { /* * If the failed resource is a ROM BAR and * it will be enabled later, don't add it * to the list. */ if (!((idx == PCI_ROM_RESOURCE) && (!(res->flags & IORESOURCE_ROM_ENABLE)))) add_to_list(fail_head, dev_res->dev, res, 0 /* don't care */, 0 /* don't care */); } reset_resource(res); } } } static unsigned long pci_fail_res_type_mask(struct list_head *fail_head) { struct pci_dev_resource *fail_res; unsigned long mask = 0; /* Check failed type */ list_for_each_entry(fail_res, fail_head, list) mask |= fail_res->flags; /* * One pref failed resource will set IORESOURCE_MEM, as we can * allocate pref in non-pref range. Will release all assigned * non-pref sibling resources according to that bit. */ return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH); } static bool pci_need_to_release(unsigned long mask, struct resource *res) { if (res->flags & IORESOURCE_IO) return !!(mask & IORESOURCE_IO); /* Check pref at first */ if (res->flags & IORESOURCE_PREFETCH) { if (mask & IORESOURCE_PREFETCH) return true; /* Count pref if its parent is non-pref */ else if ((mask & IORESOURCE_MEM) && !(res->parent->flags & IORESOURCE_PREFETCH)) return true; else return false; } if (res->flags & IORESOURCE_MEM) return !!(mask & IORESOURCE_MEM); return false; /* Should not get here */ } static void __assign_resources_sorted(struct list_head *head, struct list_head *realloc_head, struct list_head *fail_head) { /* * Should not assign requested resources at first. They could be * adjacent, so later reassign can not reallocate them one by one in * parent resource window. * * Try to assign requested + add_size at beginning. If could do that, * could get out early. If could not do that, we still try to assign * requested at first, then try to reassign add_size for some resources. * * Separate three resource type checking if we need to release * assigned resource after requested + add_size try. * * 1. If IO port assignment fails, will release assigned IO * port. * 2. If pref MMIO assignment fails, release assigned pref * MMIO. If assigned pref MMIO's parent is non-pref MMIO * and non-pref MMIO assignment fails, will release that * assigned pref MMIO. * 3. If non-pref MMIO assignment fails or pref MMIO * assignment fails, will release assigned non-pref MMIO. */ LIST_HEAD(save_head); LIST_HEAD(local_fail_head); struct pci_dev_resource *save_res; struct pci_dev_resource *dev_res, *tmp_res, *dev_res2; unsigned long fail_type; resource_size_t add_align, align; /* Check if optional add_size is there */ if (!realloc_head || list_empty(realloc_head)) goto requested_and_reassign; /* Save original start, end, flags etc at first */ list_for_each_entry(dev_res, head, list) { if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) { free_list(&save_head); goto requested_and_reassign; } } /* Update res in head list with add_size in realloc_head list */ list_for_each_entry_safe(dev_res, tmp_res, head, list) { dev_res->res->end += get_res_add_size(realloc_head, dev_res->res); /* * There are two kinds of additional resources in the list: * 1. bridge resource -- IORESOURCE_STARTALIGN * 2. 
SR-IOV resource -- IORESOURCE_SIZEALIGN * Here just fix the additional alignment for bridge */ if (!(dev_res->res->flags & IORESOURCE_STARTALIGN)) continue; add_align = get_res_add_align(realloc_head, dev_res->res); /* * The "head" list is sorted by alignment so resources with * bigger alignment will be assigned first. After we * change the alignment of a dev_res in "head" list, we * need to reorder the list by alignment to make it * consistent. */ if (add_align > dev_res->res->start) { resource_size_t r_size = resource_size(dev_res->res); dev_res->res->start = add_align; dev_res->res->end = add_align + r_size - 1; list_for_each_entry(dev_res2, head, list) { align = pci_resource_alignment(dev_res2->dev, dev_res2->res); if (add_align > align) { list_move_tail(&dev_res->list, &dev_res2->list); break; } } } } /* Try updated head list with add_size added */ assign_requested_resources_sorted(head, &local_fail_head); /* All assigned with add_size? */ if (list_empty(&local_fail_head)) { /* Remove head list from realloc_head list */ list_for_each_entry(dev_res, head, list) remove_from_list(realloc_head, dev_res->res); free_list(&save_head); free_list(head); return; } /* Check failed type */ fail_type = pci_fail_res_type_mask(&local_fail_head); /* Remove not need to be released assigned res from head list etc */ list_for_each_entry_safe(dev_res, tmp_res, head, list) if (dev_res->res->parent && !pci_need_to_release(fail_type, dev_res->res)) { /* Remove it from realloc_head list */ remove_from_list(realloc_head, dev_res->res); remove_from_list(&save_head, dev_res->res); list_del(&dev_res->list); kfree(dev_res); } free_list(&local_fail_head); /* Release assigned resource */ list_for_each_entry(dev_res, head, list) if (dev_res->res->parent) release_resource(dev_res->res); /* Restore start/end/flags from saved list */ list_for_each_entry(save_res, &save_head, list) { struct resource *res = save_res->res; res->start = save_res->start; res->end = save_res->end; res->flags = save_res->flags; } free_list(&save_head); requested_and_reassign: /* Satisfy the must-have resource requests */ assign_requested_resources_sorted(head, fail_head); /* Try to satisfy any additional optional resource requests */ if (realloc_head) reassign_resources_sorted(realloc_head, head); free_list(head); } static void pdev_assign_resources_sorted(struct pci_dev *dev, struct list_head *add_head, struct list_head *fail_head) { LIST_HEAD(head); __dev_sort_resources(dev, &head); __assign_resources_sorted(&head, add_head, fail_head); } static void pbus_assign_resources_sorted(const struct pci_bus *bus, struct list_head *realloc_head, struct list_head *fail_head) { struct pci_dev *dev; LIST_HEAD(head); list_for_each_entry(dev, &bus->devices, bus_list) __dev_sort_resources(dev, &head); __assign_resources_sorted(&head, realloc_head, fail_head); } void pci_setup_cardbus(struct pci_bus *bus) { struct pci_dev *bridge = bus->self; struct resource *res; struct pci_bus_region region; pci_info(bridge, "CardBus bridge to %pR\n", &bus->busn_res); res = bus->resource[0]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_IO) { /* * The IO resource is allocated a range twice as large as it * would normally need. This allows us to set both IO regs. 
*/ pci_info(bridge, " bridge window %pR\n", res); pci_write_config_dword(bridge, PCI_CB_IO_BASE_0, region.start); pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0, region.end); } res = bus->resource[1]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_IO) { pci_info(bridge, " bridge window %pR\n", res); pci_write_config_dword(bridge, PCI_CB_IO_BASE_1, region.start); pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1, region.end); } res = bus->resource[2]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_MEM) { pci_info(bridge, " bridge window %pR\n", res); pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0, region.start); pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0, region.end); } res = bus->resource[3]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_MEM) { pci_info(bridge, " bridge window %pR\n", res); pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1, region.start); pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1, region.end); } } EXPORT_SYMBOL(pci_setup_cardbus); /* * Initialize bridges with base/limit values we have collected. PCI-to-PCI * Bridge Architecture Specification rev. 1.1 (1998) requires that if there * are no I/O ports or memory behind the bridge, the corresponding range * must be turned off by writing base value greater than limit to the * bridge's base/limit registers. * * Note: care must be taken when updating I/O base/limit registers of * bridges which support 32-bit I/O. This update requires two config space * writes, so it's quite possible that an I/O window of the bridge will * have some undesirable address (e.g. 0) after the first write. Ditto * 64-bit prefetchable MMIO. */ static void pci_setup_bridge_io(struct pci_dev *bridge) { struct resource *res; struct pci_bus_region region; unsigned long io_mask; u8 io_base_lo, io_limit_lo; u16 l; u32 io_upper16; io_mask = PCI_IO_RANGE_MASK; if (bridge->io_window_1k) io_mask = PCI_IO_1K_RANGE_MASK; /* Set up the top and bottom of the PCI I/O segment for this bus */ res = &bridge->resource[PCI_BRIDGE_IO_WINDOW]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_IO) { pci_read_config_word(bridge, PCI_IO_BASE, &l); io_base_lo = (region.start >> 8) & io_mask; io_limit_lo = (region.end >> 8) & io_mask; l = ((u16) io_limit_lo << 8) | io_base_lo; /* Set up upper 16 bits of I/O base/limit */ io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); pci_info(bridge, " bridge window %pR\n", res); } else { /* Clear upper 16 bits of I/O base/limit */ io_upper16 = 0; l = 0x00f0; } /* Temporarily disable the I/O range before updating PCI_IO_BASE */ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); /* Update lower 16 bits of I/O base/limit */ pci_write_config_word(bridge, PCI_IO_BASE, l); /* Update upper 16 bits of I/O base/limit */ pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); } static void pci_setup_bridge_mmio(struct pci_dev *bridge) { struct resource *res; struct pci_bus_region region; u32 l; /* Set up the top and bottom of the PCI Memory segment for this bus */ res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_MEM) { l = (region.start >> 16) & 0xfff0; l |= region.end & 0xfff00000; pci_info(bridge, " bridge window %pR\n", res); } else { l = 0x0000fff0; } pci_write_config_dword(bridge, PCI_MEMORY_BASE, l); } static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) { struct 
resource *res; struct pci_bus_region region; u32 l, bu, lu; /* * Clear out the upper 32 bits of PREF limit. If * PCI_PREF_BASE_UPPER32 was non-zero, this temporarily disables * PREF range, which is ok. */ pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); /* Set up PREF base/limit */ bu = lu = 0; res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; pcibios_resource_to_bus(bridge->bus, &region, res); if (res->flags & IORESOURCE_PREFETCH) { l = (region.start >> 16) & 0xfff0; l |= region.end & 0xfff00000; if (res->flags & IORESOURCE_MEM_64) { bu = upper_32_bits(region.start); lu = upper_32_bits(region.end); } pci_info(bridge, " bridge window %pR\n", res); } else { l = 0x0000fff0; } pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); /* Set the upper 32 bits of PREF base & limit */ pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); } static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type) { struct pci_dev *bridge = bus->self; pci_info(bridge, "PCI bridge to %pR\n", &bus->busn_res); if (type & IORESOURCE_IO) pci_setup_bridge_io(bridge); if (type & IORESOURCE_MEM) pci_setup_bridge_mmio(bridge); if (type & IORESOURCE_PREFETCH) pci_setup_bridge_mmio_pref(bridge); pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl); } void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type) { } void pci_setup_bridge(struct pci_bus *bus) { unsigned long type = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; pcibios_setup_bridge(bus, type); __pci_setup_bridge(bus, type); } int pci_claim_bridge_resource(struct pci_dev *bridge, int i) { if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END) return 0; if (pci_claim_resource(bridge, i) == 0) return 0; /* Claimed the window */ if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI) return 0; if (!pci_bus_clip_resource(bridge, i)) return -EINVAL; /* Clipping didn't change anything */ switch (i) { case PCI_BRIDGE_IO_WINDOW: pci_setup_bridge_io(bridge); break; case PCI_BRIDGE_MEM_WINDOW: pci_setup_bridge_mmio(bridge); break; case PCI_BRIDGE_PREF_MEM_WINDOW: pci_setup_bridge_mmio_pref(bridge); break; default: return -EINVAL; } if (pci_claim_resource(bridge, i) == 0) return 0; /* Claimed a smaller window */ return -EINVAL; } /* * Check whether the bridge supports optional I/O and prefetchable memory * ranges. If not, the respective base/limit registers must be read-only * and read as 0. */ static void pci_bridge_check_ranges(struct pci_bus *bus) { struct pci_dev *bridge = bus->self; struct resource *b_res; b_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW]; b_res->flags |= IORESOURCE_MEM; if (bridge->io_window) { b_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW]; b_res->flags |= IORESOURCE_IO; } if (bridge->pref_window) { b_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; if (bridge->pref_64_window) { b_res->flags |= IORESOURCE_MEM_64 | PCI_PREF_RANGE_TYPE_64; } } } /* * Helper function for sizing routines. Assigned resources have non-NULL * parent resource. * * Return first unassigned resource of the correct type. If there is none, * return first assigned resource of the correct type. If none of the * above, return NULL. * * Returning an assigned resource of the correct type allows the caller to * distinguish between already assigned and no resource of the correct type. 
*/ static struct resource *find_bus_resource_of_type(struct pci_bus *bus, unsigned long type_mask, unsigned long type) { struct resource *r, *r_assigned = NULL; pci_bus_for_each_resource(bus, r) { if (r == &ioport_resource || r == &iomem_resource) continue; if (r && (r->flags & type_mask) == type && !r->parent) return r; if (r && (r->flags & type_mask) == type && !r_assigned) r_assigned = r; } return r_assigned; } static resource_size_t calculate_iosize(resource_size_t size, resource_size_t min_size, resource_size_t size1, resource_size_t add_size, resource_size_t children_add_size, resource_size_t old_size, resource_size_t align) { if (size < min_size) size = min_size; if (old_size == 1) old_size = 0; /* * To be fixed in 2.5: we should have sort of HAVE_ISA flag in the * struct pci_bus. */ #if defined(CONFIG_ISA) || defined(CONFIG_EISA) size = (size & 0xff) + ((size & ~0xffUL) << 2); #endif size = size + size1; if (size < old_size) size = old_size; size = ALIGN(max(size, add_size) + children_add_size, align); return size; } static resource_size_t calculate_memsize(resource_size_t size, resource_size_t min_size, resource_size_t add_size, resource_size_t children_add_size, resource_size_t old_size, resource_size_t align) { if (size < min_size) size = min_size; if (old_size == 1) old_size = 0; if (size < old_size) size = old_size; size = ALIGN(max(size, add_size) + children_add_size, align); return size; } resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus, unsigned long type) { return 1; } #define PCI_P2P_DEFAULT_MEM_ALIGN 0x100000 /* 1MiB */ #define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */ #define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */ static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type) { resource_size_t align = 1, arch_align; if (type & IORESOURCE_MEM) align = PCI_P2P_DEFAULT_MEM_ALIGN; else if (type & IORESOURCE_IO) { /* * Per spec, I/O windows are 4K-aligned, but some bridges have * an extension to support 1K alignment. */ if (bus->self && bus->self->io_window_1k) align = PCI_P2P_DEFAULT_IO_ALIGN_1K; else align = PCI_P2P_DEFAULT_IO_ALIGN; } arch_align = pcibios_window_alignment(bus, type); return max(align, arch_align); } /** * pbus_size_io() - Size the I/O window of a given bus * * @bus: The bus * @min_size: The minimum I/O window that must be allocated * @add_size: Additional optional I/O window * @realloc_head: Track the additional I/O window on this list * * Sizing the I/O windows of the PCI-PCI bridge is trivial, since these * windows have 1K or 4K granularity and the I/O ranges of non-bridge PCI * devices are limited to 256 bytes. We must be careful with the ISA * aliasing though. 
*/ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, resource_size_t add_size, struct list_head *realloc_head) { struct pci_dev *dev; struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO, IORESOURCE_IO); resource_size_t size = 0, size0 = 0, size1 = 0; resource_size_t children_add_size = 0; resource_size_t min_align, align; if (!b_res) return; /* If resource is already assigned, nothing more to do */ if (b_res->parent) return; min_align = window_alignment(bus, IORESOURCE_IO); list_for_each_entry(dev, &bus->devices, bus_list) { struct resource *r; pci_dev_for_each_resource(dev, r) { unsigned long r_size; if (r->parent || !(r->flags & IORESOURCE_IO)) continue; r_size = resource_size(r); if (r_size < 0x400) /* Might be re-aligned for ISA */ size += r_size; else size1 += r_size; align = pci_resource_alignment(dev, r); if (align > min_align) min_align = align; if (realloc_head) children_add_size += get_res_add_size(realloc_head, r); } } size0 = calculate_iosize(size, min_size, size1, 0, 0, resource_size(b_res), min_align); size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 : calculate_iosize(size, min_size, size1, add_size, children_add_size, resource_size(b_res), min_align); if (!size0 && !size1) { if (bus->self && (b_res->start || b_res->end)) pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n", b_res, &bus->busn_res); b_res->flags = 0; return; } b_res->start = min_align; b_res->end = b_res->start + size0 - 1; b_res->flags |= IORESOURCE_STARTALIGN; if (bus->self && size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, min_align); pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n", b_res, &bus->busn_res, (unsigned long long) size1 - size0); } } static inline resource_size_t calculate_mem_align(resource_size_t *aligns, int max_order) { resource_size_t align = 0; resource_size_t min_align = 0; int order; for (order = 0; order <= max_order; order++) { resource_size_t align1 = 1; align1 <<= (order + 20); if (!align) min_align = align1; else if (ALIGN(align + min_align, min_align) < align1) min_align = align1 >> 1; align += aligns[order]; } return min_align; } /** * pbus_size_mem() - Size the memory window of a given bus * * @bus: The bus * @mask: Mask the resource flag, then compare it with type * @type: The type of free resource from bridge * @type2: Second match type * @type3: Third match type * @min_size: The minimum memory window that must be allocated * @add_size: Additional optional memory window * @realloc_head: Track the additional memory window on this list * * Calculate the size of the bus and minimal alignment which guarantees * that all child resources fit in this size. * * Return -ENOSPC if there's no available bus resource of the desired * type. Otherwise, set the bus resource start/end to indicate the * required size, add things to realloc_head (if supplied), and return 0. 
*/ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long type, unsigned long type2, unsigned long type3, resource_size_t min_size, resource_size_t add_size, struct list_head *realloc_head) { struct pci_dev *dev; resource_size_t min_align, align, size, size0, size1; resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */ int order, max_order; struct resource *b_res = find_bus_resource_of_type(bus, mask | IORESOURCE_PREFETCH, type); resource_size_t children_add_size = 0; resource_size_t children_add_align = 0; resource_size_t add_align = 0; if (!b_res) return -ENOSPC; /* If resource is already assigned, nothing more to do */ if (b_res->parent) return 0; memset(aligns, 0, sizeof(aligns)); max_order = 0; size = 0; list_for_each_entry(dev, &bus->devices, bus_list) { struct resource *r; int i; pci_dev_for_each_resource(dev, r, i) { resource_size_t r_size; if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) || ((r->flags & mask) != type && (r->flags & mask) != type2 && (r->flags & mask) != type3)) continue; r_size = resource_size(r); #ifdef CONFIG_PCI_IOV /* Put SRIOV requested res to the optional list */ if (realloc_head && i >= PCI_IOV_RESOURCES && i <= PCI_IOV_RESOURCE_END) { add_align = max(pci_resource_alignment(dev, r), add_align); r->end = r->start - 1; add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */); children_add_size += r_size; continue; } #endif /* * aligns[0] is for 1MB (since bridge memory * windows are always at least 1MB aligned), so * keep "order" from being negative for smaller * resources. */ align = pci_resource_alignment(dev, r); order = __ffs(align) - 20; if (order < 0) order = 0; if (order >= ARRAY_SIZE(aligns)) { pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n", i, r, (unsigned long long) align); r->flags = 0; continue; } size += max(r_size, align); /* * Exclude ranges with size > align from calculation of * the alignment. */ if (r_size <= align) aligns[order] += align; if (order > max_order) max_order = order; if (realloc_head) { children_add_size += get_res_add_size(realloc_head, r); children_add_align = get_res_add_align(realloc_head, r); add_align = max(add_align, children_add_align); } } } min_align = calculate_mem_align(aligns, max_order); min_align = max(min_align, window_alignment(bus, b_res->flags)); size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align); add_align = max(min_align, add_align); size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? 
size0 : calculate_memsize(size, min_size, add_size, children_add_size, resource_size(b_res), add_align); if (!size0 && !size1) { if (bus->self && (b_res->start || b_res->end)) pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n", b_res, &bus->busn_res); b_res->flags = 0; return 0; } b_res->start = min_align; b_res->end = size0 + min_align - 1; b_res->flags |= IORESOURCE_STARTALIGN; if (bus->self && size1 > size0 && realloc_head) { add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align); pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n", b_res, &bus->busn_res, (unsigned long long) (size1 - size0), (unsigned long long) add_align); } return 0; } unsigned long pci_cardbus_resource_alignment(struct resource *res) { if (res->flags & IORESOURCE_IO) return pci_cardbus_io_size; if (res->flags & IORESOURCE_MEM) return pci_cardbus_mem_size; return 0; } static void pci_bus_size_cardbus(struct pci_bus *bus, struct list_head *realloc_head) { struct pci_dev *bridge = bus->self; struct resource *b_res; resource_size_t b_res_3_size = pci_cardbus_mem_size * 2; u16 ctrl; b_res = &bridge->resource[PCI_CB_BRIDGE_IO_0_WINDOW]; if (b_res->parent) goto handle_b_res_1; /* * Reserve some resources for CardBus. We reserve a fixed amount * of bus space for CardBus bridges. */ b_res->start = pci_cardbus_io_size; b_res->end = b_res->start + pci_cardbus_io_size - 1; b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN; if (realloc_head) { b_res->end -= pci_cardbus_io_size; add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, pci_cardbus_io_size); } handle_b_res_1: b_res = &bridge->resource[PCI_CB_BRIDGE_IO_1_WINDOW]; if (b_res->parent) goto handle_b_res_2; b_res->start = pci_cardbus_io_size; b_res->end = b_res->start + pci_cardbus_io_size - 1; b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN; if (realloc_head) { b_res->end -= pci_cardbus_io_size; add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size, pci_cardbus_io_size); } handle_b_res_2: /* MEM1 must not be pref MMIO */ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) { ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1; pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl); pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); } /* Check whether prefetchable memory is supported by this bridge. */ pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) { ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0; pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl); pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); } b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_0_WINDOW]; if (b_res->parent) goto handle_b_res_3; /* * If we have prefetchable memory support, allocate two regions. * Otherwise, allocate one region of twice the size. 
*/ if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { b_res->start = pci_cardbus_mem_size; b_res->end = b_res->start + pci_cardbus_mem_size - 1; b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_STARTALIGN; if (realloc_head) { b_res->end -= pci_cardbus_mem_size; add_to_list(realloc_head, bridge, b_res, pci_cardbus_mem_size, pci_cardbus_mem_size); } /* Reduce that to half */ b_res_3_size = pci_cardbus_mem_size; } handle_b_res_3: b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_1_WINDOW]; if (b_res->parent) goto handle_done; b_res->start = pci_cardbus_mem_size; b_res->end = b_res->start + b_res_3_size - 1; b_res->flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN; if (realloc_head) { b_res->end -= b_res_3_size; add_to_list(realloc_head, bridge, b_res, b_res_3_size, pci_cardbus_mem_size); } handle_done: ; } void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) { struct pci_dev *dev; unsigned long mask, prefmask, type2 = 0, type3 = 0; resource_size_t additional_io_size = 0, additional_mmio_size = 0, additional_mmio_pref_size = 0; struct resource *pref; struct pci_host_bridge *host; int hdr_type, ret; list_for_each_entry(dev, &bus->devices, bus_list) { struct pci_bus *b = dev->subordinate; if (!b) continue; switch (dev->hdr_type) { case PCI_HEADER_TYPE_CARDBUS: pci_bus_size_cardbus(b, realloc_head); break; case PCI_HEADER_TYPE_BRIDGE: default: __pci_bus_size_bridges(b, realloc_head); break; } } /* The root bus? */ if (pci_is_root_bus(bus)) { host = to_pci_host_bridge(bus->bridge); if (!host->size_windows) return; pci_bus_for_each_resource(bus, pref) if (pref && (pref->flags & IORESOURCE_PREFETCH)) break; hdr_type = -1; /* Intentionally invalid - not a PCI device. */ } else { pref = &bus->self->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; hdr_type = bus->self->hdr_type; } switch (hdr_type) { case PCI_HEADER_TYPE_CARDBUS: /* Don't size CardBuses yet */ break; case PCI_HEADER_TYPE_BRIDGE: pci_bridge_check_ranges(bus); if (bus->self->is_hotplug_bridge) { additional_io_size = pci_hotplug_io_size; additional_mmio_size = pci_hotplug_mmio_size; additional_mmio_pref_size = pci_hotplug_mmio_pref_size; } fallthrough; default: pbus_size_io(bus, realloc_head ? 0 : additional_io_size, additional_io_size, realloc_head); /* * If there's a 64-bit prefetchable MMIO window, compute * the size required to put all 64-bit prefetchable * resources in it. */ mask = IORESOURCE_MEM; prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH; if (pref && (pref->flags & IORESOURCE_MEM_64)) { prefmask |= IORESOURCE_MEM_64; ret = pbus_size_mem(bus, prefmask, prefmask, prefmask, prefmask, realloc_head ? 0 : additional_mmio_pref_size, additional_mmio_pref_size, realloc_head); /* * If successful, all non-prefetchable resources * and any 32-bit prefetchable resources will go in * the non-prefetchable window. */ if (ret == 0) { mask = prefmask; type2 = prefmask & ~IORESOURCE_MEM_64; type3 = prefmask & ~IORESOURCE_PREFETCH; } } /* * If there is no 64-bit prefetchable window, compute the * size required to put all prefetchable resources in the * 32-bit prefetchable window (if there is one). */ if (!type2) { prefmask &= ~IORESOURCE_MEM_64; ret = pbus_size_mem(bus, prefmask, prefmask, prefmask, prefmask, realloc_head ? 0 : additional_mmio_pref_size, additional_mmio_pref_size, realloc_head); /* * If successful, only non-prefetchable resources * will go in the non-prefetchable window. 
*/ if (ret == 0) mask = prefmask; else additional_mmio_size += additional_mmio_pref_size; type2 = type3 = IORESOURCE_MEM; } /* * Compute the size required to put everything else in the * non-prefetchable window. This includes: * * - all non-prefetchable resources * - 32-bit prefetchable resources if there's a 64-bit * prefetchable window or no prefetchable window at all * - 64-bit prefetchable resources if there's no prefetchable * window at all * * Note that the strategy in __pci_assign_resource() must match * that used here. Specifically, we cannot put a 32-bit * prefetchable resource in a 64-bit prefetchable window. */ pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3, realloc_head ? 0 : additional_mmio_size, additional_mmio_size, realloc_head); break; } } void pci_bus_size_bridges(struct pci_bus *bus) { __pci_bus_size_bridges(bus, NULL); } EXPORT_SYMBOL(pci_bus_size_bridges); static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r) { struct resource *parent_r; unsigned long mask = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH; pci_bus_for_each_resource(b, parent_r) { if (!parent_r) continue; if ((r->flags & mask) == (parent_r->flags & mask) && resource_contains(parent_r, r)) request_resource(parent_r, r); } } /* * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they are * skipped by pbus_assign_resources_sorted(). */ static void pdev_assign_fixed_resources(struct pci_dev *dev) { struct resource *r; pci_dev_for_each_resource(dev, r) { struct pci_bus *b; if (r->parent || !(r->flags & IORESOURCE_PCI_FIXED) || !(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) continue; b = dev->bus; while (b && !r->parent) { assign_fixed_resource_on_bus(b, r); b = b->parent; } } } void __pci_bus_assign_resources(const struct pci_bus *bus, struct list_head *realloc_head, struct list_head *fail_head) { struct pci_bus *b; struct pci_dev *dev; pbus_assign_resources_sorted(bus, realloc_head, fail_head); list_for_each_entry(dev, &bus->devices, bus_list) { pdev_assign_fixed_resources(dev); b = dev->subordinate; if (!b) continue; __pci_bus_assign_resources(b, realloc_head, fail_head); switch (dev->hdr_type) { case PCI_HEADER_TYPE_BRIDGE: if (!pci_is_enabled(dev)) pci_setup_bridge(b); break; case PCI_HEADER_TYPE_CARDBUS: pci_setup_cardbus(b); break; default: pci_info(dev, "not setting up bridge for bus %04x:%02x\n", pci_domain_nr(b), b->number); break; } } } void pci_bus_assign_resources(const struct pci_bus *bus) { __pci_bus_assign_resources(bus, NULL, NULL); } EXPORT_SYMBOL(pci_bus_assign_resources); static void pci_claim_device_resources(struct pci_dev *dev) { int i; for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) { struct resource *r = &dev->resource[i]; if (!r->flags || r->parent) continue; pci_claim_resource(dev, i); } } static void pci_claim_bridge_resources(struct pci_dev *dev) { int i; for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { struct resource *r = &dev->resource[i]; if (!r->flags || r->parent) continue; pci_claim_bridge_resource(dev, i); } } static void pci_bus_allocate_dev_resources(struct pci_bus *b) { struct pci_dev *dev; struct pci_bus *child; list_for_each_entry(dev, &b->devices, bus_list) { pci_claim_device_resources(dev); child = dev->subordinate; if (child) pci_bus_allocate_dev_resources(child); } } static void pci_bus_allocate_resources(struct pci_bus *b) { struct pci_bus *child; /* * Carry out a depth-first search on the PCI bus tree to allocate * bridge apertures. 
Read the programmed bridge bases and * recursively claim the respective bridge resources. */ if (b->self) { pci_read_bridge_bases(b); pci_claim_bridge_resources(b->self); } list_for_each_entry(child, &b->children, node) pci_bus_allocate_resources(child); } void pci_bus_claim_resources(struct pci_bus *b) { pci_bus_allocate_resources(b); pci_bus_allocate_dev_resources(b); } EXPORT_SYMBOL(pci_bus_claim_resources); static void __pci_bridge_assign_resources(const struct pci_dev *bridge, struct list_head *add_head, struct list_head *fail_head) { struct pci_bus *b; pdev_assign_resources_sorted((struct pci_dev *)bridge, add_head, fail_head); b = bridge->subordinate; if (!b) return; __pci_bus_assign_resources(b, add_head, fail_head); switch (bridge->class >> 8) { case PCI_CLASS_BRIDGE_PCI: pci_setup_bridge(b); break; case PCI_CLASS_BRIDGE_CARDBUS: pci_setup_cardbus(b); break; default: pci_info(bridge, "not setting up bridge for bus %04x:%02x\n", pci_domain_nr(b), b->number); break; } } #define PCI_RES_TYPE_MASK \ (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\ IORESOURCE_MEM_64) static void pci_bridge_release_resources(struct pci_bus *bus, unsigned long type) { struct pci_dev *dev = bus->self; struct resource *r; unsigned int old_flags; struct resource *b_res; int idx = 1; b_res = &dev->resource[PCI_BRIDGE_RESOURCES]; /* * 1. If IO port assignment fails, release bridge IO port. * 2. If non pref MMIO assignment fails, release bridge nonpref MMIO. * 3. If 64bit pref MMIO assignment fails, and bridge pref is 64bit, * release bridge pref MMIO. * 4. If pref MMIO assignment fails, and bridge pref is 32bit, * release bridge pref MMIO. * 5. If pref MMIO assignment fails, and bridge pref is not * assigned, release bridge nonpref MMIO. */ if (type & IORESOURCE_IO) idx = 0; else if (!(type & IORESOURCE_PREFETCH)) idx = 1; else if ((type & IORESOURCE_MEM_64) && (b_res[2].flags & IORESOURCE_MEM_64)) idx = 2; else if (!(b_res[2].flags & IORESOURCE_MEM_64) && (b_res[2].flags & IORESOURCE_PREFETCH)) idx = 2; else idx = 1; r = &b_res[idx]; if (!r->parent) return; /* If there are children, release them all */ release_child_resources(r); if (!release_resource(r)) { type = old_flags = r->flags & PCI_RES_TYPE_MASK; pci_info(dev, "resource %d %pR released\n", PCI_BRIDGE_RESOURCES + idx, r); /* Keep the old size */ r->end = resource_size(r) - 1; r->start = 0; r->flags = 0; /* Avoiding touch the one without PREF */ if (type & IORESOURCE_PREFETCH) type = IORESOURCE_PREFETCH; __pci_setup_bridge(bus, type); /* For next child res under same bridge */ r->flags = old_flags; } } enum release_type { leaf_only, whole_subtree, }; /* * Try to release PCI bridge resources from leaf bridge, so we can allocate * a larger window later. 
*/ static void pci_bus_release_bridge_resources(struct pci_bus *bus, unsigned long type, enum release_type rel_type) { struct pci_dev *dev; bool is_leaf_bridge = true; list_for_each_entry(dev, &bus->devices, bus_list) { struct pci_bus *b = dev->subordinate; if (!b) continue; is_leaf_bridge = false; if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI) continue; if (rel_type == whole_subtree) pci_bus_release_bridge_resources(b, type, whole_subtree); } if (pci_is_root_bus(bus)) return; if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI) return; if ((rel_type == whole_subtree) || is_leaf_bridge) pci_bridge_release_resources(bus, type); } static void pci_bus_dump_res(struct pci_bus *bus) { struct resource *res; int i; pci_bus_for_each_resource(bus, res, i) { if (!res || !res->end || !res->flags) continue; dev_info(&bus->dev, "resource %d %pR\n", i, res); } } static void pci_bus_dump_resources(struct pci_bus *bus) { struct pci_bus *b; struct pci_dev *dev; pci_bus_dump_res(bus); list_for_each_entry(dev, &bus->devices, bus_list) { b = dev->subordinate; if (!b) continue; pci_bus_dump_resources(b); } } static int pci_bus_get_depth(struct pci_bus *bus) { int depth = 0; struct pci_bus *child_bus; list_for_each_entry(child_bus, &bus->children, node) { int ret; ret = pci_bus_get_depth(child_bus); if (ret + 1 > depth) depth = ret + 1; } return depth; } /* * -1: undefined, will auto detect later * 0: disabled by user * 1: disabled by auto detect * 2: enabled by user * 3: enabled by auto detect */ enum enable_type { undefined = -1, user_disabled, auto_disabled, user_enabled, auto_enabled, }; static enum enable_type pci_realloc_enable = undefined; void __init pci_realloc_get_opt(char *str) { if (!strncmp(str, "off", 3)) pci_realloc_enable = user_disabled; else if (!strncmp(str, "on", 2)) pci_realloc_enable = user_enabled; } static bool pci_realloc_enabled(enum enable_type enable) { return enable >= user_enabled; } #if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO) static int iov_resources_unassigned(struct pci_dev *dev, void *data) { int i; bool *unassigned = data; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES]; struct pci_bus_region region; /* Not assigned or rejected by kernel? 
*/ if (!r->flags) continue; pcibios_resource_to_bus(dev->bus, &region, r); if (!region.start) { *unassigned = true; return 1; /* Return early from pci_walk_bus() */ } } return 0; } static enum enable_type pci_realloc_detect(struct pci_bus *bus, enum enable_type enable_local) { bool unassigned = false; struct pci_host_bridge *host; if (enable_local != undefined) return enable_local; host = pci_find_host_bridge(bus); if (host->preserve_config) return auto_disabled; pci_walk_bus(bus, iov_resources_unassigned, &unassigned); if (unassigned) return auto_enabled; return enable_local; } #else static enum enable_type pci_realloc_detect(struct pci_bus *bus, enum enable_type enable_local) { return enable_local; } #endif static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res, struct list_head *add_list, resource_size_t new_size) { resource_size_t add_size, size = resource_size(res); if (res->parent) return; if (!new_size) return; if (new_size > size) { add_size = new_size - size; pci_dbg(bridge, "bridge window %pR extended by %pa\n", res, &add_size); } else if (new_size < size) { add_size = size - new_size; pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res, &add_size); } else { return; } res->end = res->start + new_size - 1; /* If the resource is part of the add_list, remove it now */ if (add_list) remove_from_list(add_list, res); } static void remove_dev_resource(struct resource *avail, struct pci_dev *dev, struct resource *res) { resource_size_t size, align, tmp; size = resource_size(res); if (!size) return; align = pci_resource_alignment(dev, res); align = align ? ALIGN(avail->start, align) - avail->start : 0; tmp = align + size; avail->start = min(avail->start + tmp, avail->end + 1); } static void remove_dev_resources(struct pci_dev *dev, struct resource *io, struct resource *mmio, struct resource *mmio_pref) { struct resource *res; pci_dev_for_each_resource(dev, res) { if (resource_type(res) == IORESOURCE_IO) { remove_dev_resource(io, dev, res); } else if (resource_type(res) == IORESOURCE_MEM) { /* * Make sure prefetchable memory is reduced from * the correct resource. Specifically we put 32-bit * prefetchable memory in non-prefetchable window * if there is an 64-bit prefetchable window. * * See comments in __pci_bus_size_bridges() for * more information. */ if ((res->flags & IORESOURCE_PREFETCH) && ((res->flags & IORESOURCE_MEM_64) == (mmio_pref->flags & IORESOURCE_MEM_64))) remove_dev_resource(mmio_pref, dev, res); else remove_dev_resource(mmio, dev, res); } } } /* * io, mmio and mmio_pref contain the total amount of bridge window space * available. This includes the minimal space needed to cover all the * existing devices on the bus and the possible extra space that can be * shared with the bridges. */ static void pci_bus_distribute_available_resources(struct pci_bus *bus, struct list_head *add_list, struct resource io, struct resource mmio, struct resource mmio_pref) { unsigned int normal_bridges = 0, hotplug_bridges = 0; struct resource *io_res, *mmio_res, *mmio_pref_res; struct pci_dev *dev, *bridge = bus->self; resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align; io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW]; mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW]; mmio_pref_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; /* * The alignment of this bridge is yet to be considered, hence it must * be done now before extending its bridge window. 
*/ align = pci_resource_alignment(bridge, io_res); if (!io_res->parent && align) io.start = min(ALIGN(io.start, align), io.end + 1); align = pci_resource_alignment(bridge, mmio_res); if (!mmio_res->parent && align) mmio.start = min(ALIGN(mmio.start, align), mmio.end + 1); align = pci_resource_alignment(bridge, mmio_pref_res); if (!mmio_pref_res->parent && align) mmio_pref.start = min(ALIGN(mmio_pref.start, align), mmio_pref.end + 1); /* * Now that we have adjusted for alignment, update the bridge window * resources to fill as much remaining resource space as possible. */ adjust_bridge_window(bridge, io_res, add_list, resource_size(&io)); adjust_bridge_window(bridge, mmio_res, add_list, resource_size(&mmio)); adjust_bridge_window(bridge, mmio_pref_res, add_list, resource_size(&mmio_pref)); /* * Calculate how many hotplug bridges and normal bridges there * are on this bus. We will distribute the additional available * resources between hotplug bridges. */ for_each_pci_bridge(dev, bus) { if (dev->is_hotplug_bridge) hotplug_bridges++; else normal_bridges++; } if (!(hotplug_bridges + normal_bridges)) return; /* * Calculate the amount of space we can forward from "bus" to any * downstream buses, i.e., the space left over after assigning the * BARs and windows on "bus". */ list_for_each_entry(dev, &bus->devices, bus_list) { if (!dev->is_virtfn) remove_dev_resources(dev, &io, &mmio, &mmio_pref); } /* * If there is at least one hotplug bridge on this bus it gets all * the extra resource space that was left after the reductions * above. * * If there are no hotplug bridges the extra resource space is * split between non-hotplug bridges. This is to allow possible * hotplug bridges below them to get the extra space as well. */ if (hotplug_bridges) { io_per_b = div64_ul(resource_size(&io), hotplug_bridges); mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges); mmio_pref_per_b = div64_ul(resource_size(&mmio_pref), hotplug_bridges); } else { io_per_b = div64_ul(resource_size(&io), normal_bridges); mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges); mmio_pref_per_b = div64_ul(resource_size(&mmio_pref), normal_bridges); } for_each_pci_bridge(dev, bus) { struct resource *res; struct pci_bus *b; b = dev->subordinate; if (!b) continue; if (hotplug_bridges && !dev->is_hotplug_bridge) continue; res = &dev->resource[PCI_BRIDGE_IO_WINDOW]; /* * Make sure the split resource space is properly aligned * for bridge windows (align it down to avoid going above * what is available). */ align = pci_resource_alignment(dev, res); io.end = align ? io.start + ALIGN_DOWN(io_per_b, align) - 1 : io.start + io_per_b - 1; /* * The x_per_b holds the extra resource space that can be * added for each bridge but there is the minimal already * reserved as well so adjust x.start down accordingly to * cover the whole space. */ io.start -= resource_size(res); res = &dev->resource[PCI_BRIDGE_MEM_WINDOW]; align = pci_resource_alignment(dev, res); mmio.end = align ? mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1 : mmio.start + mmio_per_b - 1; mmio.start -= resource_size(res); res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; align = pci_resource_alignment(dev, res); mmio_pref.end = align ? 
mmio_pref.start + ALIGN_DOWN(mmio_pref_per_b, align) - 1 : mmio_pref.start + mmio_pref_per_b - 1; mmio_pref.start -= resource_size(res); pci_bus_distribute_available_resources(b, add_list, io, mmio, mmio_pref); io.start = io.end + 1; mmio.start = mmio.end + 1; mmio_pref.start = mmio_pref.end + 1; } } static void pci_bridge_distribute_available_resources(struct pci_dev *bridge, struct list_head *add_list) { struct resource available_io, available_mmio, available_mmio_pref; if (!bridge->is_hotplug_bridge) return; pci_dbg(bridge, "distributing available resources\n"); /* Take the initial extra resources from the hotplug port */ available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW]; available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW]; available_mmio_pref = bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; pci_bus_distribute_available_resources(bridge->subordinate, add_list, available_io, available_mmio, available_mmio_pref); } static bool pci_bridge_resources_not_assigned(struct pci_dev *dev) { const struct resource *r; /* * If the child device's resources are not yet assigned it means we * are configuring them (not the boot firmware), so we should be * able to extend the upstream bridge resources in the same way we * do with the normal hotplug case. */ r = &dev->resource[PCI_BRIDGE_IO_WINDOW]; if (r->flags && !(r->flags & IORESOURCE_STARTALIGN)) return false; r = &dev->resource[PCI_BRIDGE_MEM_WINDOW]; if (r->flags && !(r->flags & IORESOURCE_STARTALIGN)) return false; r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW]; if (r->flags && !(r->flags & IORESOURCE_STARTALIGN)) return false; return true; } static void pci_root_bus_distribute_available_resources(struct pci_bus *bus, struct list_head *add_list) { struct pci_dev *dev, *bridge = bus->self; for_each_pci_bridge(dev, bus) { struct pci_bus *b; b = dev->subordinate; if (!b) continue; /* * Need to check "bridge" here too because it is NULL * in case of root bus. */ if (bridge && pci_bridge_resources_not_assigned(dev)) pci_bridge_distribute_available_resources(bridge, add_list); else pci_root_bus_distribute_available_resources(b, add_list); } } /* * First try will not touch PCI bridge res. * Second and later try will clear small leaf bridge res. * Will stop at the max depth if a good assignment cannot be found. */ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) { LIST_HEAD(realloc_head); /* List of resources that want additional resources */ struct list_head *add_list = NULL; int tried_times = 0; enum release_type rel_type = leaf_only; LIST_HEAD(fail_head); struct pci_dev_resource *fail_res; int pci_try_num = 1; enum enable_type enable_local; /* Don't realloc if asked to do so */ enable_local = pci_realloc_detect(bus, pci_realloc_enable); if (pci_realloc_enabled(enable_local)) { int max_depth = pci_bus_get_depth(bus); pci_try_num = max_depth + 1; dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n", max_depth, pci_try_num); } again: /* * Last try will use add_list, otherwise will try good to have as must * have, so can realloc parent bridge resource */ if (tried_times + 1 == pci_try_num) add_list = &realloc_head; /* * Depth first, calculate sizes and alignments of all subordinate buses. */ __pci_bus_size_bridges(bus, add_list); pci_root_bus_distribute_available_resources(bus, add_list); /* Depth last, allocate resources and update the hardware. */ __pci_bus_assign_resources(bus, add_list, &fail_head); if (add_list) BUG_ON(!list_empty(add_list)); tried_times++; /* Any device complain?
*/ if (list_empty(&fail_head)) goto dump; if (tried_times >= pci_try_num) { if (enable_local == undefined) dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n"); else if (enable_local == auto_enabled) dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n"); free_list(&fail_head); goto dump; } dev_info(&bus->dev, "No. %d try to assign unassigned res\n", tried_times + 1); /* Third times and later will not check if it is leaf */ if ((tried_times + 1) > 2) rel_type = whole_subtree; /* * Try to release leaf bridge's resources that doesn't fit resource of * child device under that bridge. */ list_for_each_entry(fail_res, &fail_head, list) pci_bus_release_bridge_resources(fail_res->dev->bus, fail_res->flags & PCI_RES_TYPE_MASK, rel_type); /* Restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; int idx; res->start = fail_res->start; res->end = fail_res->end; res->flags = fail_res->flags; if (pci_is_bridge(fail_res->dev)) { idx = res - &fail_res->dev->resource[0]; if (idx >= PCI_BRIDGE_RESOURCES && idx <= PCI_BRIDGE_RESOURCE_END) res->flags = 0; } } free_list(&fail_head); goto again; dump: /* Dump the resource on buses */ pci_bus_dump_resources(bus); } void __init pci_assign_unassigned_resources(void) { struct pci_bus *root_bus; list_for_each_entry(root_bus, &pci_root_buses, node) { pci_assign_unassigned_root_bus_resources(root_bus); /* Make sure the root bridge has a companion ACPI device */ if (ACPI_HANDLE(root_bus->bridge)) acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge)); } } void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) { struct pci_bus *parent = bridge->subordinate; /* List of resources that want additional resources */ LIST_HEAD(add_list); int tried_times = 0; LIST_HEAD(fail_head); struct pci_dev_resource *fail_res; int retval; again: __pci_bus_size_bridges(parent, &add_list); /* * Distribute remaining resources (if any) equally between hotplug * bridges below. This makes it possible to extend the hierarchy * later without running out of resources. */ pci_bridge_distribute_available_resources(bridge, &add_list); __pci_bridge_assign_resources(bridge, &add_list, &fail_head); BUG_ON(!list_empty(&add_list)); tried_times++; if (list_empty(&fail_head)) goto enable_all; if (tried_times >= 2) { /* Still fail, don't need to try more */ free_list(&fail_head); goto enable_all; } printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n", tried_times + 1); /* * Try to release leaf bridge's resources that aren't big enough * to contain child device resources. 
*/ list_for_each_entry(fail_res, &fail_head, list) pci_bus_release_bridge_resources(fail_res->dev->bus, fail_res->flags & PCI_RES_TYPE_MASK, whole_subtree); /* Restore size and flags */ list_for_each_entry(fail_res, &fail_head, list) { struct resource *res = fail_res->res; int idx; res->start = fail_res->start; res->end = fail_res->end; res->flags = fail_res->flags; if (pci_is_bridge(fail_res->dev)) { idx = res - &fail_res->dev->resource[0]; if (idx >= PCI_BRIDGE_RESOURCES && idx <= PCI_BRIDGE_RESOURCE_END) res->flags = 0; } } free_list(&fail_head); goto again; enable_all: retval = pci_reenable_device(bridge); if (retval) pci_err(bridge, "Error reenabling bridge (%d)\n", retval); pci_set_master(bridge); } EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources); int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type) { struct pci_dev_resource *dev_res; struct pci_dev *next; LIST_HEAD(saved); LIST_HEAD(added); LIST_HEAD(failed); unsigned int i; int ret; down_read(&pci_bus_sem); /* Walk to the root hub, releasing bridge BARs when possible */ next = bridge; do { bridge = next; for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END; i++) { struct resource *res = &bridge->resource[i]; if ((res->flags ^ type) & PCI_RES_TYPE_MASK) continue; /* Ignore BARs which are still in use */ if (res->child) continue; ret = add_to_list(&saved, bridge, res, 0, 0); if (ret) goto cleanup; pci_info(bridge, "BAR %d: releasing %pR\n", i, res); if (res->parent) release_resource(res); res->start = 0; res->end = 0; break; } if (i == PCI_BRIDGE_RESOURCE_END) break; next = bridge->bus ? bridge->bus->self : NULL; } while (next); if (list_empty(&saved)) { up_read(&pci_bus_sem); return -ENOENT; } __pci_bus_size_bridges(bridge->subordinate, &added); __pci_bridge_assign_resources(bridge, &added, &failed); BUG_ON(!list_empty(&added)); if (!list_empty(&failed)) { ret = -ENOSPC; goto cleanup; } list_for_each_entry(dev_res, &saved, list) { /* Skip the bridge we just assigned resources for */ if (bridge == dev_res->dev) continue; bridge = dev_res->dev; pci_setup_bridge(bridge->subordinate); } free_list(&saved); up_read(&pci_bus_sem); return 0; cleanup: /* Restore size and flags */ list_for_each_entry(dev_res, &failed, list) { struct resource *res = dev_res->res; res->start = dev_res->start; res->end = dev_res->end; res->flags = dev_res->flags; } free_list(&failed); /* Revert to the old configuration */ list_for_each_entry(dev_res, &saved, list) { struct resource *res = dev_res->res; bridge = dev_res->dev; i = res - bridge->resource; res->start = dev_res->start; res->end = dev_res->end; res->flags = dev_res->flags; pci_claim_resource(bridge, i); pci_setup_bridge(bridge->subordinate); } free_list(&saved); up_read(&pci_bus_sem); return ret; } void pci_assign_unassigned_bus_resources(struct pci_bus *bus) { struct pci_dev *dev; /* List of resources that want additional resources */ LIST_HEAD(add_list); down_read(&pci_bus_sem); for_each_pci_bridge(dev, bus) if (pci_has_subordinate(dev)) __pci_bus_size_bridges(dev->subordinate, &add_list); up_read(&pci_bus_sem); __pci_bus_assign_resources(bus, &add_list, NULL); BUG_ON(!list_empty(&add_list)); } EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);
linux-master
drivers/pci/setup-bus.c
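The distribution pass in pci_bus_distribute_available_resources() above splits the spare window space evenly between hotplug bridges (or between all bridges when there are none) and aligns each share down so the split never exceeds what is available. The following stand-alone sketch models only that split-and-align arithmetic; it deliberately omits the reserved-minimum adjustment and the hotplug/non-hotplug selection, and the name distribute_span() and its parameters are invented for illustration, not kernel API.

/* Illustrative only: models how the spare window is split among N bridges,
 * aligning each share down. distribute_span() is a made-up helper; the
 * ALIGN_DOWN macro here assumes power-of-two alignments.
 */
#include <stdio.h>
#include <stdint.h>

#define ALIGN_DOWN(x, a)  ((x) & ~((uint64_t)(a) - 1))

static void distribute_span(uint64_t start, uint64_t end,
			    unsigned int nr_bridges, uint64_t align)
{
	uint64_t per_b = (end - start + 1) / nr_bridges;	/* like div64_ul() */
	unsigned int i;

	for (i = 0; i < nr_bridges; i++) {
		/* Align the share down so we never go above what is available */
		uint64_t span_end = start + ALIGN_DOWN(per_b, align) - 1;

		printf("bridge %u gets [%#llx-%#llx]\n", i,
		       (unsigned long long)start, (unsigned long long)span_end);
		start = span_end + 1;	/* next bridge starts right after */
	}
}

int main(void)
{
	/* e.g. 3 hotplug bridges sharing a 96 MB spare MMIO window */
	distribute_span(0xe0000000, 0xe5ffffff, 3, 1 << 20);
	return 0;
}

Each share is handed to the recursive call for the subordinate bus, and the next bridge's window begins immediately after the previous share, which is why the real code advances the start to end + 1 after every iteration.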
// SPDX-License-Identifier: GPL-2.0 #include <linux/pci.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/ioport.h> #include <linux/wait.h> #include "pci.h" /* * This interrupt-safe spinlock protects all accesses to PCI * configuration space. */ DEFINE_RAW_SPINLOCK(pci_lock); /* * Wrappers for all PCI configuration access functions. They just check * alignment, do locking and call the low-level functions pointed to * by pci_dev->ops. */ #define PCI_byte_BAD 0 #define PCI_word_BAD (pos & 1) #define PCI_dword_BAD (pos & 3) #ifdef CONFIG_PCI_LOCKLESS_CONFIG # define pci_lock_config(f) do { (void)(f); } while (0) # define pci_unlock_config(f) do { (void)(f); } while (0) #else # define pci_lock_config(f) raw_spin_lock_irqsave(&pci_lock, f) # define pci_unlock_config(f) raw_spin_unlock_irqrestore(&pci_lock, f) #endif #define PCI_OP_READ(size, type, len) \ int noinline pci_bus_read_config_##size \ (struct pci_bus *bus, unsigned int devfn, int pos, type *value) \ { \ int res; \ unsigned long flags; \ u32 data = 0; \ if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ pci_lock_config(flags); \ res = bus->ops->read(bus, devfn, pos, len, &data); \ if (res) \ PCI_SET_ERROR_RESPONSE(value); \ else \ *value = (type)data; \ pci_unlock_config(flags); \ return res; \ } #define PCI_OP_WRITE(size, type, len) \ int noinline pci_bus_write_config_##size \ (struct pci_bus *bus, unsigned int devfn, int pos, type value) \ { \ int res; \ unsigned long flags; \ if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ pci_lock_config(flags); \ res = bus->ops->write(bus, devfn, pos, len, value); \ pci_unlock_config(flags); \ return res; \ } PCI_OP_READ(byte, u8, 1) PCI_OP_READ(word, u16, 2) PCI_OP_READ(dword, u32, 4) PCI_OP_WRITE(byte, u8, 1) PCI_OP_WRITE(word, u16, 2) PCI_OP_WRITE(dword, u32, 4) EXPORT_SYMBOL(pci_bus_read_config_byte); EXPORT_SYMBOL(pci_bus_read_config_word); EXPORT_SYMBOL(pci_bus_read_config_dword); EXPORT_SYMBOL(pci_bus_write_config_byte); EXPORT_SYMBOL(pci_bus_write_config_word); EXPORT_SYMBOL(pci_bus_write_config_dword); int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { void __iomem *addr; addr = bus->ops->map_bus(bus, devfn, where); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; if (size == 1) *val = readb(addr); else if (size == 2) *val = readw(addr); else *val = readl(addr); return PCIBIOS_SUCCESSFUL; } EXPORT_SYMBOL_GPL(pci_generic_config_read); int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { void __iomem *addr; addr = bus->ops->map_bus(bus, devfn, where); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; if (size == 1) writeb(val, addr); else if (size == 2) writew(val, addr); else writel(val, addr); return PCIBIOS_SUCCESSFUL; } EXPORT_SYMBOL_GPL(pci_generic_config_write); int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { void __iomem *addr; addr = bus->ops->map_bus(bus, devfn, where & ~0x3); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; *val = readl(addr); if (size <= 2) *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); return PCIBIOS_SUCCESSFUL; } EXPORT_SYMBOL_GPL(pci_generic_config_read32); int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { void __iomem *addr; u32 mask, tmp; addr = bus->ops->map_bus(bus, devfn, where & ~0x3); if (!addr) return PCIBIOS_DEVICE_NOT_FOUND; if (size == 4) { writel(val, addr); return PCIBIOS_SUCCESSFUL; } /* * In general, 
hardware that supports only 32-bit writes on PCI is * not spec-compliant. For example, software may perform a 16-bit * write. If the hardware only supports 32-bit accesses, we must * do a 32-bit read, merge in the 16 bits we intend to write, * followed by a 32-bit write. If the 16 bits we *don't* intend to * write happen to have any RW1C (write-one-to-clear) bits set, we * just inadvertently cleared something we shouldn't have. */ if (!bus->unsafe_warn) { dev_warn(&bus->dev, "%d-byte config write to %04x:%02x:%02x.%d offset %#x may corrupt adjacent RW1C bits\n", size, pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where); bus->unsafe_warn = 1; } mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); tmp = readl(addr) & mask; tmp |= val << ((where & 0x3) * 8); writel(tmp, addr); return PCIBIOS_SUCCESSFUL; } EXPORT_SYMBOL_GPL(pci_generic_config_write32); /** * pci_bus_set_ops - Set raw operations of pci bus * @bus: pci bus struct * @ops: new raw operations * * Return previous raw operations */ struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops) { struct pci_ops *old_ops; unsigned long flags; raw_spin_lock_irqsave(&pci_lock, flags); old_ops = bus->ops; bus->ops = ops; raw_spin_unlock_irqrestore(&pci_lock, flags); return old_ops; } EXPORT_SYMBOL(pci_bus_set_ops); /* * The following routines are to prevent the user from accessing PCI config * space when it's unsafe to do so. Some devices require this during BIST and * we're required to prevent it during D-state transitions. * * We have a bit per device to indicate it's blocked and a global wait queue * for callers to sleep on until devices are unblocked. */ static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait); static noinline void pci_wait_cfg(struct pci_dev *dev) __must_hold(&pci_lock) { do { raw_spin_unlock_irq(&pci_lock); wait_event(pci_cfg_wait, !dev->block_cfg_access); raw_spin_lock_irq(&pci_lock); } while (dev->block_cfg_access); } /* Returns 0 on success, negative values indicate error. */ #define PCI_USER_READ_CONFIG(size, type) \ int pci_user_read_config_##size \ (struct pci_dev *dev, int pos, type *val) \ { \ int ret = PCIBIOS_SUCCESSFUL; \ u32 data = -1; \ if (PCI_##size##_BAD) \ return -EINVAL; \ raw_spin_lock_irq(&pci_lock); \ if (unlikely(dev->block_cfg_access)) \ pci_wait_cfg(dev); \ ret = dev->bus->ops->read(dev->bus, dev->devfn, \ pos, sizeof(type), &data); \ raw_spin_unlock_irq(&pci_lock); \ if (ret) \ PCI_SET_ERROR_RESPONSE(val); \ else \ *val = (type)data; \ return pcibios_err_to_errno(ret); \ } \ EXPORT_SYMBOL_GPL(pci_user_read_config_##size); /* Returns 0 on success, negative values indicate error. 
*/ #define PCI_USER_WRITE_CONFIG(size, type) \ int pci_user_write_config_##size \ (struct pci_dev *dev, int pos, type val) \ { \ int ret = PCIBIOS_SUCCESSFUL; \ if (PCI_##size##_BAD) \ return -EINVAL; \ raw_spin_lock_irq(&pci_lock); \ if (unlikely(dev->block_cfg_access)) \ pci_wait_cfg(dev); \ ret = dev->bus->ops->write(dev->bus, dev->devfn, \ pos, sizeof(type), val); \ raw_spin_unlock_irq(&pci_lock); \ return pcibios_err_to_errno(ret); \ } \ EXPORT_SYMBOL_GPL(pci_user_write_config_##size); PCI_USER_READ_CONFIG(byte, u8) PCI_USER_READ_CONFIG(word, u16) PCI_USER_READ_CONFIG(dword, u32) PCI_USER_WRITE_CONFIG(byte, u8) PCI_USER_WRITE_CONFIG(word, u16) PCI_USER_WRITE_CONFIG(dword, u32) /** * pci_cfg_access_lock - Lock PCI config reads/writes * @dev: pci device struct * * When access is locked, any userspace reads or writes to config * space and concurrent lock requests will sleep until access is * allowed via pci_cfg_access_unlock() again. */ void pci_cfg_access_lock(struct pci_dev *dev) { might_sleep(); raw_spin_lock_irq(&pci_lock); if (dev->block_cfg_access) pci_wait_cfg(dev); dev->block_cfg_access = 1; raw_spin_unlock_irq(&pci_lock); } EXPORT_SYMBOL_GPL(pci_cfg_access_lock); /** * pci_cfg_access_trylock - try to lock PCI config reads/writes * @dev: pci device struct * * Same as pci_cfg_access_lock, but will return 0 if access is * already locked, 1 otherwise. This function can be used from * atomic contexts. */ bool pci_cfg_access_trylock(struct pci_dev *dev) { unsigned long flags; bool locked = true; raw_spin_lock_irqsave(&pci_lock, flags); if (dev->block_cfg_access) locked = false; else dev->block_cfg_access = 1; raw_spin_unlock_irqrestore(&pci_lock, flags); return locked; } EXPORT_SYMBOL_GPL(pci_cfg_access_trylock); /** * pci_cfg_access_unlock - Unlock PCI config reads/writes * @dev: pci device struct * * This function allows PCI config accesses to resume. */ void pci_cfg_access_unlock(struct pci_dev *dev) { unsigned long flags; raw_spin_lock_irqsave(&pci_lock, flags); /* * This indicates a problem in the caller, but we don't need * to kill them, unlike a double-block above. 
*/ WARN_ON(!dev->block_cfg_access); dev->block_cfg_access = 0; raw_spin_unlock_irqrestore(&pci_lock, flags); wake_up_all(&pci_cfg_wait); } EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); static inline int pcie_cap_version(const struct pci_dev *dev) { return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS; } bool pcie_cap_has_lnkctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); return type == PCI_EXP_TYPE_ENDPOINT || type == PCI_EXP_TYPE_LEG_END || type == PCI_EXP_TYPE_ROOT_PORT || type == PCI_EXP_TYPE_UPSTREAM || type == PCI_EXP_TYPE_DOWNSTREAM || type == PCI_EXP_TYPE_PCI_BRIDGE || type == PCI_EXP_TYPE_PCIE_BRIDGE; } bool pcie_cap_has_lnkctl2(const struct pci_dev *dev) { return pcie_cap_has_lnkctl(dev) && pcie_cap_version(dev) > 1; } static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev) { return pcie_downstream_port(dev) && pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT; } bool pcie_cap_has_rtctl(const struct pci_dev *dev) { int type = pci_pcie_type(dev); return type == PCI_EXP_TYPE_ROOT_PORT || type == PCI_EXP_TYPE_RC_EC; } static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos) { if (!pci_is_pcie(dev)) return false; switch (pos) { case PCI_EXP_FLAGS: return true; case PCI_EXP_DEVCAP: case PCI_EXP_DEVCTL: case PCI_EXP_DEVSTA: return true; case PCI_EXP_LNKCAP: case PCI_EXP_LNKCTL: case PCI_EXP_LNKSTA: return pcie_cap_has_lnkctl(dev); case PCI_EXP_SLTCAP: case PCI_EXP_SLTCTL: case PCI_EXP_SLTSTA: return pcie_cap_has_sltctl(dev); case PCI_EXP_RTCTL: case PCI_EXP_RTCAP: case PCI_EXP_RTSTA: return pcie_cap_has_rtctl(dev); case PCI_EXP_DEVCAP2: case PCI_EXP_DEVCTL2: return pcie_cap_version(dev) > 1; case PCI_EXP_LNKCAP2: case PCI_EXP_LNKCTL2: case PCI_EXP_LNKSTA2: return pcie_cap_has_lnkctl2(dev); default: return false; } } /* * Note that these accessor functions are only for the "PCI Express * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) */ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) { int ret; *val = 0; if (pos & 1) return PCIBIOS_BAD_REGISTER_NUMBER; if (pcie_capability_reg_implemented(dev, pos)) { ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); /* * Reset *val to 0 if pci_read_config_word() fails; it may * have been written as 0xFFFF (PCI_ERROR_RESPONSE) if the * config read failed on PCI. */ if (ret) *val = 0; return ret; } /* * For Functions that do not implement the Slot Capabilities, * Slot Status, and Slot Control registers, these spaces must * be hardwired to 0b, with the exception of the Presence Detect * State bit in the Slot Status register of Downstream Ports, * which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8) */ if (pci_is_pcie(dev) && pcie_downstream_port(dev) && pos == PCI_EXP_SLTSTA) *val = PCI_EXP_SLTSTA_PDS; return 0; } EXPORT_SYMBOL(pcie_capability_read_word); int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) { int ret; *val = 0; if (pos & 3) return PCIBIOS_BAD_REGISTER_NUMBER; if (pcie_capability_reg_implemented(dev, pos)) { ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); /* * Reset *val to 0 if pci_read_config_dword() fails; it may * have been written as 0xFFFFFFFF (PCI_ERROR_RESPONSE) if * the config read failed on PCI. 
*/ if (ret) *val = 0; return ret; } if (pci_is_pcie(dev) && pcie_downstream_port(dev) && pos == PCI_EXP_SLTSTA) *val = PCI_EXP_SLTSTA_PDS; return 0; } EXPORT_SYMBOL(pcie_capability_read_dword); int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) { if (pos & 1) return PCIBIOS_BAD_REGISTER_NUMBER; if (!pcie_capability_reg_implemented(dev, pos)) return 0; return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); } EXPORT_SYMBOL(pcie_capability_write_word); int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) { if (pos & 3) return PCIBIOS_BAD_REGISTER_NUMBER; if (!pcie_capability_reg_implemented(dev, pos)) return 0; return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val); } EXPORT_SYMBOL(pcie_capability_write_dword); int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos, u16 clear, u16 set) { int ret; u16 val; ret = pcie_capability_read_word(dev, pos, &val); if (ret) return ret; val &= ~clear; val |= set; return pcie_capability_write_word(dev, pos, val); } EXPORT_SYMBOL(pcie_capability_clear_and_set_word_unlocked); int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos, u16 clear, u16 set) { unsigned long flags; int ret; spin_lock_irqsave(&dev->pcie_cap_lock, flags); ret = pcie_capability_clear_and_set_word_unlocked(dev, pos, clear, set); spin_unlock_irqrestore(&dev->pcie_cap_lock, flags); return ret; } EXPORT_SYMBOL(pcie_capability_clear_and_set_word_locked); int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos, u32 clear, u32 set) { int ret; u32 val; ret = pcie_capability_read_dword(dev, pos, &val); if (ret) return ret; val &= ~clear; val |= set; return pcie_capability_write_dword(dev, pos, val); } EXPORT_SYMBOL(pcie_capability_clear_and_set_dword); int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) { if (pci_dev_is_disconnected(dev)) { PCI_SET_ERROR_RESPONSE(val); return PCIBIOS_DEVICE_NOT_FOUND; } return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_read_config_byte); int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val) { if (pci_dev_is_disconnected(dev)) { PCI_SET_ERROR_RESPONSE(val); return PCIBIOS_DEVICE_NOT_FOUND; } return pci_bus_read_config_word(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_read_config_word); int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val) { if (pci_dev_is_disconnected(dev)) { PCI_SET_ERROR_RESPONSE(val); return PCIBIOS_DEVICE_NOT_FOUND; } return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_read_config_dword); int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val) { if (pci_dev_is_disconnected(dev)) return PCIBIOS_DEVICE_NOT_FOUND; return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_byte); int pci_write_config_word(const struct pci_dev *dev, int where, u16 val) { if (pci_dev_is_disconnected(dev)) return PCIBIOS_DEVICE_NOT_FOUND; return pci_bus_write_config_word(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_word); int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val) { if (pci_dev_is_disconnected(dev)) return PCIBIOS_DEVICE_NOT_FOUND; return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_dword);
linux-master
drivers/pci/access.c
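pci_generic_config_write32() above has to emulate 1- and 2-byte config writes with a 32-bit read-modify-write, which is why it warns that adjacent RW1C bits may be corrupted. The small user-space sketch below reproduces only the mask/shift merge it performs; merge_subword_write() is a made-up name used for illustration.

/* Illustrative only: the mask/shift merge used when emulating a sub-dword
 * config write on hardware that supports 32-bit accesses only.
 * merge_subword_write() is not a kernel function.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t merge_subword_write(uint32_t current, int where, int size,
				    uint32_t val)
{
	/* Clear the bytes being written, keep the neighbouring bytes ... */
	uint32_t mask = ~(((1u << (size * 8)) - 1) << ((where & 0x3) * 8));
	uint32_t tmp = current & mask;

	/* ... then merge in the new value at the right byte offset */
	return tmp | (val << ((where & 0x3) * 8));
}

int main(void)
{
	/* 16-bit write of 0xBEEF at offset 2 of a dword that reads 0x11223344 */
	printf("%#x\n", merge_subword_write(0x11223344, 2, 2, 0xBEEF));
	/* Prints 0xbeef3344: the untouched low 16 bits get written back with
	 * their read value, which is the RW1C hazard the driver warns about.
	 */
	return 0;
}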
// SPDX-License-Identifier: GPL-2.0 /* * Generic PCI resource mmap helper * * Copyright © 2017 Amazon.com, Inc. or its affiliates. * * Author: David Woodhouse <[email protected]> */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #ifdef ARCH_GENERIC_PCI_MMAP_RESOURCE static const struct vm_operations_struct pci_phys_vm_ops = { #ifdef CONFIG_HAVE_IOREMAP_PROT .access = generic_access_phys, #endif }; int pci_mmap_resource_range(struct pci_dev *pdev, int bar, struct vm_area_struct *vma, enum pci_mmap_state mmap_state, int write_combine) { unsigned long size; int ret; size = ((pci_resource_len(pdev, bar) - 1) >> PAGE_SHIFT) + 1; if (vma->vm_pgoff + vma_pages(vma) > size) return -EINVAL; if (write_combine) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); else vma->vm_page_prot = pgprot_device(vma->vm_page_prot); if (mmap_state == pci_mmap_io) { ret = pci_iobar_pfn(pdev, bar, vma); if (ret) return ret; } else vma->vm_pgoff += (pci_resource_start(pdev, bar) >> PAGE_SHIFT); vma->vm_ops = &pci_phys_vm_ops; return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, vma->vm_end - vma->vm_start, vma->vm_page_prot); } #endif
linux-master
drivers/pci/mmap.c
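One common consumer of pci_mmap_resource_range() is the sysfs resourceN interface, which lets user space map a memory BAR directly. The sketch below shows that user-space side under the assumption that such a mapping is permitted on the platform; the device address 0000:01:00.0 and the 4 KiB length are placeholders.

/* Illustrative user-space counterpart: mapping BAR 0 through the sysfs
 * resource0 file. The BDF and mapping length are placeholders.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:01:00.0/resource0";
	size_t len = 4096;		/* assume a 4 KiB (or larger) memory BAR */
	int fd = open(path, O_RDWR | O_SYNC);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	void *bar = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (bar == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* Read the first 32-bit register of the BAR (device specific) */
	printf("reg0 = %#x\n", *(volatile uint32_t *)bar);

	munmap(bar, len);
	close(fd);
	return 0;
}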
// SPDX-License-Identifier: GPL-2.0 /* * Data Object Exchange * PCIe r6.0, sec 6.30 DOE * * Copyright (C) 2021 Huawei * Jonathan Cameron <[email protected]> * * Copyright (C) 2022 Intel Corporation * Ira Weiny <[email protected]> */ #define dev_fmt(fmt) "DOE: " fmt #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/pci-doe.h> #include <linux/workqueue.h> #include "pci.h" #define PCI_DOE_PROTOCOL_DISCOVERY 0 /* Timeout of 1 second from 6.30.2 Operation, PCI Spec r6.0 */ #define PCI_DOE_TIMEOUT HZ #define PCI_DOE_POLL_INTERVAL (PCI_DOE_TIMEOUT / 128) #define PCI_DOE_FLAG_CANCEL 0 #define PCI_DOE_FLAG_DEAD 1 /* Max data object length is 2^18 dwords */ #define PCI_DOE_MAX_LENGTH (1 << 18) /** * struct pci_doe_mb - State for a single DOE mailbox * * This state is used to manage a single DOE mailbox capability. All fields * should be considered opaque to the consumers and the structure passed into * the helpers below after being created by pci_doe_create_mb(). * * @pdev: PCI device this mailbox belongs to * @cap_offset: Capability offset * @prots: Array of protocols supported (encoded as long values) * @wq: Wait queue for work item * @work_queue: Queue of pci_doe_work items * @flags: Bit array of PCI_DOE_FLAG_* flags */ struct pci_doe_mb { struct pci_dev *pdev; u16 cap_offset; struct xarray prots; wait_queue_head_t wq; struct workqueue_struct *work_queue; unsigned long flags; }; struct pci_doe_protocol { u16 vid; u8 type; }; /** * struct pci_doe_task - represents a single query/response * * @prot: DOE Protocol * @request_pl: The request payload * @request_pl_sz: Size of the request payload (bytes) * @response_pl: The response payload * @response_pl_sz: Size of the response payload (bytes) * @rv: Return value. Length of received response or error (bytes) * @complete: Called when task is complete * @private: Private data for the consumer * @work: Used internally by the mailbox * @doe_mb: Used internally by the mailbox */ struct pci_doe_task { struct pci_doe_protocol prot; const __le32 *request_pl; size_t request_pl_sz; __le32 *response_pl; size_t response_pl_sz; int rv; void (*complete)(struct pci_doe_task *task); void *private; /* initialized by pci_doe_submit_task() */ struct work_struct work; struct pci_doe_mb *doe_mb; }; static int pci_doe_wait(struct pci_doe_mb *doe_mb, unsigned long timeout) { if (wait_event_timeout(doe_mb->wq, test_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags), timeout)) return -EIO; return 0; } static void pci_doe_write_ctrl(struct pci_doe_mb *doe_mb, u32 val) { struct pci_dev *pdev = doe_mb->pdev; int offset = doe_mb->cap_offset; pci_write_config_dword(pdev, offset + PCI_DOE_CTRL, val); } static int pci_doe_abort(struct pci_doe_mb *doe_mb) { struct pci_dev *pdev = doe_mb->pdev; int offset = doe_mb->cap_offset; unsigned long timeout_jiffies; pci_dbg(pdev, "[%x] Issuing Abort\n", offset); timeout_jiffies = jiffies + PCI_DOE_TIMEOUT; pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_ABORT); do { int rc; u32 val; rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL); if (rc) return rc; pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); /* Abort success! 
*/ if (!FIELD_GET(PCI_DOE_STATUS_ERROR, val) && !FIELD_GET(PCI_DOE_STATUS_BUSY, val)) return 0; } while (!time_after(jiffies, timeout_jiffies)); /* Abort has timed out and the MB is dead */ pci_err(pdev, "[%x] ABORT timed out\n", offset); return -EIO; } static int pci_doe_send_req(struct pci_doe_mb *doe_mb, struct pci_doe_task *task) { struct pci_dev *pdev = doe_mb->pdev; int offset = doe_mb->cap_offset; size_t length, remainder; u32 val; int i; /* * Check the DOE busy bit is not set. If it is set, this could indicate * someone other than Linux (e.g. firmware) is using the mailbox. Note * it is expected that firmware and OS will negotiate access rights via * an, as yet to be defined, method. */ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); if (FIELD_GET(PCI_DOE_STATUS_BUSY, val)) return -EBUSY; if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) return -EIO; /* Length is 2 DW of header + length of payload in DW */ length = 2 + DIV_ROUND_UP(task->request_pl_sz, sizeof(__le32)); if (length > PCI_DOE_MAX_LENGTH) return -EIO; if (length == PCI_DOE_MAX_LENGTH) length = 0; /* Write DOE Header */ val = FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_VID, task->prot.vid) | FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, task->prot.type); pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val); pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, FIELD_PREP(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, length)); /* Write payload */ for (i = 0; i < task->request_pl_sz / sizeof(__le32); i++) pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, le32_to_cpu(task->request_pl[i])); /* Write last payload dword */ remainder = task->request_pl_sz % sizeof(__le32); if (remainder) { val = 0; memcpy(&val, &task->request_pl[i], remainder); le32_to_cpus(&val); pci_write_config_dword(pdev, offset + PCI_DOE_WRITE, val); } pci_doe_write_ctrl(doe_mb, PCI_DOE_CTRL_GO); return 0; } static bool pci_doe_data_obj_ready(struct pci_doe_mb *doe_mb) { struct pci_dev *pdev = doe_mb->pdev; int offset = doe_mb->cap_offset; u32 val; pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); if (FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) return true; return false; } static int pci_doe_recv_resp(struct pci_doe_mb *doe_mb, struct pci_doe_task *task) { size_t length, payload_length, remainder, received; struct pci_dev *pdev = doe_mb->pdev; int offset = doe_mb->cap_offset; int i = 0; u32 val; /* Read the first dword to get the protocol */ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val); if ((FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val) != task->prot.vid) || (FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val) != task->prot.type)) { dev_err_ratelimited(&pdev->dev, "[%x] expected [VID, Protocol] = [%04x, %02x], got [%04x, %02x]\n", doe_mb->cap_offset, task->prot.vid, task->prot.type, FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_VID, val), FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_1_TYPE, val)); return -EIO; } pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); /* Read the second dword to get the length */ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val); pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); length = FIELD_GET(PCI_DOE_DATA_OBJECT_HEADER_2_LENGTH, val); /* A value of 0x0 indicates max data object length */ if (!length) length = PCI_DOE_MAX_LENGTH; if (length < 2) return -EIO; /* First 2 dwords have already been read */ length -= 2; received = task->response_pl_sz; payload_length = DIV_ROUND_UP(task->response_pl_sz, sizeof(__le32)); remainder = task->response_pl_sz % sizeof(__le32); /* remainder signifies number 
of data bytes in last payload dword */ if (!remainder) remainder = sizeof(__le32); if (length < payload_length) { received = length * sizeof(__le32); payload_length = length; remainder = sizeof(__le32); } if (payload_length) { /* Read all payload dwords except the last */ for (; i < payload_length - 1; i++) { pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val); task->response_pl[i] = cpu_to_le32(val); pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); } /* Read last payload dword */ pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val); cpu_to_le32s(&val); memcpy(&task->response_pl[i], &val, remainder); /* Prior to the last ack, ensure Data Object Ready */ if (!pci_doe_data_obj_ready(doe_mb)) return -EIO; pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); i++; } /* Flush excess length */ for (; i < length; i++) { pci_read_config_dword(pdev, offset + PCI_DOE_READ, &val); pci_write_config_dword(pdev, offset + PCI_DOE_READ, 0); } /* Final error check to pick up on any since Data Object Ready */ pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) return -EIO; return received; } static void signal_task_complete(struct pci_doe_task *task, int rv) { task->rv = rv; destroy_work_on_stack(&task->work); task->complete(task); } static void signal_task_abort(struct pci_doe_task *task, int rv) { struct pci_doe_mb *doe_mb = task->doe_mb; struct pci_dev *pdev = doe_mb->pdev; if (pci_doe_abort(doe_mb)) { /* * If the device can't process an abort; set the mailbox dead * - no more submissions */ pci_err(pdev, "[%x] Abort failed marking mailbox dead\n", doe_mb->cap_offset); set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags); } signal_task_complete(task, rv); } static void doe_statemachine_work(struct work_struct *work) { struct pci_doe_task *task = container_of(work, struct pci_doe_task, work); struct pci_doe_mb *doe_mb = task->doe_mb; struct pci_dev *pdev = doe_mb->pdev; int offset = doe_mb->cap_offset; unsigned long timeout_jiffies; u32 val; int rc; if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) { signal_task_complete(task, -EIO); return; } /* Send request */ rc = pci_doe_send_req(doe_mb, task); if (rc) { /* * The specification does not provide any guidance on how to * resolve conflicting requests from other entities. * Furthermore, it is likely that busy will not be detected * most of the time. Flag any detection of status busy with an * error. 
*/ if (rc == -EBUSY) dev_err_ratelimited(&pdev->dev, "[%x] busy detected; another entity is sending conflicting requests\n", offset); signal_task_abort(task, rc); return; } timeout_jiffies = jiffies + PCI_DOE_TIMEOUT; /* Poll for response */ retry_resp: pci_read_config_dword(pdev, offset + PCI_DOE_STATUS, &val); if (FIELD_GET(PCI_DOE_STATUS_ERROR, val)) { signal_task_abort(task, -EIO); return; } if (!FIELD_GET(PCI_DOE_STATUS_DATA_OBJECT_READY, val)) { if (time_after(jiffies, timeout_jiffies)) { signal_task_abort(task, -EIO); return; } rc = pci_doe_wait(doe_mb, PCI_DOE_POLL_INTERVAL); if (rc) { signal_task_abort(task, rc); return; } goto retry_resp; } rc = pci_doe_recv_resp(doe_mb, task); if (rc < 0) { signal_task_abort(task, rc); return; } signal_task_complete(task, rc); } static void pci_doe_task_complete(struct pci_doe_task *task) { complete(task->private); } static int pci_doe_discovery(struct pci_doe_mb *doe_mb, u8 *index, u16 *vid, u8 *protocol) { u32 request_pl = FIELD_PREP(PCI_DOE_DATA_OBJECT_DISC_REQ_3_INDEX, *index); __le32 request_pl_le = cpu_to_le32(request_pl); __le32 response_pl_le; u32 response_pl; int rc; rc = pci_doe(doe_mb, PCI_VENDOR_ID_PCI_SIG, PCI_DOE_PROTOCOL_DISCOVERY, &request_pl_le, sizeof(request_pl_le), &response_pl_le, sizeof(response_pl_le)); if (rc < 0) return rc; if (rc != sizeof(response_pl_le)) return -EIO; response_pl = le32_to_cpu(response_pl_le); *vid = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_VID, response_pl); *protocol = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_PROTOCOL, response_pl); *index = FIELD_GET(PCI_DOE_DATA_OBJECT_DISC_RSP_3_NEXT_INDEX, response_pl); return 0; } static void *pci_doe_xa_prot_entry(u16 vid, u8 prot) { return xa_mk_value((vid << 8) | prot); } static int pci_doe_cache_protocols(struct pci_doe_mb *doe_mb) { u8 index = 0; u8 xa_idx = 0; do { int rc; u16 vid; u8 prot; rc = pci_doe_discovery(doe_mb, &index, &vid, &prot); if (rc) return rc; pci_dbg(doe_mb->pdev, "[%x] Found protocol %d vid: %x prot: %x\n", doe_mb->cap_offset, xa_idx, vid, prot); rc = xa_insert(&doe_mb->prots, xa_idx++, pci_doe_xa_prot_entry(vid, prot), GFP_KERNEL); if (rc) return rc; } while (index); return 0; } static void pci_doe_cancel_tasks(struct pci_doe_mb *doe_mb) { /* Stop all pending work items from starting */ set_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags); /* Cancel an in progress work item, if necessary */ set_bit(PCI_DOE_FLAG_CANCEL, &doe_mb->flags); wake_up(&doe_mb->wq); } /** * pci_doe_create_mb() - Create a DOE mailbox object * * @pdev: PCI device to create the DOE mailbox for * @cap_offset: Offset of the DOE mailbox * * Create a single mailbox object to manage the mailbox protocol at the * cap_offset specified. 
* * RETURNS: created mailbox object on success * ERR_PTR(-errno) on failure */ static struct pci_doe_mb *pci_doe_create_mb(struct pci_dev *pdev, u16 cap_offset) { struct pci_doe_mb *doe_mb; int rc; doe_mb = kzalloc(sizeof(*doe_mb), GFP_KERNEL); if (!doe_mb) return ERR_PTR(-ENOMEM); doe_mb->pdev = pdev; doe_mb->cap_offset = cap_offset; init_waitqueue_head(&doe_mb->wq); xa_init(&doe_mb->prots); doe_mb->work_queue = alloc_ordered_workqueue("%s %s DOE [%x]", 0, dev_bus_name(&pdev->dev), pci_name(pdev), doe_mb->cap_offset); if (!doe_mb->work_queue) { pci_err(pdev, "[%x] failed to allocate work queue\n", doe_mb->cap_offset); rc = -ENOMEM; goto err_free; } /* Reset the mailbox by issuing an abort */ rc = pci_doe_abort(doe_mb); if (rc) { pci_err(pdev, "[%x] failed to reset mailbox with abort command : %d\n", doe_mb->cap_offset, rc); goto err_destroy_wq; } /* * The state machine and the mailbox should be in sync now; * Use the mailbox to query protocols. */ rc = pci_doe_cache_protocols(doe_mb); if (rc) { pci_err(pdev, "[%x] failed to cache protocols : %d\n", doe_mb->cap_offset, rc); goto err_cancel; } return doe_mb; err_cancel: pci_doe_cancel_tasks(doe_mb); xa_destroy(&doe_mb->prots); err_destroy_wq: destroy_workqueue(doe_mb->work_queue); err_free: kfree(doe_mb); return ERR_PTR(rc); } /** * pci_doe_destroy_mb() - Destroy a DOE mailbox object * * @doe_mb: DOE mailbox * * Destroy all internal data structures created for the DOE mailbox. */ static void pci_doe_destroy_mb(struct pci_doe_mb *doe_mb) { pci_doe_cancel_tasks(doe_mb); xa_destroy(&doe_mb->prots); destroy_workqueue(doe_mb->work_queue); kfree(doe_mb); } /** * pci_doe_supports_prot() - Return if the DOE instance supports the given * protocol * @doe_mb: DOE mailbox capability to query * @vid: Protocol Vendor ID * @type: Protocol type * * RETURNS: True if the DOE mailbox supports the protocol specified */ static bool pci_doe_supports_prot(struct pci_doe_mb *doe_mb, u16 vid, u8 type) { unsigned long index; void *entry; /* The discovery protocol must always be supported */ if (vid == PCI_VENDOR_ID_PCI_SIG && type == PCI_DOE_PROTOCOL_DISCOVERY) return true; xa_for_each(&doe_mb->prots, index, entry) if (entry == pci_doe_xa_prot_entry(vid, type)) return true; return false; } /** * pci_doe_submit_task() - Submit a task to be processed by the state machine * * @doe_mb: DOE mailbox capability to submit to * @task: task to be queued * * Submit a DOE task (request/response) to the DOE mailbox to be processed. * Returns upon queueing the task object. If the queue is full this function * will sleep until there is room in the queue. * * task->complete will be called when the state machine is done processing this * task. * * @task must be allocated on the stack. * * Excess data will be discarded. * * RETURNS: 0 when task has been successfully queued, -ERRNO on error */ static int pci_doe_submit_task(struct pci_doe_mb *doe_mb, struct pci_doe_task *task) { if (!pci_doe_supports_prot(doe_mb, task->prot.vid, task->prot.type)) return -EINVAL; if (test_bit(PCI_DOE_FLAG_DEAD, &doe_mb->flags)) return -EIO; task->doe_mb = doe_mb; INIT_WORK_ONSTACK(&task->work, doe_statemachine_work); queue_work(doe_mb->work_queue, &task->work); return 0; } /** * pci_doe() - Perform Data Object Exchange * * @doe_mb: DOE Mailbox * @vendor: Vendor ID * @type: Data Object Type * @request: Request payload * @request_sz: Size of request payload (bytes) * @response: Response payload * @response_sz: Size of response payload (bytes) * * Submit @request to @doe_mb and store the @response. 
* The DOE exchange is performed synchronously and may therefore sleep. * * Payloads are treated as opaque byte streams which are transmitted verbatim, * without byte-swapping. If payloads contain little-endian register values, * the caller is responsible for conversion with cpu_to_le32() / le32_to_cpu(). * * For convenience, arbitrary payload sizes are allowed even though PCIe r6.0 * sec 6.30.1 specifies the Data Object Header 2 "Length" in dwords. The last * (partial) dword is copied with byte granularity and padded with zeroes if * necessary. Callers are thus relieved of using dword-sized bounce buffers. * * RETURNS: Length of received response or negative errno. * Received data in excess of @response_sz is discarded. * The length may be smaller than @response_sz and the caller * is responsible for checking that. */ int pci_doe(struct pci_doe_mb *doe_mb, u16 vendor, u8 type, const void *request, size_t request_sz, void *response, size_t response_sz) { DECLARE_COMPLETION_ONSTACK(c); struct pci_doe_task task = { .prot.vid = vendor, .prot.type = type, .request_pl = request, .request_pl_sz = request_sz, .response_pl = response, .response_pl_sz = response_sz, .complete = pci_doe_task_complete, .private = &c, }; int rc; rc = pci_doe_submit_task(doe_mb, &task); if (rc) return rc; wait_for_completion(&c); return task.rv; } EXPORT_SYMBOL_GPL(pci_doe); /** * pci_find_doe_mailbox() - Find Data Object Exchange mailbox * * @pdev: PCI device * @vendor: Vendor ID * @type: Data Object Type * * Find first DOE mailbox of a PCI device which supports the given protocol. * * RETURNS: Pointer to the DOE mailbox or NULL if none was found. */ struct pci_doe_mb *pci_find_doe_mailbox(struct pci_dev *pdev, u16 vendor, u8 type) { struct pci_doe_mb *doe_mb; unsigned long index; xa_for_each(&pdev->doe_mbs, index, doe_mb) if (pci_doe_supports_prot(doe_mb, vendor, type)) return doe_mb; return NULL; } EXPORT_SYMBOL_GPL(pci_find_doe_mailbox); void pci_doe_init(struct pci_dev *pdev) { struct pci_doe_mb *doe_mb; u16 offset = 0; int rc; xa_init(&pdev->doe_mbs); while ((offset = pci_find_next_ext_capability(pdev, offset, PCI_EXT_CAP_ID_DOE))) { doe_mb = pci_doe_create_mb(pdev, offset); if (IS_ERR(doe_mb)) { pci_err(pdev, "[%x] failed to create mailbox: %ld\n", offset, PTR_ERR(doe_mb)); continue; } rc = xa_insert(&pdev->doe_mbs, offset, doe_mb, GFP_KERNEL); if (rc) { pci_err(pdev, "[%x] failed to insert mailbox: %d\n", offset, rc); pci_doe_destroy_mb(doe_mb); } } } void pci_doe_destroy(struct pci_dev *pdev) { struct pci_doe_mb *doe_mb; unsigned long index; xa_for_each(&pdev->doe_mbs, index, doe_mb) pci_doe_destroy_mb(doe_mb); xa_destroy(&pdev->doe_mbs); } void pci_doe_disconnected(struct pci_dev *pdev) { struct pci_doe_mb *doe_mb; unsigned long index; xa_for_each(&pdev->doe_mbs, index, doe_mb) pci_doe_cancel_tasks(doe_mb); }
linux-master
drivers/pci/doe.c
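The two exported helpers documented in the file above, pci_find_doe_mailbox() and pci_doe(), are the whole consumer-facing surface of the DOE code. The sketch below shows how a driver might use them for a single synchronous exchange; the vendor ID 0x1234, object type 0x01 and the one-dword payload layout are placeholders for a real protocol, and example_doe_query() is an invented name.

/*
 * Illustrative consumer sketch (not part of the file above): one synchronous
 * DOE exchange using the exported helpers. Vendor ID, object type and the
 * payload layout are placeholders.
 */
#include <linux/pci.h>
#include <linux/pci-doe.h>

static int example_doe_query(struct pci_dev *pdev)
{
	struct pci_doe_mb *doe_mb;
	__le32 req = cpu_to_le32(0);	/* protocol-specific request word */
	__le32 rsp;
	int rc;

	/* Find a mailbox that advertises the (hypothetical) protocol */
	doe_mb = pci_find_doe_mailbox(pdev, 0x1234 /* vendor */, 0x01 /* type */);
	if (!doe_mb)
		return -ENODEV;

	/* Synchronous exchange; returns the number of response bytes */
	rc = pci_doe(doe_mb, 0x1234, 0x01, &req, sizeof(req), &rsp, sizeof(rsp));
	if (rc < 0)
		return rc;
	if (rc != sizeof(rsp))
		return -EIO;

	pci_info(pdev, "DOE response: %#x\n", le32_to_cpu(rsp));
	return 0;
}

As the pci_doe() kernel-doc notes, payloads are opaque byte streams, so the caller converts endianness itself and must check the returned length, which may be shorter than the buffer it supplied.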
// SPDX-License-Identifier: GPL-2.0 /* * PCI Bus Services, see include/linux/pci.h for further explanation. * * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter, * David Mosberger-Tang * * Copyright 1997 -- 2000 Martin Mares <[email protected]> */ #include <linux/acpi.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/init.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/pm.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/log2.h> #include <linux/logic_pio.h> #include <linux/pm_wakeup.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/pm_runtime.h> #include <linux/pci_hotplug.h> #include <linux/vmalloc.h> #include <asm/dma.h> #include <linux/aer.h> #include <linux/bitfield.h> #include "pci.h" DEFINE_MUTEX(pci_slot_mutex); const char *pci_power_names[] = { "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown", }; EXPORT_SYMBOL_GPL(pci_power_names); #ifdef CONFIG_X86_32 int isa_dma_bridge_buggy; EXPORT_SYMBOL(isa_dma_bridge_buggy); #endif int pci_pci_problems; EXPORT_SYMBOL(pci_pci_problems); unsigned int pci_pm_d3hot_delay; static void pci_pme_list_scan(struct work_struct *work); static LIST_HEAD(pci_pme_list); static DEFINE_MUTEX(pci_pme_list_mutex); static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan); struct pci_pme_device { struct list_head list; struct pci_dev *dev; }; #define PME_TIMEOUT 1000 /* How long between PME checks */ /* * Following exit from Conventional Reset, devices must be ready within 1 sec * (PCIe r6.0 sec 6.6.1). A D3cold to D0 transition implies a Conventional * Reset (PCIe r6.0 sec 5.8). */ #define PCI_RESET_WAIT 1000 /* msec */ /* * Devices may extend the 1 sec period through Request Retry Status * completions (PCIe r6.0 sec 2.3.1). The spec does not provide an upper * limit, but 60 sec ought to be enough for any device to become * responsive. 
*/ #define PCIE_RESET_READY_POLL_MS 60000 /* msec */ static void pci_dev_d3_sleep(struct pci_dev *dev) { unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay); unsigned int upper; if (delay_ms) { /* Use a 20% upper bound, 1ms minimum */ upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U); usleep_range(delay_ms * USEC_PER_MSEC, (delay_ms + upper) * USEC_PER_MSEC); } } bool pci_reset_supported(struct pci_dev *dev) { return dev->reset_methods[0] != 0; } #ifdef CONFIG_PCI_DOMAINS int pci_domains_supported = 1; #endif #define DEFAULT_CARDBUS_IO_SIZE (256) #define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024) /* pci=cbmemsize=nnM,cbiosize=nn can override this */ unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE; unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; #define DEFAULT_HOTPLUG_IO_SIZE (256) #define DEFAULT_HOTPLUG_MMIO_SIZE (2*1024*1024) #define DEFAULT_HOTPLUG_MMIO_PREF_SIZE (2*1024*1024) /* hpiosize=nn can override this */ unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; /* * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size, * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size; * pci=hpmemsize=nnM overrides both */ unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE; unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE; #define DEFAULT_HOTPLUG_BUS_SIZE 1 unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE; /* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */ #ifdef CONFIG_PCIE_BUS_TUNE_OFF enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; #elif defined CONFIG_PCIE_BUS_SAFE enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; #elif defined CONFIG_PCIE_BUS_PERFORMANCE enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE; #elif defined CONFIG_PCIE_BUS_PEER2PEER enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER; #else enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT; #endif /* * The default CLS is used if arch didn't set CLS explicitly and not * all pci devices agree on the same value. Arch can override either * the dfl or actual value as it sees fit. Don't forget this is * measured in 32-bit words, not bytes. */ u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2; u8 pci_cache_line_size; /* * If we set up a device for bus mastering, we need to check the latency * timer as certain BIOSes forget to set it properly. */ unsigned int pcibios_max_latency = 255; /* If set, the PCIe ARI capability will not be used. */ static bool pcie_ari_disabled; /* If set, the PCIe ATS capability will not be used. */ static bool pcie_ats_disabled; /* If set, the PCI config space of each device is printed during boot. */ bool pci_early_dump; bool pci_ats_disabled(void) { return pcie_ats_disabled; } EXPORT_SYMBOL_GPL(pci_ats_disabled); /* Disable bridge_d3 for all PCIe ports */ static bool pci_bridge_d3_disable; /* Force bridge_d3 for all PCIe ports */ static bool pci_bridge_d3_force; static int __init pcie_port_pm_setup(char *str) { if (!strcmp(str, "off")) pci_bridge_d3_disable = true; else if (!strcmp(str, "force")) pci_bridge_d3_force = true; return 1; } __setup("pcie_port_pm=", pcie_port_pm_setup); /** * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children * @bus: pointer to PCI bus structure to search * * Given a PCI bus, returns the highest PCI bus number present in the set * including the given PCI bus and its list of child PCI buses. 
*/ unsigned char pci_bus_max_busnr(struct pci_bus *bus) { struct pci_bus *tmp; unsigned char max, n; max = bus->busn_res.end; list_for_each_entry(tmp, &bus->children, node) { n = pci_bus_max_busnr(tmp); if (n > max) max = n; } return max; } EXPORT_SYMBOL_GPL(pci_bus_max_busnr); /** * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS * @pdev: the PCI device * * Returns error bits set in PCI_STATUS and clears them. */ int pci_status_get_and_clear_errors(struct pci_dev *pdev) { u16 status; int ret; ret = pci_read_config_word(pdev, PCI_STATUS, &status); if (ret != PCIBIOS_SUCCESSFUL) return -EIO; status &= PCI_STATUS_ERROR_BITS; if (status) pci_write_config_word(pdev, PCI_STATUS, status); return status; } EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors); #ifdef CONFIG_HAS_IOMEM static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar, bool write_combine) { struct resource *res = &pdev->resource[bar]; resource_size_t start = res->start; resource_size_t size = resource_size(res); /* * Make sure the BAR is actually a memory resource, not an IO resource */ if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) { pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res); return NULL; } if (write_combine) return ioremap_wc(start, size); return ioremap(start, size); } void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar) { return __pci_ioremap_resource(pdev, bar, false); } EXPORT_SYMBOL_GPL(pci_ioremap_bar); void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar) { return __pci_ioremap_resource(pdev, bar, true); } EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar); #endif /** * pci_dev_str_match_path - test if a path string matches a device * @dev: the PCI device to test * @path: string to match the device against * @endptr: pointer to the string after the match * * Test if a string (typically from a kernel parameter) formatted as a * path of device/function addresses matches a PCI device. The string must * be of the form: * * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]* * * A path for a device can be obtained using 'lspci -t'. Using a path * is more robust against bus renumbering than using only a single bus, * device and function address. * * Returns 1 if the string matches the device, 0 if it does not and * a negative error code if it fails to parse the string. */ static int pci_dev_str_match_path(struct pci_dev *dev, const char *path, const char **endptr) { int ret; unsigned int seg, bus, slot, func; char *wpath, *p; char end; *endptr = strchrnul(path, ';'); wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC); if (!wpath) return -ENOMEM; while (1) { p = strrchr(wpath, '/'); if (!p) break; ret = sscanf(p, "/%x.%x%c", &slot, &func, &end); if (ret != 2) { ret = -EINVAL; goto free_and_exit; } if (dev->devfn != PCI_DEVFN(slot, func)) { ret = 0; goto free_and_exit; } /* * Note: we don't need to get a reference to the upstream * bridge because we hold a reference to the top level * device which should hold a reference to the bridge, * and so on. 
*/ dev = pci_upstream_bridge(dev); if (!dev) { ret = 0; goto free_and_exit; } *p = 0; } ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot, &func, &end); if (ret != 4) { seg = 0; ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end); if (ret != 3) { ret = -EINVAL; goto free_and_exit; } } ret = (seg == pci_domain_nr(dev->bus) && bus == dev->bus->number && dev->devfn == PCI_DEVFN(slot, func)); free_and_exit: kfree(wpath); return ret; } /** * pci_dev_str_match - test if a string matches a device * @dev: the PCI device to test * @p: string to match the device against * @endptr: pointer to the string after the match * * Test if a string (typically from a kernel parameter) matches a specified * PCI device. The string may be of one of the following formats: * * [<domain>:]<bus>:<device>.<func>[/<device>.<func>]* * pci:<vendor>:<device>[:<subvendor>:<subdevice>] * * The first format specifies a PCI bus/device/function address which * may change if new hardware is inserted, if motherboard firmware changes, * or due to changes caused in kernel parameters. If the domain is * left unspecified, it is taken to be 0. In order to be robust against * bus renumbering issues, a path of PCI device/function numbers may be used * to address the specific device. The path for a device can be determined * through the use of 'lspci -t'. * * The second format matches devices using IDs in the configuration * space which may match multiple devices in the system. A value of 0 * for any field will match all devices. (Note: this differs from * in-kernel code that uses PCI_ANY_ID which is ~0; this is for * legacy reasons and convenience so users don't have to specify * FFFFFFFFs on the command line.) * * Returns 1 if the string matches the device, 0 if it does not and * a negative error code if the string cannot be parsed. 
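 *
 * For example (illustrative values), "0000:01:00.0" matches a single
 * device by address, while "pci:8086:0000" matches every device with
 * vendor ID 8086 because the zero device ID acts as a wildcard.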
*/ static int pci_dev_str_match(struct pci_dev *dev, const char *p, const char **endptr) { int ret; int count; unsigned short vendor, device, subsystem_vendor, subsystem_device; if (strncmp(p, "pci:", 4) == 0) { /* PCI vendor/device (subvendor/subdevice) IDs are specified */ p += 4; ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device, &subsystem_vendor, &subsystem_device, &count); if (ret != 4) { ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count); if (ret != 2) return -EINVAL; subsystem_vendor = 0; subsystem_device = 0; } p += count; if ((!vendor || vendor == dev->vendor) && (!device || device == dev->device) && (!subsystem_vendor || subsystem_vendor == dev->subsystem_vendor) && (!subsystem_device || subsystem_device == dev->subsystem_device)) goto found; } else { /* * PCI Bus, Device, Function IDs are specified * (optionally, may include a path of devfns following it) */ ret = pci_dev_str_match_path(dev, p, &p); if (ret < 0) return ret; else if (ret) goto found; } *endptr = p; return 0; found: *endptr = p; return 1; } static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap, int *ttl) { u8 id; u16 ent; pci_bus_read_config_byte(bus, devfn, pos, &pos); while ((*ttl)--) { if (pos < 0x40) break; pos &= ~3; pci_bus_read_config_word(bus, devfn, pos, &ent); id = ent & 0xff; if (id == 0xff) break; if (id == cap) return pos; pos = (ent >> 8); } return 0; } static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap) { int ttl = PCI_FIND_CAP_TTL; return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl); } u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap) { return __pci_find_next_cap(dev->bus, dev->devfn, pos + PCI_CAP_LIST_NEXT, cap); } EXPORT_SYMBOL_GPL(pci_find_next_capability); static u8 __pci_bus_find_cap_start(struct pci_bus *bus, unsigned int devfn, u8 hdr_type) { u16 status; pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status); if (!(status & PCI_STATUS_CAP_LIST)) return 0; switch (hdr_type) { case PCI_HEADER_TYPE_NORMAL: case PCI_HEADER_TYPE_BRIDGE: return PCI_CAPABILITY_LIST; case PCI_HEADER_TYPE_CARDBUS: return PCI_CB_CAPABILITY_LIST; } return 0; } /** * pci_find_capability - query for devices' capabilities * @dev: PCI device to query * @cap: capability code * * Tell if a device supports a given PCI capability. * Returns the address of the requested capability structure within the * device's PCI configuration space or 0 in case the device does not * support it. Possible values for @cap include: * * %PCI_CAP_ID_PM Power Management * %PCI_CAP_ID_AGP Accelerated Graphics Port * %PCI_CAP_ID_VPD Vital Product Data * %PCI_CAP_ID_SLOTID Slot Identification * %PCI_CAP_ID_MSI Message Signalled Interrupts * %PCI_CAP_ID_CHSWP CompactPCI HotSwap * %PCI_CAP_ID_PCIX PCI-X * %PCI_CAP_ID_EXP PCI Express */ u8 pci_find_capability(struct pci_dev *dev, int cap) { u8 pos; pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); if (pos) pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap); return pos; } EXPORT_SYMBOL(pci_find_capability); /** * pci_bus_find_capability - query for devices' capabilities * @bus: the PCI bus to query * @devfn: PCI device to query * @cap: capability code * * Like pci_find_capability() but works for PCI devices that do not have a * pci_dev structure set up yet. * * Returns the address of the requested capability structure within the * device's PCI configuration space or 0 in case the device does not * support it. 
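 *
 * Illustrative sketch (not part of the original source):
 *
 *	u8 pos = pci_bus_find_capability(bus, devfn, PCI_CAP_ID_MSI);
 *	if (!pos)
 *		return;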
*/ u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) { u8 hdr_type, pos; pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type); pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f); if (pos) pos = __pci_find_next_cap(bus, devfn, pos, cap); return pos; } EXPORT_SYMBOL(pci_bus_find_capability); /** * pci_find_next_ext_capability - Find an extended capability * @dev: PCI device to query * @start: address at which to start looking (0 to start at beginning of list) * @cap: capability code * * Returns the address of the next matching extended capability structure * within the device's PCI configuration space or 0 if the device does * not support it. Some capabilities can occur several times, e.g., the * vendor-specific capability, and this provides a way to find them all. */ u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap) { u32 header; int ttl; u16 pos = PCI_CFG_SPACE_SIZE; /* minimum 8 bytes per capability */ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) return 0; if (start) pos = start; if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) return 0; /* * If we have no capabilities, this is indicated by cap ID, * cap version and next pointer all being 0. */ if (header == 0) return 0; while (ttl-- > 0) { if (PCI_EXT_CAP_ID(header) == cap && pos != start) return pos; pos = PCI_EXT_CAP_NEXT(header); if (pos < PCI_CFG_SPACE_SIZE) break; if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) break; } return 0; } EXPORT_SYMBOL_GPL(pci_find_next_ext_capability); /** * pci_find_ext_capability - Find an extended capability * @dev: PCI device to query * @cap: capability code * * Returns the address of the requested extended capability structure * within the device's PCI configuration space or 0 if the device does * not support it. Possible values for @cap include: * * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting * %PCI_EXT_CAP_ID_VC Virtual Channel * %PCI_EXT_CAP_ID_DSN Device Serial Number * %PCI_EXT_CAP_ID_PWR Power Budgeting */ u16 pci_find_ext_capability(struct pci_dev *dev, int cap) { return pci_find_next_ext_capability(dev, 0, cap); } EXPORT_SYMBOL_GPL(pci_find_ext_capability); /** * pci_get_dsn - Read and return the 8-byte Device Serial Number * @dev: PCI device to query * * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial * Number. * * Returns the DSN, or zero if the capability does not exist. */ u64 pci_get_dsn(struct pci_dev *dev) { u32 dword; u64 dsn; int pos; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN); if (!pos) return 0; /* * The Device Serial Number is two dwords offset 4 bytes from the * capability position. The specification says that the first dword is * the lower half, and the second dword is the upper half. 
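 *
 * For example, if the lower dword reads 0x00000001 and the upper dword
 * reads 0x02000000, the combined value returned is 0x0200000000000001.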
*/ pos += 4; pci_read_config_dword(dev, pos, &dword); dsn = (u64)dword; pci_read_config_dword(dev, pos + 4, &dword); dsn |= ((u64)dword) << 32; return dsn; } EXPORT_SYMBOL_GPL(pci_get_dsn); static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap) { int rc, ttl = PCI_FIND_CAP_TTL; u8 cap, mask; if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST) mask = HT_3BIT_CAP_MASK; else mask = HT_5BIT_CAP_MASK; pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos, PCI_CAP_ID_HT, &ttl); while (pos) { rc = pci_read_config_byte(dev, pos + 3, &cap); if (rc != PCIBIOS_SUCCESSFUL) return 0; if ((cap & mask) == ht_cap) return pos; pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos + PCI_CAP_LIST_NEXT, PCI_CAP_ID_HT, &ttl); } return 0; } /** * pci_find_next_ht_capability - query a device's HyperTransport capabilities * @dev: PCI device to query * @pos: Position from which to continue searching * @ht_cap: HyperTransport capability code * * To be used in conjunction with pci_find_ht_capability() to search for * all capabilities matching @ht_cap. @pos should always be a value returned * from pci_find_ht_capability(). * * NB. To be 100% safe against broken PCI devices, the caller should take * steps to avoid an infinite loop. */ u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap) { return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap); } EXPORT_SYMBOL_GPL(pci_find_next_ht_capability); /** * pci_find_ht_capability - query a device's HyperTransport capabilities * @dev: PCI device to query * @ht_cap: HyperTransport capability code * * Tell if a device supports a given HyperTransport capability. * Returns an address within the device's PCI configuration space * or 0 in case the device does not support the request capability. * The address points to the PCI capability, of type PCI_CAP_ID_HT, * which has a HyperTransport capability matching @ht_cap. */ u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap) { u8 pos; pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type); if (pos) pos = __pci_find_next_ht_cap(dev, pos, ht_cap); return pos; } EXPORT_SYMBOL_GPL(pci_find_ht_capability); /** * pci_find_vsec_capability - Find a vendor-specific extended capability * @dev: PCI device to query * @vendor: Vendor ID for which capability is defined * @cap: Vendor-specific capability ID * * If @dev has Vendor ID @vendor, search for a VSEC capability with * VSEC ID @cap. If found, return the capability offset in * config space; otherwise return 0. */ u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap) { u16 vsec = 0; u32 header; if (vendor != dev->vendor) return 0; while ((vsec = pci_find_next_ext_capability(dev, vsec, PCI_EXT_CAP_ID_VNDR))) { if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header) == PCIBIOS_SUCCESSFUL && PCI_VNDR_HEADER_ID(header) == cap) return vsec; } return 0; } EXPORT_SYMBOL_GPL(pci_find_vsec_capability); /** * pci_find_dvsec_capability - Find DVSEC for vendor * @dev: PCI device to query * @vendor: Vendor ID to match for the DVSEC * @dvsec: Designated Vendor-specific capability ID * * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability * offset in config space; otherwise return 0. 
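 *
 * Illustrative sketch (the DVSEC ID 0x23 below is a made-up example
 * value, not a real assignment):
 *
 *	u16 pos = pci_find_dvsec_capability(pdev, PCI_VENDOR_ID_INTEL, 0x23);
 *	if (!pos)
 *		return -ENODEV;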
*/ u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec) { int pos; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC); if (!pos) return 0; while (pos) { u16 v, id; pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v); pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id); if (vendor == v && dvsec == id) return pos; pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC); } return 0; } EXPORT_SYMBOL_GPL(pci_find_dvsec_capability); /** * pci_find_parent_resource - return resource region of parent bus of given * region * @dev: PCI device structure contains resources to be searched * @res: child resource record for which parent is sought * * For given resource region of given device, return the resource region of * parent bus the given region is contained in. */ struct resource *pci_find_parent_resource(const struct pci_dev *dev, struct resource *res) { const struct pci_bus *bus = dev->bus; struct resource *r; pci_bus_for_each_resource(bus, r) { if (!r) continue; if (resource_contains(r, res)) { /* * If the window is prefetchable but the BAR is * not, the allocator made a mistake. */ if (r->flags & IORESOURCE_PREFETCH && !(res->flags & IORESOURCE_PREFETCH)) return NULL; /* * If we're below a transparent bridge, there may * be both a positively-decoded aperture and a * subtractively-decoded region that contain the BAR. * We want the positively-decoded one, so this depends * on pci_bus_for_each_resource() giving us those * first. */ return r; } } return NULL; } EXPORT_SYMBOL(pci_find_parent_resource); /** * pci_find_resource - Return matching PCI device resource * @dev: PCI device to query * @res: Resource to look for * * Goes over standard PCI resources (BARs) and checks if the given resource * is partially or fully contained in any of them. In that case the * matching resource is returned, %NULL otherwise. */ struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res) { int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) { struct resource *r = &dev->resource[i]; if (r->start && resource_contains(r, res)) return r; } return NULL; } EXPORT_SYMBOL(pci_find_resource); /** * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos * @dev: the PCI device to operate on * @pos: config space offset of status word * @mask: mask of bit(s) to care about in status word * * Return 1 when mask bit(s) in status word clear, 0 otherwise. */ int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask) { int i; /* Wait for Transaction Pending bit clean */ for (i = 0; i < 4; i++) { u16 status; if (i) msleep((1 << (i - 1)) * 100); pci_read_config_word(dev, pos, &status); if (!(status & mask)) return 1; } return 0; } static int pci_acs_enable; /** * pci_request_acs - ask for ACS to be enabled if supported */ void pci_request_acs(void) { pci_acs_enable = 1; } static const char *disable_acs_redir_param; /** * pci_disable_acs_redir - disable ACS redirect capabilities * @dev: the PCI device * * For only devices specified in the disable_acs_redir parameter. 
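 *
 * For example (illustrative addresses only), booting with
 * "pci=disable_acs_redir=0000:01:00.0;pci:8086:0000" would disable ACS
 * redirection both for the device at 0000:01:00.0 and for every device
 * whose vendor ID is 8086.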
*/ static void pci_disable_acs_redir(struct pci_dev *dev) { int ret = 0; const char *p; int pos; u16 ctrl; if (!disable_acs_redir_param) return; p = disable_acs_redir_param; while (*p) { ret = pci_dev_str_match(dev, p, &p); if (ret < 0) { pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n", disable_acs_redir_param); break; } else if (ret == 1) { /* Found a match */ break; } if (*p != ';' && *p != ',') { /* End of param or invalid format */ break; } p++; } if (ret != 1) return; if (!pci_dev_specific_disable_acs_redir(dev)) return; pos = dev->acs_cap; if (!pos) { pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n"); return; } pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); /* P2P Request & Completion Redirect */ ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC); pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); pci_info(dev, "disabled ACS redirect\n"); } /** * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities * @dev: the PCI device */ static void pci_std_enable_acs(struct pci_dev *dev) { int pos; u16 cap; u16 ctrl; pos = dev->acs_cap; if (!pos) return; pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); /* Source Validation */ ctrl |= (cap & PCI_ACS_SV); /* P2P Request Redirect */ ctrl |= (cap & PCI_ACS_RR); /* P2P Completion Redirect */ ctrl |= (cap & PCI_ACS_CR); /* Upstream Forwarding */ ctrl |= (cap & PCI_ACS_UF); /* Enable Translation Blocking for external devices and noats */ if (pci_ats_disabled() || dev->external_facing || dev->untrusted) ctrl |= (cap & PCI_ACS_TB); pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); } /** * pci_enable_acs - enable ACS if hardware support it * @dev: the PCI device */ static void pci_enable_acs(struct pci_dev *dev) { if (!pci_acs_enable) goto disable_acs_redir; if (!pci_dev_specific_enable_acs(dev)) goto disable_acs_redir; pci_std_enable_acs(dev); disable_acs_redir: /* * Note: pci_disable_acs_redir() must be called even if ACS was not * enabled by the kernel because it may have been enabled by * platform firmware. So if we are told to disable it, we should * always disable it after setting the kernel's default * preferences. */ pci_disable_acs_redir(dev); } /** * pci_restore_bars - restore a device's BAR values (e.g. after wake-up) * @dev: PCI device to have its BARs restored * * Restore the BAR values for a given device, so as to make it * accessible by its driver. 
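 *
 * This is what pci_set_full_power_state() below relies on when
 * pci_power_up() returns 1 to indicate that leaving D3hot may have reset
 * the device, roughly:
 *
 *	if (pci_power_up(dev) > 0)
 *		pci_restore_bars(dev);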
*/ static void pci_restore_bars(struct pci_dev *dev) { int i; for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) pci_update_resource(dev, i); } static inline bool platform_pci_power_manageable(struct pci_dev *dev) { if (pci_use_mid_pm()) return true; return acpi_pci_power_manageable(dev); } static inline int platform_pci_set_power_state(struct pci_dev *dev, pci_power_t t) { if (pci_use_mid_pm()) return mid_pci_set_power_state(dev, t); return acpi_pci_set_power_state(dev, t); } static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev) { if (pci_use_mid_pm()) return mid_pci_get_power_state(dev); return acpi_pci_get_power_state(dev); } static inline void platform_pci_refresh_power_state(struct pci_dev *dev) { if (!pci_use_mid_pm()) acpi_pci_refresh_power_state(dev); } static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) { if (pci_use_mid_pm()) return PCI_POWER_ERROR; return acpi_pci_choose_state(dev); } static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable) { if (pci_use_mid_pm()) return PCI_POWER_ERROR; return acpi_pci_wakeup(dev, enable); } static inline bool platform_pci_need_resume(struct pci_dev *dev) { if (pci_use_mid_pm()) return false; return acpi_pci_need_resume(dev); } static inline bool platform_pci_bridge_d3(struct pci_dev *dev) { if (pci_use_mid_pm()) return false; return acpi_pci_bridge_d3(dev); } /** * pci_update_current_state - Read power state of given device and cache it * @dev: PCI device to handle. * @state: State to cache in case the device doesn't have the PM capability * * The power state is read from the PMCSR register, which however is * inaccessible in D3cold. The platform firmware is therefore queried first * to detect accessibility of the register. In case the platform firmware * reports an incorrect state or the device isn't power manageable by the * platform at all, we try to detect D3cold by testing accessibility of the * vendor ID in config space. */ void pci_update_current_state(struct pci_dev *dev, pci_power_t state) { if (platform_pci_get_power_state(dev) == PCI_D3cold) { dev->current_state = PCI_D3cold; } else if (dev->pm_cap) { u16 pmcsr; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); if (PCI_POSSIBLE_ERROR(pmcsr)) { dev->current_state = PCI_D3cold; return; } dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; } else { dev->current_state = state; } } /** * pci_refresh_power_state - Refresh the given device's power state data * @dev: Target PCI device. * * Ask the platform to refresh the devices power state information and invoke * pci_update_current_state() to update its current PCI power state. */ void pci_refresh_power_state(struct pci_dev *dev) { platform_pci_refresh_power_state(dev); pci_update_current_state(dev, dev->current_state); } /** * pci_platform_power_transition - Use platform to change device power state * @dev: PCI device to handle. * @state: State to put the device into. */ int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state) { int error; error = platform_pci_set_power_state(dev, state); if (!error) pci_update_current_state(dev, state); else if (!dev->pm_cap) /* Fall back to PCI_D0 */ dev->current_state = PCI_D0; return error; } EXPORT_SYMBOL_GPL(pci_platform_power_transition); static int pci_resume_one(struct pci_dev *pci_dev, void *ign) { pm_request_resume(&pci_dev->dev); return 0; } /** * pci_resume_bus - Walk given bus and runtime resume devices on it * @bus: Top bus of the subtree to walk. 
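 *
 * Illustrative sketch (not part of the original source; "bridge" stands
 * for a hotplug port whose slot was just powered up):
 *
 *	pci_resume_bus(bridge->subordinate);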
*/ void pci_resume_bus(struct pci_bus *bus) { if (bus) pci_walk_bus(bus, pci_resume_one, NULL); } static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) { int delay = 1; bool retrain = false; struct pci_dev *bridge; if (pci_is_pcie(dev)) { bridge = pci_upstream_bridge(dev); if (bridge) retrain = true; } /* * After reset, the device should not silently discard config * requests, but it may still indicate that it needs more time by * responding to them with CRS completions. The Root Port will * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete * the read (except when CRS SV is enabled and the read was for the * Vendor ID; in that case it synthesizes 0x0001 data). * * Wait for the device to return a non-CRS completion. Read the * Command register instead of Vendor ID so we don't have to * contend with the CRS SV value. */ for (;;) { u32 id; pci_read_config_dword(dev, PCI_COMMAND, &id); if (!PCI_POSSIBLE_ERROR(id)) break; if (delay > timeout) { pci_warn(dev, "not ready %dms after %s; giving up\n", delay - 1, reset_type); return -ENOTTY; } if (delay > PCI_RESET_WAIT) { if (retrain) { retrain = false; if (pcie_failed_link_retrain(bridge)) { delay = 1; continue; } } pci_info(dev, "not ready %dms after %s; waiting\n", delay - 1, reset_type); } msleep(delay); delay *= 2; } if (delay > PCI_RESET_WAIT) pci_info(dev, "ready %dms after %s\n", delay - 1, reset_type); return 0; } /** * pci_power_up - Put the given device into D0 * @dev: PCI device to power up * * On success, return 0 or 1, depending on whether or not it is necessary to * restore the device's BARs subsequently (1 is returned in that case). * * On failure, return a negative error code. Always return failure if @dev * lacks a Power Management Capability, even if the platform was able to * put the device in D0 via non-PCI means. */ int pci_power_up(struct pci_dev *dev) { bool need_restore; pci_power_t state; u16 pmcsr; platform_pci_set_power_state(dev, PCI_D0); if (!dev->pm_cap) { state = platform_pci_get_power_state(dev); if (state == PCI_UNKNOWN) dev->current_state = PCI_D0; else dev->current_state = state; return -EIO; } pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); if (PCI_POSSIBLE_ERROR(pmcsr)) { pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n", pci_power_name(dev->current_state)); dev->current_state = PCI_D3cold; return -EIO; } state = pmcsr & PCI_PM_CTRL_STATE_MASK; need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET); if (state == PCI_D0) goto end; /* * Force the entire word to 0. This doesn't affect PME_Status, disables * PME_En, and sets PowerState to 0. */ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0); /* Mandatory transition delays; see PCI PM 1.2. */ if (state == PCI_D3hot) pci_dev_d3_sleep(dev); else if (state == PCI_D2) udelay(PCI_PM_D2_DELAY); end: dev->current_state = PCI_D0; if (need_restore) return 1; return 0; } /** * pci_set_full_power_state - Put a PCI device into D0 and update its state * @dev: PCI device to power up * * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register * to confirm the state change, restore its BARs if they might be lost and * reconfigure ASPM in accordance with the new power state. * * If pci_restore_state() is going to be called right after a power state change * to D0, it is more efficient to use pci_power_up() directly instead of this * function. 
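 *
 * In other words, a resume path that restores config space anyway can do
 * (sketch):
 *
 *	pci_power_up(dev);
 *	pci_restore_state(dev);
 *
 * rather than going through pci_set_power_state(dev, PCI_D0).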
*/ static int pci_set_full_power_state(struct pci_dev *dev) { u16 pmcsr; int ret; ret = pci_power_up(dev); if (ret < 0) { if (dev->current_state == PCI_D0) return 0; return ret; } pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; if (dev->current_state != PCI_D0) { pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n", pci_power_name(dev->current_state)); } else if (ret > 0) { /* * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning * from D3hot to D0 _may_ perform an internal reset, thereby * going to "D0 Uninitialized" rather than "D0 Initialized". * For example, at least some versions of the 3c905B and the * 3c556B exhibit this behaviour. * * At least some laptop BIOSen (e.g. the Thinkpad T21) leave * devices in a D3hot state at boot. Consequently, we need to * restore at least the BARs so that the device will be * accessible to its driver. */ pci_restore_bars(dev); } return 0; } /** * __pci_dev_set_current_state - Set current state of a PCI device * @dev: Device to handle * @data: pointer to state to be set */ static int __pci_dev_set_current_state(struct pci_dev *dev, void *data) { pci_power_t state = *(pci_power_t *)data; dev->current_state = state; return 0; } /** * pci_bus_set_current_state - Walk given bus and set current state of devices * @bus: Top bus of the subtree to walk. * @state: state to be set */ void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state) { if (bus) pci_walk_bus(bus, __pci_dev_set_current_state, &state); } /** * pci_set_low_power_state - Put a PCI device into a low-power state. * @dev: PCI device to handle. * @state: PCI power state (D1, D2, D3hot) to put the device into. * * Use the device's PCI_PM_CTRL register to put it into a low-power state. * * RETURN VALUE: * -EINVAL if the requested state is invalid. * -EIO if device does not support PCI PM or its PM capabilities register has a * wrong version, or device doesn't support the requested state. * 0 if device already is in the requested state. * 0 if device's power state has been successfully changed. */ static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state) { u16 pmcsr; if (!dev->pm_cap) return -EIO; /* * Validate transition: We can enter D0 from any state, but if * we're already in a low-power state, we can only go deeper. E.g., * we can go from D1 to D3, but we can't go directly from D3 to D1; * we'd have to go from D3 to D0, then to D1. */ if (dev->current_state <= PCI_D3cold && dev->current_state > state) { pci_dbg(dev, "Invalid power transition (from %s to %s)\n", pci_power_name(dev->current_state), pci_power_name(state)); return -EINVAL; } /* Check if this device supports the desired state */ if ((state == PCI_D1 && !dev->d1_support) || (state == PCI_D2 && !dev->d2_support)) return -EIO; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); if (PCI_POSSIBLE_ERROR(pmcsr)) { pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n", pci_power_name(dev->current_state), pci_power_name(state)); dev->current_state = PCI_D3cold; return -EIO; } pmcsr &= ~PCI_PM_CTRL_STATE_MASK; pmcsr |= state; /* Enter specified state */ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); /* Mandatory power management transition delays; see PCI PM 1.2. 
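 *
 * (The spec requires roughly 10 ms of recovery time after a D3hot
 * transition and 200 us after a D2 transition; pci_dev_d3_sleep() and
 * the PCI_PM_D2_DELAY udelay() below are sized accordingly.)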
*/ if (state == PCI_D3hot) pci_dev_d3_sleep(dev); else if (state == PCI_D2) udelay(PCI_PM_D2_DELAY); pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK; if (dev->current_state != state) pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n", pci_power_name(dev->current_state), pci_power_name(state)); return 0; } /** * pci_set_power_state - Set the power state of a PCI device * @dev: PCI device to handle. * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. * * Transition a device to a new power state, using the platform firmware and/or * the device's PCI PM registers. * * RETURN VALUE: * -EINVAL if the requested state is invalid. * -EIO if device does not support PCI PM or its PM capabilities register has a * wrong version, or device doesn't support the requested state. * 0 if the transition is to D1 or D2 but D1 and D2 are not supported. * 0 if device already is in the requested state. * 0 if the transition is to D3 but D3 is not supported. * 0 if device's power state has been successfully changed. */ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) { int error; /* Bound the state we're entering */ if (state > PCI_D3cold) state = PCI_D3cold; else if (state < PCI_D0) state = PCI_D0; else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) /* * If the device or the parent bridge do not support PCI * PM, ignore the request if we're doing anything other * than putting it into D0 (which would only happen on * boot). */ return 0; /* Check if we're already there */ if (dev->current_state == state) return 0; if (state == PCI_D0) return pci_set_full_power_state(dev); /* * This device is quirked not to be put into D3, so don't put it in * D3 */ if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) return 0; if (state == PCI_D3cold) { /* * To put the device in D3cold, put it into D3hot in the native * way, then put it into D3cold using platform ops. 
*/ error = pci_set_low_power_state(dev, PCI_D3hot); if (pci_platform_power_transition(dev, PCI_D3cold)) return error; /* Powering off a bridge may power off the whole hierarchy */ if (dev->current_state == PCI_D3cold) pci_bus_set_current_state(dev->subordinate, PCI_D3cold); } else { error = pci_set_low_power_state(dev, state); if (pci_platform_power_transition(dev, state)) return error; } return 0; } EXPORT_SYMBOL(pci_set_power_state); #define PCI_EXP_SAVE_REGS 7 static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev, u16 cap, bool extended) { struct pci_cap_saved_state *tmp; hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) { if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap) return tmp; } return NULL; } struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap) { return _pci_find_saved_cap(dev, cap, false); } struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap) { return _pci_find_saved_cap(dev, cap, true); } static int pci_save_pcie_state(struct pci_dev *dev) { int i = 0; struct pci_cap_saved_state *save_state; u16 *cap; if (!pci_is_pcie(dev)) return 0; save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); if (!save_state) { pci_err(dev, "buffer not found in %s\n", __func__); return -ENOMEM; } cap = (u16 *)&save_state->cap.data[0]; pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]); pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]); return 0; } void pci_bridge_reconfigure_ltr(struct pci_dev *dev) { #ifdef CONFIG_PCIEASPM struct pci_dev *bridge; u32 ctl; bridge = pci_upstream_bridge(dev); if (bridge && bridge->ltr_path) { pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl); if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) { pci_dbg(bridge, "re-enabling LTR\n"); pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_LTR_EN); } } #endif } static void pci_restore_pcie_state(struct pci_dev *dev) { int i = 0; struct pci_cap_saved_state *save_state; u16 *cap; save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP); if (!save_state) return; /* * Downstream ports reset the LTR enable bit when link goes down. * Check and re-configure the bit here before restoring device. * PCIe r5.0, sec 7.5.3.16. 
*/ pci_bridge_reconfigure_ltr(dev); cap = (u16 *)&save_state->cap.data[0]; pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]); pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]); } static int pci_save_pcix_state(struct pci_dev *dev) { int pos; struct pci_cap_saved_state *save_state; pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!pos) return 0; save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); if (!save_state) { pci_err(dev, "buffer not found in %s\n", __func__); return -ENOMEM; } pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->cap.data); return 0; } static void pci_restore_pcix_state(struct pci_dev *dev) { int i = 0, pos; struct pci_cap_saved_state *save_state; u16 *cap; save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX); pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!save_state || !pos) return; cap = (u16 *)&save_state->cap.data[0]; pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]); } static void pci_save_ltr_state(struct pci_dev *dev) { int ltr; struct pci_cap_saved_state *save_state; u32 *cap; if (!pci_is_pcie(dev)) return; ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); if (!ltr) return; save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); if (!save_state) { pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n"); return; } /* Some broken devices only support dword access to LTR */ cap = &save_state->cap.data[0]; pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap); } static void pci_restore_ltr_state(struct pci_dev *dev) { struct pci_cap_saved_state *save_state; int ltr; u32 *cap; save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR); ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR); if (!save_state || !ltr) return; /* Some broken devices only support dword access to LTR */ cap = &save_state->cap.data[0]; pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap); } /** * pci_save_state - save the PCI configuration space of a device before * suspending * @dev: PCI device that we're dealing with */ int pci_save_state(struct pci_dev *dev) { int i; /* XXX: 100% dword access ok here? 
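 *
 * The loop below saves the 64-byte standard configuration header as
 * sixteen 32-bit reads into dev->saved_config_space[].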
*/ for (i = 0; i < 16; i++) { pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); pci_dbg(dev, "save config %#04x: %#010x\n", i * 4, dev->saved_config_space[i]); } dev->state_saved = true; i = pci_save_pcie_state(dev); if (i != 0) return i; i = pci_save_pcix_state(dev); if (i != 0) return i; pci_save_ltr_state(dev); pci_save_dpc_state(dev); pci_save_aer_state(dev); pci_save_ptm_state(dev); return pci_save_vc_state(dev); } EXPORT_SYMBOL(pci_save_state); static void pci_restore_config_dword(struct pci_dev *pdev, int offset, u32 saved_val, int retry, bool force) { u32 val; pci_read_config_dword(pdev, offset, &val); if (!force && val == saved_val) return; for (;;) { pci_dbg(pdev, "restore config %#04x: %#010x -> %#010x\n", offset, val, saved_val); pci_write_config_dword(pdev, offset, saved_val); if (retry-- <= 0) return; pci_read_config_dword(pdev, offset, &val); if (val == saved_val) return; mdelay(1); } } static void pci_restore_config_space_range(struct pci_dev *pdev, int start, int end, int retry, bool force) { int index; for (index = end; index >= start; index--) pci_restore_config_dword(pdev, 4 * index, pdev->saved_config_space[index], retry, force); } static void pci_restore_config_space(struct pci_dev *pdev) { if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) { pci_restore_config_space_range(pdev, 10, 15, 0, false); /* Restore BARs before the command register. */ pci_restore_config_space_range(pdev, 4, 9, 10, false); pci_restore_config_space_range(pdev, 0, 3, 0, false); } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { pci_restore_config_space_range(pdev, 12, 15, 0, false); /* * Force rewriting of prefetch registers to avoid S3 resume * issues on Intel PCI bridges that occur when these * registers are not explicitly written. */ pci_restore_config_space_range(pdev, 9, 11, 0, true); pci_restore_config_space_range(pdev, 0, 8, 0, false); } else { pci_restore_config_space_range(pdev, 0, 15, 0, false); } } static void pci_restore_rebar_state(struct pci_dev *pdev) { unsigned int pos, nbars, i; u32 ctrl; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR); if (!pos) return; pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; for (i = 0; i < nbars; i++, pos += 8) { struct resource *res; int bar_idx, size; pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX; res = pdev->resource + bar_idx; size = pci_rebar_bytes_to_size(resource_size(res)); ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE; ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT; pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl); } } /** * pci_restore_state - Restore the saved state of a PCI device * @dev: PCI device that we're dealing with */ void pci_restore_state(struct pci_dev *dev) { if (!dev->state_saved) return; /* * Restore max latencies (in the LTR capability) before enabling * LTR itself (in the PCIe capability). 
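 *
 * (Restoring the PCIe capability below rewrites PCI_EXP_DEVCTL2, which
 * may set the LTR Enable bit, so the saved max-latency values have to be
 * in place first.)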
*/ pci_restore_ltr_state(dev); pci_restore_pcie_state(dev); pci_restore_pasid_state(dev); pci_restore_pri_state(dev); pci_restore_ats_state(dev); pci_restore_vc_state(dev); pci_restore_rebar_state(dev); pci_restore_dpc_state(dev); pci_restore_ptm_state(dev); pci_aer_clear_status(dev); pci_restore_aer_state(dev); pci_restore_config_space(dev); pci_restore_pcix_state(dev); pci_restore_msi_state(dev); /* Restore ACS and IOV configuration state */ pci_enable_acs(dev); pci_restore_iov_state(dev); dev->state_saved = false; } EXPORT_SYMBOL(pci_restore_state); struct pci_saved_state { u32 config_space[16]; struct pci_cap_saved_data cap[]; }; /** * pci_store_saved_state - Allocate and return an opaque struct containing * the device saved state. * @dev: PCI device that we're dealing with * * Return NULL if no state or error. */ struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev) { struct pci_saved_state *state; struct pci_cap_saved_state *tmp; struct pci_cap_saved_data *cap; size_t size; if (!dev->state_saved) return NULL; size = sizeof(*state) + sizeof(struct pci_cap_saved_data); hlist_for_each_entry(tmp, &dev->saved_cap_space, next) size += sizeof(struct pci_cap_saved_data) + tmp->cap.size; state = kzalloc(size, GFP_KERNEL); if (!state) return NULL; memcpy(state->config_space, dev->saved_config_space, sizeof(state->config_space)); cap = state->cap; hlist_for_each_entry(tmp, &dev->saved_cap_space, next) { size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size; memcpy(cap, &tmp->cap, len); cap = (struct pci_cap_saved_data *)((u8 *)cap + len); } /* Empty cap_save terminates list */ return state; } EXPORT_SYMBOL_GPL(pci_store_saved_state); /** * pci_load_saved_state - Reload the provided save state into struct pci_dev. * @dev: PCI device that we're dealing with * @state: Saved state returned from pci_store_saved_state() */ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state) { struct pci_cap_saved_data *cap; dev->state_saved = false; if (!state) return 0; memcpy(dev->saved_config_space, state->config_space, sizeof(state->config_space)); cap = state->cap; while (cap->size) { struct pci_cap_saved_state *tmp; tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended); if (!tmp || tmp->cap.size != cap->size) return -EINVAL; memcpy(tmp->cap.data, cap->data, tmp->cap.size); cap = (struct pci_cap_saved_data *)((u8 *)cap + sizeof(struct pci_cap_saved_data) + cap->size); } dev->state_saved = true; return 0; } EXPORT_SYMBOL_GPL(pci_load_saved_state); /** * pci_load_and_free_saved_state - Reload the save state pointed to by state, * and free the memory allocated for it. 
* @dev: PCI device that we're dealing with * @state: Pointer to saved state returned from pci_store_saved_state() */ int pci_load_and_free_saved_state(struct pci_dev *dev, struct pci_saved_state **state) { int ret = pci_load_saved_state(dev, *state); kfree(*state); *state = NULL; return ret; } EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state); int __weak pcibios_enable_device(struct pci_dev *dev, int bars) { return pci_enable_resources(dev, bars); } static int do_pci_enable_device(struct pci_dev *dev, int bars) { int err; struct pci_dev *bridge; u16 cmd; u8 pin; err = pci_set_power_state(dev, PCI_D0); if (err < 0 && err != -EIO) return err; bridge = pci_upstream_bridge(dev); if (bridge) pcie_aspm_powersave_config_link(bridge); err = pcibios_enable_device(dev, bars); if (err < 0) return err; pci_fixup_device(pci_fixup_enable, dev); if (dev->msi_enabled || dev->msix_enabled) return 0; pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin) { pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & PCI_COMMAND_INTX_DISABLE) pci_write_config_word(dev, PCI_COMMAND, cmd & ~PCI_COMMAND_INTX_DISABLE); } return 0; } /** * pci_reenable_device - Resume abandoned device * @dev: PCI device to be resumed * * NOTE: This function is a backend of pci_default_resume() and is not supposed * to be called by normal code, write proper resume handler and use it instead. */ int pci_reenable_device(struct pci_dev *dev) { if (pci_is_enabled(dev)) return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1); return 0; } EXPORT_SYMBOL(pci_reenable_device); static void pci_enable_bridge(struct pci_dev *dev) { struct pci_dev *bridge; int retval; bridge = pci_upstream_bridge(dev); if (bridge) pci_enable_bridge(bridge); if (pci_is_enabled(dev)) { if (!dev->is_busmaster) pci_set_master(dev); return; } retval = pci_enable_device(dev); if (retval) pci_err(dev, "Error enabling bridge (%d), continuing\n", retval); pci_set_master(dev); } static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) { struct pci_dev *bridge; int err; int i, bars = 0; /* * Power state could be unknown at this point, either due to a fresh * boot or a device removal call. So get the current power state * so that things like MSI message writing will behave as expected * (e.g. if the device really is in D0 at enable time). */ pci_update_current_state(dev, dev->current_state); if (atomic_inc_return(&dev->enable_cnt) > 1) return 0; /* already enabled */ bridge = pci_upstream_bridge(dev); if (bridge) pci_enable_bridge(bridge); /* only skip sriov related */ for (i = 0; i <= PCI_ROM_RESOURCE; i++) if (dev->resource[i].flags & flags) bars |= (1 << i); for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++) if (dev->resource[i].flags & flags) bars |= (1 << i); err = do_pci_enable_device(dev, bars); if (err < 0) atomic_dec(&dev->enable_cnt); return err; } /** * pci_enable_device_io - Initialize a device for use with IO space * @dev: PCI device to be initialized * * Initialize device before it's used by a driver. Ask low-level code * to enable I/O resources. Wake up the device if it was suspended. * Beware, this function can fail. */ int pci_enable_device_io(struct pci_dev *dev) { return pci_enable_device_flags(dev, IORESOURCE_IO); } EXPORT_SYMBOL(pci_enable_device_io); /** * pci_enable_device_mem - Initialize a device for use with Memory space * @dev: PCI device to be initialized * * Initialize device before it's used by a driver. Ask low-level code * to enable Memory resources. Wake up the device if it was suspended. 
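 *
 * A typical caller checks the return value, e.g. (illustrative sketch):
 *
 *	rc = pci_enable_device_mem(pdev);
 *	if (rc)
 *		return rc;
 *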
* Beware, this function can fail. */ int pci_enable_device_mem(struct pci_dev *dev) { return pci_enable_device_flags(dev, IORESOURCE_MEM); } EXPORT_SYMBOL(pci_enable_device_mem); /** * pci_enable_device - Initialize device before it's used by a driver. * @dev: PCI device to be initialized * * Initialize device before it's used by a driver. Ask low-level code * to enable I/O and memory. Wake up the device if it was suspended. * Beware, this function can fail. * * Note we don't actually enable the device many times if we call * this function repeatedly (we just increment the count). */ int pci_enable_device(struct pci_dev *dev) { return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO); } EXPORT_SYMBOL(pci_enable_device); /* * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so * there's no need to track it separately. pci_devres is initialized * when a device is enabled using managed PCI device enable interface. */ struct pci_devres { unsigned int enabled:1; unsigned int pinned:1; unsigned int orig_intx:1; unsigned int restore_intx:1; unsigned int mwi:1; u32 region_mask; }; static void pcim_release(struct device *gendev, void *res) { struct pci_dev *dev = to_pci_dev(gendev); struct pci_devres *this = res; int i; for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) if (this->region_mask & (1 << i)) pci_release_region(dev, i); if (this->mwi) pci_clear_mwi(dev); if (this->restore_intx) pci_intx(dev, this->orig_intx); if (this->enabled && !this->pinned) pci_disable_device(dev); } static struct pci_devres *get_pci_dr(struct pci_dev *pdev) { struct pci_devres *dr, *new_dr; dr = devres_find(&pdev->dev, pcim_release, NULL, NULL); if (dr) return dr; new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL); if (!new_dr) return NULL; return devres_get(&pdev->dev, new_dr, NULL, NULL); } static struct pci_devres *find_pci_dr(struct pci_dev *pdev) { if (pci_is_managed(pdev)) return devres_find(&pdev->dev, pcim_release, NULL, NULL); return NULL; } /** * pcim_enable_device - Managed pci_enable_device() * @pdev: PCI device to be initialized * * Managed pci_enable_device(). */ int pcim_enable_device(struct pci_dev *pdev) { struct pci_devres *dr; int rc; dr = get_pci_dr(pdev); if (unlikely(!dr)) return -ENOMEM; if (dr->enabled) return 0; rc = pci_enable_device(pdev); if (!rc) { pdev->is_managed = 1; dr->enabled = 1; } return rc; } EXPORT_SYMBOL(pcim_enable_device); /** * pcim_pin_device - Pin managed PCI device * @pdev: PCI device to pin * * Pin managed PCI device @pdev. Pinned device won't be disabled on * driver detach. @pdev must have been enabled with * pcim_enable_device(). */ void pcim_pin_device(struct pci_dev *pdev) { struct pci_devres *dr; dr = find_pci_dr(pdev); WARN_ON(!dr || !dr->enabled); if (dr) dr->pinned = 1; } EXPORT_SYMBOL(pcim_pin_device); /* * pcibios_device_add - provide arch specific hooks when adding device dev * @dev: the PCI device being added * * Permits the platform to provide architecture specific functionality when * devices are added. This is the default implementation. Architecture * implementations can override this. */ int __weak pcibios_device_add(struct pci_dev *dev) { return 0; } /** * pcibios_release_device - provide arch specific hooks when releasing * device dev * @dev: the PCI device being released * * Permits the platform to provide architecture specific functionality when * devices are released. This is the default implementation. 
Architecture * implementations can override this. */ void __weak pcibios_release_device(struct pci_dev *dev) {} /** * pcibios_disable_device - disable arch specific PCI resources for device dev * @dev: the PCI device to disable * * Disables architecture specific PCI resources for the device. This * is the default implementation. Architecture implementations can * override this. */ void __weak pcibios_disable_device(struct pci_dev *dev) {} /** * pcibios_penalize_isa_irq - penalize an ISA IRQ * @irq: ISA IRQ to penalize * @active: IRQ active or not * * Permits the platform to provide architecture-specific functionality when * penalizing ISA IRQs. This is the default implementation. Architecture * implementations can override this. */ void __weak pcibios_penalize_isa_irq(int irq, int active) {} static void do_pci_disable_device(struct pci_dev *dev) { u16 pci_command; pci_read_config_word(dev, PCI_COMMAND, &pci_command); if (pci_command & PCI_COMMAND_MASTER) { pci_command &= ~PCI_COMMAND_MASTER; pci_write_config_word(dev, PCI_COMMAND, pci_command); } pcibios_disable_device(dev); } /** * pci_disable_enabled_device - Disable device without updating enable_cnt * @dev: PCI device to disable * * NOTE: This function is a backend of PCI power management routines and is * not supposed to be called drivers. */ void pci_disable_enabled_device(struct pci_dev *dev) { if (pci_is_enabled(dev)) do_pci_disable_device(dev); } /** * pci_disable_device - Disable PCI device after use * @dev: PCI device to be disabled * * Signal to the system that the PCI device is not in use by the system * anymore. This only involves disabling PCI bus-mastering, if active. * * Note we don't actually disable the device until all callers of * pci_enable_device() have called pci_disable_device(). */ void pci_disable_device(struct pci_dev *dev) { struct pci_devres *dr; dr = find_pci_dr(dev); if (dr) dr->enabled = 0; dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0, "disabling already-disabled device"); if (atomic_dec_return(&dev->enable_cnt) != 0) return; do_pci_disable_device(dev); dev->is_busmaster = 0; } EXPORT_SYMBOL(pci_disable_device); /** * pcibios_set_pcie_reset_state - set reset state for device dev * @dev: the PCIe device reset * @state: Reset state to enter into * * Set the PCIe reset state for the device. This is the default * implementation. Architecture implementations can override this. */ int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) { return -EINVAL; } /** * pci_set_pcie_reset_state - set reset state for device dev * @dev: the PCIe device reset * @state: Reset state to enter into * * Sets the PCI reset state for the device. */ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) { return pcibios_set_pcie_reset_state(dev, state); } EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); #ifdef CONFIG_PCIEAER void pcie_clear_device_status(struct pci_dev *dev) { u16 sta; pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta); pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta); } #endif /** * pcie_clear_root_pme_status - Clear root port PME interrupt status. * @dev: PCIe root port or event collector. */ void pcie_clear_root_pme_status(struct pci_dev *dev) { pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME); } /** * pci_check_pme_status - Check if given device has generated PME. * @dev: Device to check. * * Check the PME status of the device and if set, clear it and clear PME enable * (if set). 
Return 'true' if PME status and PME enable were both set or * 'false' otherwise. */ bool pci_check_pme_status(struct pci_dev *dev) { int pmcsr_pos; u16 pmcsr; bool ret = false; if (!dev->pm_cap) return false; pmcsr_pos = dev->pm_cap + PCI_PM_CTRL; pci_read_config_word(dev, pmcsr_pos, &pmcsr); if (!(pmcsr & PCI_PM_CTRL_PME_STATUS)) return false; /* Clear PME status. */ pmcsr |= PCI_PM_CTRL_PME_STATUS; if (pmcsr & PCI_PM_CTRL_PME_ENABLE) { /* Disable PME to avoid interrupt flood. */ pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; ret = true; } pci_write_config_word(dev, pmcsr_pos, pmcsr); return ret; } /** * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. * @dev: Device to handle. * @pme_poll_reset: Whether or not to reset the device's pme_poll flag. * * Check if @dev has generated PME and queue a resume request for it in that * case. */ static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset) { if (pme_poll_reset && dev->pme_poll) dev->pme_poll = false; if (pci_check_pme_status(dev)) { pci_wakeup_event(dev); pm_request_resume(&dev->dev); } return 0; } /** * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary. * @bus: Top bus of the subtree to walk. */ void pci_pme_wakeup_bus(struct pci_bus *bus) { if (bus) pci_walk_bus(bus, pci_pme_wakeup, (void *)true); } /** * pci_pme_capable - check the capability of PCI device to generate PME# * @dev: PCI device to handle. * @state: PCI state from which device will issue PME#. */ bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) { if (!dev->pm_cap) return false; return !!(dev->pme_support & (1 << state)); } EXPORT_SYMBOL(pci_pme_capable); static void pci_pme_list_scan(struct work_struct *work) { struct pci_pme_device *pme_dev, *n; mutex_lock(&pci_pme_list_mutex); list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) { struct pci_dev *pdev = pme_dev->dev; if (pdev->pme_poll) { struct pci_dev *bridge = pdev->bus->self; struct device *dev = &pdev->dev; int pm_status; /* * If bridge is in low power state, the * configuration space of subordinate devices * may be not accessible */ if (bridge && bridge->current_state != PCI_D0) continue; /* * If the device is in a low power state it * should not be polled either. */ pm_status = pm_runtime_get_if_active(dev, true); if (!pm_status) continue; if (pdev->current_state != PCI_D3cold) pci_pme_wakeup(pdev, NULL); if (pm_status > 0) pm_runtime_put(dev); } else { list_del(&pme_dev->list); kfree(pme_dev); } } if (!list_empty(&pci_pme_list)) queue_delayed_work(system_freezable_wq, &pci_pme_work, msecs_to_jiffies(PME_TIMEOUT)); mutex_unlock(&pci_pme_list_mutex); } static void __pci_pme_active(struct pci_dev *dev, bool enable) { u16 pmcsr; if (!dev->pme_support) return; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); /* Clear PME_Status by writing 1 to it and enable PME# */ pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; if (!enable) pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); } /** * pci_pme_restore - Restore PME configuration after config space restore. * @dev: PCI device to update. 
*/ void pci_pme_restore(struct pci_dev *dev) { u16 pmcsr; if (!dev->pme_support) return; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); if (dev->wakeup_prepared) { pmcsr |= PCI_PM_CTRL_PME_ENABLE; pmcsr &= ~PCI_PM_CTRL_PME_STATUS; } else { pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; pmcsr |= PCI_PM_CTRL_PME_STATUS; } pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); } /** * pci_pme_active - enable or disable PCI device's PME# function * @dev: PCI device to handle. * @enable: 'true' to enable PME# generation; 'false' to disable it. * * The caller must verify that the device is capable of generating PME# before * calling this function with @enable equal to 'true'. */ void pci_pme_active(struct pci_dev *dev, bool enable) { __pci_pme_active(dev, enable); /* * PCI (as opposed to PCIe) PME requires that the device have * its PME# line hooked up correctly. Not all hardware vendors * do this, so the PME never gets delivered and the device * remains asleep. The easiest way around this is to * periodically walk the list of suspended devices and check * whether any have their PME flag set. The assumption is that * we'll wake up often enough anyway that this won't be a huge * hit, and the power savings from the devices will still be a * win. * * Although PCIe uses in-band PME message instead of PME# line * to report PME, PME does not work for some PCIe devices in * reality. For example, there are devices that set their PME * status bits, but don't really bother to send a PME message; * there are PCI Express Root Ports that don't bother to * trigger interrupts when they receive PME messages from the * devices below. So PME poll is used for PCIe devices too. */ if (dev->pme_poll) { struct pci_pme_device *pme_dev; if (enable) { pme_dev = kmalloc(sizeof(struct pci_pme_device), GFP_KERNEL); if (!pme_dev) { pci_warn(dev, "can't enable PME#\n"); return; } pme_dev->dev = dev; mutex_lock(&pci_pme_list_mutex); list_add(&pme_dev->list, &pci_pme_list); if (list_is_singular(&pci_pme_list)) queue_delayed_work(system_freezable_wq, &pci_pme_work, msecs_to_jiffies(PME_TIMEOUT)); mutex_unlock(&pci_pme_list_mutex); } else { mutex_lock(&pci_pme_list_mutex); list_for_each_entry(pme_dev, &pci_pme_list, list) { if (pme_dev->dev == dev) { list_del(&pme_dev->list); kfree(pme_dev); break; } } mutex_unlock(&pci_pme_list_mutex); } } pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled"); } EXPORT_SYMBOL(pci_pme_active); /** * __pci_enable_wake - enable PCI device as wakeup event source * @dev: PCI device affected * @state: PCI state from which device will issue wakeup events * @enable: True to enable event generation; false to disable * * This enables the device as a wakeup event source, or disables it. * When such events involves platform-specific hooks, those hooks are * called automatically by this routine. * * Devices with legacy power management (no standard PCI PM capabilities) * always require such platform hooks. * * RETURN VALUE: * 0 is returned on success * -EINVAL is returned if device is not supposed to wake up the system * Error code depending on the platform is returned if both the platform and * the native mechanism fail to enable the generation of wake-up events */ static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) { int ret = 0; /* * Bridges that are not power-manageable directly only signal * wakeup on behalf of subordinate devices which is set up * elsewhere, so skip them. 
However, bridges that are * power-manageable may signal wakeup for themselves (for example, * on a hotplug event) and they need to be covered here. */ if (!pci_power_manageable(dev)) return 0; /* Don't do the same thing twice in a row for one device. */ if (!!enable == !!dev->wakeup_prepared) return 0; /* * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don * Anderson we should be doing PME# wake enable followed by ACPI wake * enable. To disable wake-up we call the platform first, for symmetry. */ if (enable) { int error; /* * Enable PME signaling if the device can signal PME from * D3cold regardless of whether or not it can signal PME from * the current target state, because that will allow it to * signal PME when the hierarchy above it goes into D3cold and * the device itself ends up in D3cold as a result of that. */ if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold)) pci_pme_active(dev, true); else ret = 1; error = platform_pci_set_wakeup(dev, true); if (ret) ret = error; if (!ret) dev->wakeup_prepared = true; } else { platform_pci_set_wakeup(dev, false); pci_pme_active(dev, false); dev->wakeup_prepared = false; } return ret; } /** * pci_enable_wake - change wakeup settings for a PCI device * @pci_dev: Target device * @state: PCI state from which device will issue wakeup events * @enable: Whether or not to enable event generation * * If @enable is set, check device_may_wakeup() for the device before calling * __pci_enable_wake() for it. */ int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable) { if (enable && !device_may_wakeup(&pci_dev->dev)) return -EINVAL; return __pci_enable_wake(pci_dev, state, enable); } EXPORT_SYMBOL(pci_enable_wake); /** * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold * @dev: PCI device to prepare * @enable: True to enable wake-up event generation; false to disable * * Many drivers want the device to wake up the system from D3_hot or D3_cold * and this function allows them to set that up cleanly - pci_enable_wake() * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI * ordering constraints. * * This function only returns error code if the device is not allowed to wake * up the system from sleep or it is not capable of generating PME# from both * D3_hot and D3_cold and the platform is unable to enable wake-up power for it. */ int pci_wake_from_d3(struct pci_dev *dev, bool enable) { return pci_pme_capable(dev, PCI_D3cold) ? pci_enable_wake(dev, PCI_D3cold, enable) : pci_enable_wake(dev, PCI_D3hot, enable); } EXPORT_SYMBOL(pci_wake_from_d3); /** * pci_target_state - find an appropriate low power state for a given PCI dev * @dev: PCI device * @wakeup: Whether or not wakeup functionality will be enabled for the device. * * Use underlying platform code to find a supported low power state for @dev. * If the platform can't manage @dev, return the deepest state from which it * can generate wake events, based on any available PME info. */ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) { if (platform_pci_power_manageable(dev)) { /* * Call the platform to find the target state for the device. */ pci_power_t state = platform_pci_choose_state(dev); switch (state) { case PCI_POWER_ERROR: case PCI_UNKNOWN: return PCI_D3hot; case PCI_D1: case PCI_D2: if (pci_no_d1d2(dev)) return PCI_D3hot; } return state; } /* * If the device is in D3cold even though it's not power-manageable by * the platform, it may have been powered down by non-standard means. 
* Best to let it slumber. */ if (dev->current_state == PCI_D3cold) return PCI_D3cold; else if (!dev->pm_cap) return PCI_D0; if (wakeup && dev->pme_support) { pci_power_t state = PCI_D3hot; /* * Find the deepest state from which the device can generate * PME#. */ while (state && !(dev->pme_support & (1 << state))) state--; if (state) return state; else if (dev->pme_support & 1) return PCI_D0; } return PCI_D3hot; } /** * pci_prepare_to_sleep - prepare PCI device for system-wide transition * into a sleep state * @dev: Device to handle. * * Choose the power state appropriate for the device depending on whether * it can wake up the system and/or is power manageable by the platform * (PCI_D3hot is the default) and put the device into that state. */ int pci_prepare_to_sleep(struct pci_dev *dev) { bool wakeup = device_may_wakeup(&dev->dev); pci_power_t target_state = pci_target_state(dev, wakeup); int error; if (target_state == PCI_POWER_ERROR) return -EIO; pci_enable_wake(dev, target_state, wakeup); error = pci_set_power_state(dev, target_state); if (error) pci_enable_wake(dev, target_state, false); return error; } EXPORT_SYMBOL(pci_prepare_to_sleep); /** * pci_back_from_sleep - turn PCI device on during system-wide transition * into working state * @dev: Device to handle. * * Disable device's system wake-up capability and put it into D0. */ int pci_back_from_sleep(struct pci_dev *dev) { int ret = pci_set_power_state(dev, PCI_D0); if (ret) return ret; pci_enable_wake(dev, PCI_D0, false); return 0; } EXPORT_SYMBOL(pci_back_from_sleep); /** * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend. * @dev: PCI device being suspended. * * Prepare @dev to generate wake-up events at run time and put it into a low * power state. */ int pci_finish_runtime_suspend(struct pci_dev *dev) { pci_power_t target_state; int error; target_state = pci_target_state(dev, device_can_wakeup(&dev->dev)); if (target_state == PCI_POWER_ERROR) return -EIO; __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); error = pci_set_power_state(dev, target_state); if (error) pci_enable_wake(dev, target_state, false); return error; } /** * pci_dev_run_wake - Check if device can generate run-time wake-up events. * @dev: Device to check. * * Return true if the device itself is capable of generating wake-up events * (through the platform or using the native PCIe PME) or if the device supports * PME and one of its upstream bridges can generate wake-up events. */ bool pci_dev_run_wake(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; if (!dev->pme_support) return false; /* PME-capable in principle, but not from the target power state */ if (!pci_pme_capable(dev, pci_target_state(dev, true))) return false; if (device_can_wakeup(&dev->dev)) return true; while (bus->parent) { struct pci_dev *bridge = bus->self; if (device_can_wakeup(&bridge->dev)) return true; bus = bus->parent; } /* We have reached the root bus. */ if (bus->bridge) return device_can_wakeup(bus->bridge); return false; } EXPORT_SYMBOL_GPL(pci_dev_run_wake); /** * pci_dev_need_resume - Check if it is necessary to resume the device. * @pci_dev: Device to check. * * Return 'true' if the device is not runtime-suspended or it has to be * reconfigured due to wakeup settings difference between system and runtime * suspend, or the current power state of it is not suitable for the upcoming * (system-wide) transition. 
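 *
 * A system suspend path would typically use it as (illustrative sketch)::
 *
 *        if (pci_dev_need_resume(pci_dev))
 *                pm_runtime_resume(&pci_dev->dev);
 *        else
 *                pci_dev_adjust_pme(pci_dev);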
*/ bool pci_dev_need_resume(struct pci_dev *pci_dev) { struct device *dev = &pci_dev->dev; pci_power_t target_state; if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev)) return true; target_state = pci_target_state(pci_dev, device_may_wakeup(dev)); /* * If the earlier platform check has not triggered, D3cold is just power * removal on top of D3hot, so no need to resume the device in that * case. */ return target_state != pci_dev->current_state && target_state != PCI_D3cold && pci_dev->current_state != PCI_D3hot; } /** * pci_dev_adjust_pme - Adjust PME setting for a suspended device. * @pci_dev: Device to check. * * If the device is suspended and it is not configured for system wakeup, * disable PME for it to prevent it from waking up the system unnecessarily. * * Note that if the device's power state is D3cold and the platform check in * pci_dev_need_resume() has not triggered, the device's configuration need not * be changed. */ void pci_dev_adjust_pme(struct pci_dev *pci_dev) { struct device *dev = &pci_dev->dev; spin_lock_irq(&dev->power.lock); if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) && pci_dev->current_state < PCI_D3cold) __pci_pme_active(pci_dev, false); spin_unlock_irq(&dev->power.lock); } /** * pci_dev_complete_resume - Finalize resume from system sleep for a device. * @pci_dev: Device to handle. * * If the device is runtime suspended and wakeup-capable, enable PME for it as * it might have been disabled during the prepare phase of system suspend if * the device was not configured for system wakeup. */ void pci_dev_complete_resume(struct pci_dev *pci_dev) { struct device *dev = &pci_dev->dev; if (!pci_dev_run_wake(pci_dev)) return; spin_lock_irq(&dev->power.lock); if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold) __pci_pme_active(pci_dev, true); spin_unlock_irq(&dev->power.lock); } /** * pci_choose_state - Choose the power state of a PCI device. * @dev: Target PCI device. * @state: Target state for the whole system. * * Returns PCI power state suitable for @dev and @state. */ pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) { if (state.event == PM_EVENT_ON) return PCI_D0; return pci_target_state(dev, false); } EXPORT_SYMBOL(pci_choose_state); void pci_config_pm_runtime_get(struct pci_dev *pdev) { struct device *dev = &pdev->dev; struct device *parent = dev->parent; if (parent) pm_runtime_get_sync(parent); pm_runtime_get_noresume(dev); /* * pdev->current_state is set to PCI_D3cold during suspending, * so wait until suspending completes */ pm_runtime_barrier(dev); /* * Only need to resume devices in D3cold, because config * registers are still accessible for devices suspended but * not in D3cold. */ if (pdev->current_state == PCI_D3cold) pm_runtime_resume(dev); } void pci_config_pm_runtime_put(struct pci_dev *pdev) { struct device *dev = &pdev->dev; struct device *parent = dev->parent; pm_runtime_put(dev); if (parent) pm_runtime_put_sync(parent); } static const struct dmi_system_id bridge_d3_blacklist[] = { #ifdef CONFIG_X86 { /* * Gigabyte X299 root port is not marked as hotplug capable * which allows Linux to power manage it. However, this * confuses the BIOS SMI handler so don't power manage root * ports on that system. 
*/ .ident = "X299 DESIGNARE EX-CF", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"), }, }, { /* * Downstream device is not accessible after putting a root port * into D3cold and back into D0 on Elo Continental Z2 board */ .ident = "Elo Continental Z2", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Elo Touch Solutions"), DMI_MATCH(DMI_BOARD_NAME, "Geminilake"), DMI_MATCH(DMI_BOARD_VERSION, "Continental Z2"), }, }, #endif { } }; /** * pci_bridge_d3_possible - Is it possible to put the bridge into D3 * @bridge: Bridge to check * * This function checks if it is possible to move the bridge to D3. * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt. */ bool pci_bridge_d3_possible(struct pci_dev *bridge) { if (!pci_is_pcie(bridge)) return false; switch (pci_pcie_type(bridge)) { case PCI_EXP_TYPE_ROOT_PORT: case PCI_EXP_TYPE_UPSTREAM: case PCI_EXP_TYPE_DOWNSTREAM: if (pci_bridge_d3_disable) return false; /* * Hotplug ports handled by firmware in System Management Mode * may not be put into D3 by the OS (Thunderbolt on non-Macs). */ if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge)) return false; if (pci_bridge_d3_force) return true; /* Even the oldest 2010 Thunderbolt controller supports D3. */ if (bridge->is_thunderbolt) return true; /* Platform might know better if the bridge supports D3 */ if (platform_pci_bridge_d3(bridge)) return true; /* * Hotplug ports handled natively by the OS were not validated * by vendors for runtime D3 at least until 2018 because there * was no OS support. */ if (bridge->is_hotplug_bridge) return false; if (dmi_check_system(bridge_d3_blacklist)) return false; /* * It should be safe to put PCIe ports from 2015 or newer * to D3. */ if (dmi_get_bios_year() >= 2015) return true; break; } return false; } static int pci_dev_check_d3cold(struct pci_dev *dev, void *data) { bool *d3cold_ok = data; if (/* The device needs to be allowed to go D3cold ... */ dev->no_d3cold || !dev->d3cold_allowed || /* ... and if it is wakeup capable to do so from D3cold. */ (device_may_wakeup(&dev->dev) && !pci_pme_capable(dev, PCI_D3cold)) || /* If it is a bridge it must be allowed to go to D3. */ !pci_power_manageable(dev)) *d3cold_ok = false; return !*d3cold_ok; } /* * pci_bridge_d3_update - Update bridge D3 capabilities * @dev: PCI device which is changed * * Update upstream bridge PM capabilities accordingly depending on if the * device PM configuration was changed or the device is being removed. The * change is also propagated upstream. */ void pci_bridge_d3_update(struct pci_dev *dev) { bool remove = !device_is_registered(&dev->dev); struct pci_dev *bridge; bool d3cold_ok = true; bridge = pci_upstream_bridge(dev); if (!bridge || !pci_bridge_d3_possible(bridge)) return; /* * If D3 is currently allowed for the bridge, removing one of its * children won't change that. */ if (remove && bridge->bridge_d3) return; /* * If D3 is currently allowed for the bridge and a child is added or * changed, disallowance of D3 can only be caused by that child, so * we only need to check that single device, not any of its siblings. * * If D3 is currently not allowed for the bridge, checking the device * first may allow us to skip checking its siblings. 
*/ if (!remove) pci_dev_check_d3cold(dev, &d3cold_ok); /* * If D3 is currently not allowed for the bridge, this may be caused * either by the device being changed/removed or any of its siblings, * so we need to go through all children to find out if one of them * continues to block D3. */ if (d3cold_ok && !bridge->bridge_d3) pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold, &d3cold_ok); if (bridge->bridge_d3 != d3cold_ok) { bridge->bridge_d3 = d3cold_ok; /* Propagate change to upstream bridges */ pci_bridge_d3_update(bridge); } } /** * pci_d3cold_enable - Enable D3cold for device * @dev: PCI device to handle * * This function can be used in drivers to enable D3cold from the device * they handle. It also updates upstream PCI bridge PM capabilities * accordingly. */ void pci_d3cold_enable(struct pci_dev *dev) { if (dev->no_d3cold) { dev->no_d3cold = false; pci_bridge_d3_update(dev); } } EXPORT_SYMBOL_GPL(pci_d3cold_enable); /** * pci_d3cold_disable - Disable D3cold for device * @dev: PCI device to handle * * This function can be used in drivers to disable D3cold from the device * they handle. It also updates upstream PCI bridge PM capabilities * accordingly. */ void pci_d3cold_disable(struct pci_dev *dev) { if (!dev->no_d3cold) { dev->no_d3cold = true; pci_bridge_d3_update(dev); } } EXPORT_SYMBOL_GPL(pci_d3cold_disable); /** * pci_pm_init - Initialize PM functions of given PCI device * @dev: PCI device to handle. */ void pci_pm_init(struct pci_dev *dev) { int pm; u16 status; u16 pmc; pm_runtime_forbid(&dev->dev); pm_runtime_set_active(&dev->dev); pm_runtime_enable(&dev->dev); device_enable_async_suspend(&dev->dev); dev->wakeup_prepared = false; dev->pm_cap = 0; dev->pme_support = 0; /* find PCI PM capability in list */ pm = pci_find_capability(dev, PCI_CAP_ID_PM); if (!pm) return; /* Check device's ability to generate PME# */ pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc); if ((pmc & PCI_PM_CAP_VER_MASK) > 3) { pci_err(dev, "unsupported PM cap regs version (%u)\n", pmc & PCI_PM_CAP_VER_MASK); return; } dev->pm_cap = pm; dev->d3hot_delay = PCI_PM_D3HOT_WAIT; dev->d3cold_delay = PCI_PM_D3COLD_WAIT; dev->bridge_d3 = pci_bridge_d3_possible(dev); dev->d3cold_allowed = true; dev->d1_support = false; dev->d2_support = false; if (!pci_no_d1d2(dev)) { if (pmc & PCI_PM_CAP_D1) dev->d1_support = true; if (pmc & PCI_PM_CAP_D2) dev->d2_support = true; if (dev->d1_support || dev->d2_support) pci_info(dev, "supports%s%s\n", dev->d1_support ? " D1" : "", dev->d2_support ? " D2" : ""); } pmc &= PCI_PM_CAP_PME_MASK; if (pmc) { pci_info(dev, "PME# supported from%s%s%s%s%s\n", (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "", (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; dev->pme_poll = true; /* * Make device's PM flags reflect the wake-up capability, but * let the user space enable it to wake up the system as needed. 
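 * (user space typically does that through the device's /sys/.../power/wakeup
 * attribute).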
*/ device_set_wakeup_capable(&dev->dev, true); /* Disable the PME# generation functionality */ pci_pme_active(dev, false); } pci_read_config_word(dev, PCI_STATUS, &status); if (status & PCI_STATUS_IMM_READY) dev->imm_ready = 1; } static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop) { unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI; switch (prop) { case PCI_EA_P_MEM: case PCI_EA_P_VF_MEM: flags |= IORESOURCE_MEM; break; case PCI_EA_P_MEM_PREFETCH: case PCI_EA_P_VF_MEM_PREFETCH: flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH; break; case PCI_EA_P_IO: flags |= IORESOURCE_IO; break; default: return 0; } return flags; } static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei, u8 prop) { if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO) return &dev->resource[bei]; #ifdef CONFIG_PCI_IOV else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 && (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH)) return &dev->resource[PCI_IOV_RESOURCES + bei - PCI_EA_BEI_VF_BAR0]; #endif else if (bei == PCI_EA_BEI_ROM) return &dev->resource[PCI_ROM_RESOURCE]; else return NULL; } /* Read an Enhanced Allocation (EA) entry */ static int pci_ea_read(struct pci_dev *dev, int offset) { struct resource *res; int ent_size, ent_offset = offset; resource_size_t start, end; unsigned long flags; u32 dw0, bei, base, max_offset; u8 prop; bool support_64 = (sizeof(resource_size_t) >= 8); pci_read_config_dword(dev, ent_offset, &dw0); ent_offset += 4; /* Entry size field indicates DWORDs after 1st */ ent_size = ((dw0 & PCI_EA_ES) + 1) << 2; if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */ goto out; bei = (dw0 & PCI_EA_BEI) >> 4; prop = (dw0 & PCI_EA_PP) >> 8; /* * If the Property is in the reserved range, try the Secondary * Property instead. 
*/ if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED) prop = (dw0 & PCI_EA_SP) >> 16; if (prop > PCI_EA_P_BRIDGE_IO) goto out; res = pci_ea_get_resource(dev, bei, prop); if (!res) { pci_err(dev, "Unsupported EA entry BEI: %u\n", bei); goto out; } flags = pci_ea_flags(dev, prop); if (!flags) { pci_err(dev, "Unsupported EA properties: %#x\n", prop); goto out; } /* Read Base */ pci_read_config_dword(dev, ent_offset, &base); start = (base & PCI_EA_FIELD_MASK); ent_offset += 4; /* Read MaxOffset */ pci_read_config_dword(dev, ent_offset, &max_offset); ent_offset += 4; /* Read Base MSBs (if 64-bit entry) */ if (base & PCI_EA_IS_64) { u32 base_upper; pci_read_config_dword(dev, ent_offset, &base_upper); ent_offset += 4; flags |= IORESOURCE_MEM_64; /* entry starts above 32-bit boundary, can't use */ if (!support_64 && base_upper) goto out; if (support_64) start |= ((u64)base_upper << 32); } end = start + (max_offset | 0x03); /* Read MaxOffset MSBs (if 64-bit entry) */ if (max_offset & PCI_EA_IS_64) { u32 max_offset_upper; pci_read_config_dword(dev, ent_offset, &max_offset_upper); ent_offset += 4; flags |= IORESOURCE_MEM_64; /* entry too big, can't use */ if (!support_64 && max_offset_upper) goto out; if (support_64) end += ((u64)max_offset_upper << 32); } if (end < start) { pci_err(dev, "EA Entry crosses address boundary\n"); goto out; } if (ent_size != ent_offset - offset) { pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n", ent_size, ent_offset - offset); goto out; } res->name = pci_name(dev); res->start = start; res->end = end; res->flags = flags; if (bei <= PCI_EA_BEI_BAR5) pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", bei, res, prop); else if (bei == PCI_EA_BEI_ROM) pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", res, prop); else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", bei - PCI_EA_BEI_VF_BAR0, res, prop); else pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", bei, res, prop); out: return offset + ent_size; } /* Enhanced Allocation Initialization */ void pci_ea_init(struct pci_dev *dev) { int ea; u8 num_ent; int offset; int i; /* find PCI EA capability in list */ ea = pci_find_capability(dev, PCI_CAP_ID_EA); if (!ea) return; /* determine the number of entries */ pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT, &num_ent); num_ent &= PCI_EA_NUM_ENT_MASK; offset = ea + PCI_EA_FIRST_ENT; /* Skip DWORD 2 for type 1 functions */ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) offset += 4; /* parse each EA entry */ for (i = 0; i < num_ent; ++i) offset = pci_ea_read(dev, offset); } static void pci_add_saved_cap(struct pci_dev *pci_dev, struct pci_cap_saved_state *new_cap) { hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space); } /** * _pci_add_cap_save_buffer - allocate buffer for saving given * capability registers * @dev: the PCI device * @cap: the capability to allocate the buffer for * @extended: Standard or Extended capability ID * @size: requested size of the buffer */ static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap, bool extended, unsigned int size) { int pos; struct pci_cap_saved_state *save_state; if (extended) pos = pci_find_ext_capability(dev, cap); else pos = pci_find_capability(dev, cap); if (!pos) return 0; save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL); if (!save_state) return -ENOMEM; save_state->cap.cap_nr = cap; 
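        /*
         * Record which capability this buffer is for, whether it is a
         * standard or extended capability, and how large it is, so the
         * save/restore code can locate and fill it later.
         */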
save_state->cap.cap_extended = extended; save_state->cap.size = size; pci_add_saved_cap(dev, save_state); return 0; } int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size) { return _pci_add_cap_save_buffer(dev, cap, false, size); } int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size) { return _pci_add_cap_save_buffer(dev, cap, true, size); } /** * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities * @dev: the PCI device */ void pci_allocate_cap_save_buffers(struct pci_dev *dev) { int error; error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, PCI_EXP_SAVE_REGS * sizeof(u16)); if (error) pci_err(dev, "unable to preallocate PCI Express save buffer\n"); error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16)); if (error) pci_err(dev, "unable to preallocate PCI-X save buffer\n"); error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR, 2 * sizeof(u16)); if (error) pci_err(dev, "unable to allocate suspend buffer for LTR\n"); pci_allocate_vc_save_buffers(dev); } void pci_free_cap_save_buffers(struct pci_dev *dev) { struct pci_cap_saved_state *tmp; struct hlist_node *n; hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next) kfree(tmp); } /** * pci_configure_ari - enable or disable ARI forwarding * @dev: the PCI device * * If @dev and its upstream bridge both support ARI, enable ARI in the * bridge. Otherwise, disable ARI in the bridge. */ void pci_configure_ari(struct pci_dev *dev) { u32 cap; struct pci_dev *bridge; if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn) return; bridge = dev->bus->self; if (!bridge) return; pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap); if (!(cap & PCI_EXP_DEVCAP2_ARI)) return; if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) { pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI); bridge->ari_enabled = 1; } else { pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ARI); bridge->ari_enabled = 0; } } static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) { int pos; u16 cap, ctrl; pos = pdev->acs_cap; if (!pos) return false; /* * Except for egress control, capabilities are either required * or only required if controllable. Features missing from the * capability field can therefore be assumed as hard-wired enabled. */ pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap); acs_flags &= (cap | PCI_ACS_EC); pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl); return (ctrl & acs_flags) == acs_flags; } /** * pci_acs_enabled - test ACS against required flags for a given device * @pdev: device to test * @acs_flags: required PCI ACS flags * * Return true if the device supports the provided flags. Automatically * filters out flags that are not implemented on multifunction devices. * * Note that this interface checks the effective ACS capabilities of the * device rather than the actual capabilities. For instance, most single * function endpoints are not required to support ACS because they have no * opportunity for peer-to-peer access. We therefore return 'true' * regardless of whether the device exposes an ACS capability. This makes * it much easier for callers of this function to ignore the actual type * or topology of the device when testing ACS support. */ bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) { int ret; ret = pci_dev_specific_acs_enabled(pdev, acs_flags); if (ret >= 0) return ret > 0; /* * Conventional PCI and PCI-X devices never support ACS, either * effectively or actually. 
The shared bus topology implies that * any device on the bus can receive or snoop DMA. */ if (!pci_is_pcie(pdev)) return false; switch (pci_pcie_type(pdev)) { /* * PCI/X-to-PCIe bridges are not specifically mentioned by the spec, * but since their primary interface is PCI/X, we conservatively * handle them as we would a non-PCIe device. */ case PCI_EXP_TYPE_PCIE_BRIDGE: /* * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never * applicable... must never implement an ACS Extended Capability...". * This seems arbitrary, but we take a conservative interpretation * of this statement. */ case PCI_EXP_TYPE_PCI_BRIDGE: case PCI_EXP_TYPE_RC_EC: return false; /* * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should * implement ACS in order to indicate their peer-to-peer capabilities, * regardless of whether they are single- or multi-function devices. */ case PCI_EXP_TYPE_DOWNSTREAM: case PCI_EXP_TYPE_ROOT_PORT: return pci_acs_flags_enabled(pdev, acs_flags); /* * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be * implemented by the remaining PCIe types to indicate peer-to-peer * capabilities, but only when they are part of a multifunction * device. The footnote for section 6.12 indicates the specific * PCIe types included here. */ case PCI_EXP_TYPE_ENDPOINT: case PCI_EXP_TYPE_UPSTREAM: case PCI_EXP_TYPE_LEG_END: case PCI_EXP_TYPE_RC_END: if (!pdev->multifunction) break; return pci_acs_flags_enabled(pdev, acs_flags); } /* * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable * to single function devices with the exception of downstream ports. */ return true; } /** * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy * @start: starting downstream device * @end: ending upstream device or NULL to search to the root bus * @acs_flags: required flags * * Walk up a device tree from start to end testing PCI ACS support. If * any step along the way does not support the required flags, return false. */ bool pci_acs_path_enabled(struct pci_dev *start, struct pci_dev *end, u16 acs_flags) { struct pci_dev *pdev, *parent = start; do { pdev = parent; if (!pci_acs_enabled(pdev, acs_flags)) return false; if (pci_is_root_bus(pdev->bus)) return (end == NULL); parent = pdev->bus->self; } while (pdev != end); return true; } /** * pci_acs_init - Initialize ACS if hardware supports it * @dev: the PCI device */ void pci_acs_init(struct pci_dev *dev) { dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); /* * Attempt to enable ACS regardless of capability because some Root * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have * the standard ACS capability but still support ACS via those * quirks. */ pci_enable_acs(dev); } /** * pci_rebar_find_pos - find position of resize ctrl reg for BAR * @pdev: PCI device * @bar: BAR to find * * Helper to find the position of the ctrl register for a BAR. * Returns -ENOTSUPP if resizable BARs are not supported at all. * Returns -ENOENT if no ctrl register for the BAR could be found. 
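 *
 * The capability holds a capability/control register pair for each resizable
 * BAR; the helper below walks those pairs and compares the BAR index field of
 * each control register with @bar.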
*/ static int pci_rebar_find_pos(struct pci_dev *pdev, int bar) { unsigned int pos, nbars, i; u32 ctrl; pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR); if (!pos) return -ENOTSUPP; pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; for (i = 0; i < nbars; i++, pos += 8) { int bar_idx; pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX; if (bar_idx == bar) return pos; } return -ENOENT; } /** * pci_rebar_get_possible_sizes - get possible sizes for BAR * @pdev: PCI device * @bar: BAR to query * * Get the possible sizes of a resizable BAR as bitmask defined in the spec * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable. */ u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar) { int pos; u32 cap; pos = pci_rebar_find_pos(pdev, bar); if (pos < 0) return 0; pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap); cap &= PCI_REBAR_CAP_SIZES; /* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */ if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f && bar == 0 && cap == 0x7000) cap = 0x3f000; return cap >> 4; } EXPORT_SYMBOL(pci_rebar_get_possible_sizes); /** * pci_rebar_get_current_size - get the current size of a BAR * @pdev: PCI device * @bar: BAR to set size to * * Read the size of a BAR from the resizable BAR config. * Returns size if found or negative error code. */ int pci_rebar_get_current_size(struct pci_dev *pdev, int bar) { int pos; u32 ctrl; pos = pci_rebar_find_pos(pdev, bar); if (pos < 0) return pos; pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT; } /** * pci_rebar_set_size - set a new size for a BAR * @pdev: PCI device * @bar: BAR to set size to * @size: new size as defined in the spec (0=1MB, 19=512GB) * * Set the new size of a BAR as defined in the spec. * Returns zero if resizing was successful, error code otherwise. */ int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size) { int pos; u32 ctrl; pos = pci_rebar_find_pos(pdev, bar); if (pos < 0) return pos; pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl); ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE; ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT; pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl); return 0; } /** * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port * @dev: the PCI device * @cap_mask: mask of desired AtomicOp sizes, including one or more of: * PCI_EXP_DEVCAP2_ATOMIC_COMP32 * PCI_EXP_DEVCAP2_ATOMIC_COMP64 * PCI_EXP_DEVCAP2_ATOMIC_COMP128 * * Return 0 if all upstream bridges support AtomicOp routing, egress * blocking is disabled on all upstream ports, and the root port supports * the requested completion capabilities (32-bit, 64-bit and/or 128-bit * AtomicOp completion), or negative otherwise. */ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask) { struct pci_bus *bus = dev->bus; struct pci_dev *bridge; u32 cap, ctl2; /* * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit * in Device Control 2 is reserved in VFs and the PF value applies * to all associated VFs. */ if (dev->is_virtfn) return -EINVAL; if (!pci_is_pcie(dev)) return -EINVAL; /* * Per PCIe r4.0, sec 6.15, endpoints and root ports may be * AtomicOp requesters. For now, we only support endpoints as * requesters and root ports as completers. No endpoints as * completers, and no peer-to-peer. 
 */
        switch (pci_pcie_type(dev)) {
        case PCI_EXP_TYPE_ENDPOINT:
        case PCI_EXP_TYPE_LEG_END:
        case PCI_EXP_TYPE_RC_END:
                break;
        default:
                return -EINVAL;
        }

        while (bus->parent) {
                bridge = bus->self;

                pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);

                switch (pci_pcie_type(bridge)) {
                /* Ensure switch ports support AtomicOp routing */
                case PCI_EXP_TYPE_UPSTREAM:
                case PCI_EXP_TYPE_DOWNSTREAM:
                        if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
                                return -EINVAL;
                        break;

                /* Ensure root port supports all the sizes we care about */
                case PCI_EXP_TYPE_ROOT_PORT:
                        if ((cap & cap_mask) != cap_mask)
                                return -EINVAL;
                        break;
                }

                /* Ensure upstream ports don't block AtomicOps on egress */
                if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
                        pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2);
                        if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
                                return -EINVAL;
                }

                bus = bus->parent;
        }

        pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_ATOMIC_REQ);
        return 0;
}
EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);

/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge. This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards. For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1)
 */
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
        int slot;

        if (pci_ari_enabled(dev->bus))
                slot = 0;
        else
                slot = PCI_SLOT(dev->devfn);

        return (((pin - 1) + slot) % 4) + 1;
}

int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
        u8 pin;

        pin = dev->pin;
        if (!pin)
                return -1;

        while (!pci_is_root_bus(dev->bus)) {
                pin = pci_swizzle_interrupt_pin(dev, pin);
                dev = dev->bus->self;
        }
        *bridge = dev;
        return pin;
}

/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
        u8 pin = *pinp;

        while (!pci_is_root_bus(dev->bus)) {
                pin = pci_swizzle_interrupt_pin(dev, pin);
                dev = dev->bus->self;
        }
        *pinp = pin;
        return PCI_SLOT(dev->devfn);
}
EXPORT_SYMBOL_GPL(pci_common_swizzle);

/**
 * pci_release_region - Release a PCI BAR
 * @pdev: PCI device whose resources were previously reserved by
 *        pci_request_region()
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region(). Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
        struct pci_devres *dr;

        if (pci_resource_len(pdev, bar) == 0)
                return;

        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
                release_region(pci_resource_start(pdev, bar),
                               pci_resource_len(pdev, bar));
        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
                release_mem_region(pci_resource_start(pdev, bar),
                                   pci_resource_len(pdev, bar));

        dr = find_pci_dr(pdev);
        if (dr)
                dr->region_mask &= ~(1 << bar);
}
EXPORT_SYMBOL(pci_release_region);

/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
* @exclusive: whether the region access is exclusive or not * * Mark the PCI region associated with PCI device @pdev BAR @bar as * being reserved by owner @res_name. Do not access any * address inside the PCI regions unless this call returns * successfully. * * If @exclusive is set, then the region is marked so that userspace * is explicitly not allowed to map the resource via /dev/mem or * sysfs MMIO access. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. */ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, int exclusive) { struct pci_devres *dr; if (pci_resource_len(pdev, bar) == 0) return 0; if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { if (!request_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar), res_name)) goto err_out; } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { if (!__request_mem_region(pci_resource_start(pdev, bar), pci_resource_len(pdev, bar), res_name, exclusive)) goto err_out; } dr = find_pci_dr(pdev); if (dr) dr->region_mask |= 1 << bar; return 0; err_out: pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar, &pdev->resource[bar]); return -EBUSY; } /** * pci_request_region - Reserve PCI I/O and memory resource * @pdev: PCI device whose resources are to be reserved * @bar: BAR to be reserved * @res_name: Name to be associated with resource * * Mark the PCI region associated with PCI device @pdev BAR @bar as * being reserved by owner @res_name. Do not access any * address inside the PCI regions unless this call returns * successfully. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. */ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) { return __pci_request_region(pdev, bar, res_name, 0); } EXPORT_SYMBOL(pci_request_region); /** * pci_release_selected_regions - Release selected PCI I/O and memory resources * @pdev: PCI device whose resources were previously reserved * @bars: Bitmask of BARs to be released * * Release selected PCI I/O and memory resources previously reserved. * Call this function only after all use of the PCI regions has ceased. 
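 *
 * Usage sketch (illustrative; DRV_NAME and the choice of BARs 0 and 2 are
 * assumptions)::
 *
 *        err = pci_request_selected_regions(pdev, BIT(0) | BIT(2), DRV_NAME);
 *        if (err)
 *                return err;
 *        ...
 *        pci_release_selected_regions(pdev, BIT(0) | BIT(2));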
*/ void pci_release_selected_regions(struct pci_dev *pdev, int bars) { int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) if (bars & (1 << i)) pci_release_region(pdev, i); } EXPORT_SYMBOL(pci_release_selected_regions); static int __pci_request_selected_regions(struct pci_dev *pdev, int bars, const char *res_name, int excl) { int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) if (bars & (1 << i)) if (__pci_request_region(pdev, i, res_name, excl)) goto err_out; return 0; err_out: while (--i >= 0) if (bars & (1 << i)) pci_release_region(pdev, i); return -EBUSY; } /** * pci_request_selected_regions - Reserve selected PCI I/O and memory resources * @pdev: PCI device whose resources are to be reserved * @bars: Bitmask of BARs to be requested * @res_name: Name to be associated with resource */ int pci_request_selected_regions(struct pci_dev *pdev, int bars, const char *res_name) { return __pci_request_selected_regions(pdev, bars, res_name, 0); } EXPORT_SYMBOL(pci_request_selected_regions); int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars, const char *res_name) { return __pci_request_selected_regions(pdev, bars, res_name, IORESOURCE_EXCLUSIVE); } EXPORT_SYMBOL(pci_request_selected_regions_exclusive); /** * pci_release_regions - Release reserved PCI I/O and memory resources * @pdev: PCI device whose resources were previously reserved by * pci_request_regions() * * Releases all PCI I/O and memory resources previously reserved by a * successful call to pci_request_regions(). Call this function only * after all use of the PCI regions has ceased. */ void pci_release_regions(struct pci_dev *pdev) { pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1); } EXPORT_SYMBOL(pci_release_regions); /** * pci_request_regions - Reserve PCI I/O and memory resources * @pdev: PCI device whose resources are to be reserved * @res_name: Name to be associated with resource. * * Mark all PCI regions associated with PCI device @pdev as * being reserved by owner @res_name. Do not access any * address inside the PCI regions unless this call returns * successfully. * * Returns 0 on success, or %EBUSY on error. A warning * message is also printed on failure. */ int pci_request_regions(struct pci_dev *pdev, const char *res_name) { return pci_request_selected_regions(pdev, ((1 << PCI_STD_NUM_BARS) - 1), res_name); } EXPORT_SYMBOL(pci_request_regions); /** * pci_request_regions_exclusive - Reserve PCI I/O and memory resources * @pdev: PCI device whose resources are to be reserved * @res_name: Name to be associated with resource. * * Mark all PCI regions associated with PCI device @pdev as being reserved * by owner @res_name. Do not access any address inside the PCI regions * unless this call returns successfully. * * pci_request_regions_exclusive() will mark the region so that /dev/mem * and the sysfs MMIO access will not be allowed. * * Returns 0 on success, or %EBUSY on error. A warning message is also * printed on failure. */ int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) { return pci_request_selected_regions_exclusive(pdev, ((1 << PCI_STD_NUM_BARS) - 1), res_name); } EXPORT_SYMBOL(pci_request_regions_exclusive); /* * Record the PCI IO range (expressed as CPU physical address + size). 
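 * Host bridge setup code typically calls this while parsing the bridge's
 * ranges, e.g. (sketch; 'np' as the bridge's device_node and 'range' as a
 * parsed I/O range are assumptions):
 *
 *        pci_register_io_range(&np->fwnode, range.cpu_addr, range.size);
 *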
* Return a negative value if an error has occurred, zero otherwise */ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, resource_size_t size) { int ret = 0; #ifdef PCI_IOBASE struct logic_pio_hwaddr *range; if (!size || addr + size < addr) return -EINVAL; range = kzalloc(sizeof(*range), GFP_ATOMIC); if (!range) return -ENOMEM; range->fwnode = fwnode; range->size = size; range->hw_start = addr; range->flags = LOGIC_PIO_CPU_MMIO; ret = logic_pio_register_range(range); if (ret) kfree(range); /* Ignore duplicates due to deferred probing */ if (ret == -EEXIST) ret = 0; #endif return ret; } phys_addr_t pci_pio_to_address(unsigned long pio) { #ifdef PCI_IOBASE if (pio < MMIO_UPPER_LIMIT) return logic_pio_to_hwaddr(pio); #endif return (phys_addr_t) OF_BAD_ADDR; } EXPORT_SYMBOL_GPL(pci_pio_to_address); unsigned long __weak pci_address_to_pio(phys_addr_t address) { #ifdef PCI_IOBASE return logic_pio_trans_cpuaddr(address); #else if (address > IO_SPACE_LIMIT) return (unsigned long)-1; return (unsigned long) address; #endif } /** * pci_remap_iospace - Remap the memory mapped I/O space * @res: Resource describing the I/O space * @phys_addr: physical address of range to be mapped * * Remap the memory mapped I/O space described by the @res and the CPU * physical address @phys_addr into virtual address space. Only * architectures that have memory mapped IO functions defined (and the * PCI_IOBASE value defined) should call this function. */ #ifndef pci_remap_iospace int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) { #if defined(PCI_IOBASE) && defined(CONFIG_MMU) unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; if (!(res->flags & IORESOURCE_IO)) return -EINVAL; if (res->end > IO_SPACE_LIMIT) return -EINVAL; return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr, pgprot_device(PAGE_KERNEL)); #else /* * This architecture does not have memory mapped I/O space, * so this function should never be called */ WARN_ONCE(1, "This architecture does not support memory mapped I/O\n"); return -ENODEV; #endif } EXPORT_SYMBOL(pci_remap_iospace); #endif /** * pci_unmap_iospace - Unmap the memory mapped I/O space * @res: resource to be unmapped * * Unmap the CPU virtual address @res from virtual address space. Only * architectures that have memory mapped IO functions defined (and the * PCI_IOBASE value defined) should call this function. */ void pci_unmap_iospace(struct resource *res) { #if defined(PCI_IOBASE) && defined(CONFIG_MMU) unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start; vunmap_range(vaddr, vaddr + resource_size(res)); #endif } EXPORT_SYMBOL(pci_unmap_iospace); static void devm_pci_unmap_iospace(struct device *dev, void *ptr) { struct resource **res = ptr; pci_unmap_iospace(*res); } /** * devm_pci_remap_iospace - Managed pci_remap_iospace() * @dev: Generic device to remap IO address for * @res: Resource describing the I/O space * @phys_addr: physical address of range to be mapped * * Managed pci_remap_iospace(). Map is automatically unmapped on driver * detach. 
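 *
 * Usage sketch for a host bridge driver (illustrative; @res and @phys_addr
 * are assumed to describe the bridge's I/O window)::
 *
 *        err = devm_pci_remap_iospace(dev, res, phys_addr);
 *        if (err)
 *                return err;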
*/ int devm_pci_remap_iospace(struct device *dev, const struct resource *res, phys_addr_t phys_addr) { const struct resource **ptr; int error; ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL); if (!ptr) return -ENOMEM; error = pci_remap_iospace(res, phys_addr); if (error) { devres_free(ptr); } else { *ptr = res; devres_add(dev, ptr); } return error; } EXPORT_SYMBOL(devm_pci_remap_iospace); /** * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace() * @dev: Generic device to remap IO address for * @offset: Resource address to map * @size: Size of map * * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver * detach. */ void __iomem *devm_pci_remap_cfgspace(struct device *dev, resource_size_t offset, resource_size_t size) { void __iomem **ptr, *addr; ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return NULL; addr = pci_remap_cfgspace(offset, size); if (addr) { *ptr = addr; devres_add(dev, ptr); } else devres_free(ptr); return addr; } EXPORT_SYMBOL(devm_pci_remap_cfgspace); /** * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource * @dev: generic device to handle the resource for * @res: configuration space resource to be handled * * Checks that a resource is a valid memory region, requests the memory * region and ioremaps with pci_remap_cfgspace() API that ensures the * proper PCI configuration space memory attributes are guaranteed. * * All operations are managed and will be undone on driver detach. * * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code * on failure. Usage example:: * * res = platform_get_resource(pdev, IORESOURCE_MEM, 0); * base = devm_pci_remap_cfg_resource(&pdev->dev, res); * if (IS_ERR(base)) * return PTR_ERR(base); */ void __iomem *devm_pci_remap_cfg_resource(struct device *dev, struct resource *res) { resource_size_t size; const char *name; void __iomem *dest_ptr; BUG_ON(!dev); if (!res || resource_type(res) != IORESOURCE_MEM) { dev_err(dev, "invalid resource\n"); return IOMEM_ERR_PTR(-EINVAL); } size = resource_size(res); if (res->name) name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev), res->name); else name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL); if (!name) return IOMEM_ERR_PTR(-ENOMEM); if (!devm_request_mem_region(dev, res->start, size, name)) { dev_err(dev, "can't request region for resource %pR\n", res); return IOMEM_ERR_PTR(-EBUSY); } dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size); if (!dest_ptr) { dev_err(dev, "ioremap failed for resource %pR\n", res); devm_release_mem_region(dev, res->start, size); dest_ptr = IOMEM_ERR_PTR(-ENOMEM); } return dest_ptr; } EXPORT_SYMBOL(devm_pci_remap_cfg_resource); static void __pci_set_master(struct pci_dev *dev, bool enable) { u16 old_cmd, cmd; pci_read_config_word(dev, PCI_COMMAND, &old_cmd); if (enable) cmd = old_cmd | PCI_COMMAND_MASTER; else cmd = old_cmd & ~PCI_COMMAND_MASTER; if (cmd != old_cmd) { pci_dbg(dev, "%s bus mastering\n", enable ? "enabling" : "disabling"); pci_write_config_word(dev, PCI_COMMAND, cmd); } dev->is_busmaster = enable; } /** * pcibios_setup - process "pci=" kernel boot arguments * @str: string used to pass in "pci=" kernel boot arguments * * Process kernel boot arguments. This is the default implementation. * Architecture specific implementations can override this as necessary. 
*/ char * __weak __init pcibios_setup(char *str) { return str; } /** * pcibios_set_master - enable PCI bus-mastering for device dev * @dev: the PCI device to enable * * Enables PCI bus-mastering for the device. This is the default * implementation. Architecture specific implementations can override * this if necessary. */ void __weak pcibios_set_master(struct pci_dev *dev) { u8 lat; /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */ if (pci_is_pcie(dev)) return; pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat); if (lat < 16) lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency; else if (lat > pcibios_max_latency) lat = pcibios_max_latency; else return; pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat); } /** * pci_set_master - enables bus-mastering for device dev * @dev: the PCI device to enable * * Enables bus-mastering on the device and calls pcibios_set_master() * to do the needed arch specific settings. */ void pci_set_master(struct pci_dev *dev) { __pci_set_master(dev, true); pcibios_set_master(dev); } EXPORT_SYMBOL(pci_set_master); /** * pci_clear_master - disables bus-mastering for device dev * @dev: the PCI device to disable */ void pci_clear_master(struct pci_dev *dev) { __pci_set_master(dev, false); } EXPORT_SYMBOL(pci_clear_master); /** * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed * @dev: the PCI device for which MWI is to be enabled * * Helper function for pci_set_mwi. * Originally copied from drivers/net/acenic.c. * Copyright 1998-2001 by Jes Sorensen, <[email protected]>. * * RETURNS: An appropriate -ERRNO error value on error, or zero for success. */ int pci_set_cacheline_size(struct pci_dev *dev) { u8 cacheline_size; if (!pci_cache_line_size) return -EINVAL; /* Validate current setting: the PCI_CACHE_LINE_SIZE must be equal to or multiple of the right value. */ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); if (cacheline_size >= pci_cache_line_size && (cacheline_size % pci_cache_line_size) == 0) return 0; /* Write the correct value. */ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size); /* Read it back. */ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size); if (cacheline_size == pci_cache_line_size) return 0; pci_dbg(dev, "cache line size of %d is not supported\n", pci_cache_line_size << 2); return -EINVAL; } EXPORT_SYMBOL_GPL(pci_set_cacheline_size); /** * pci_set_mwi - enables memory-write-invalidate PCI transaction * @dev: the PCI device for which MWI is enabled * * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. * * RETURNS: An appropriate -ERRNO error value on error, or zero for success. */ int pci_set_mwi(struct pci_dev *dev) { #ifdef PCI_DISABLE_MWI return 0; #else int rc; u16 cmd; rc = pci_set_cacheline_size(dev); if (rc) return rc; pci_read_config_word(dev, PCI_COMMAND, &cmd); if (!(cmd & PCI_COMMAND_INVALIDATE)) { pci_dbg(dev, "enabling Mem-Wr-Inval\n"); cmd |= PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; #endif } EXPORT_SYMBOL(pci_set_mwi); /** * pcim_set_mwi - a device-managed pci_set_mwi() * @dev: the PCI device for which MWI is enabled * * Managed pci_set_mwi(). * * RETURNS: An appropriate -ERRNO error value on error, or zero for success. 
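 *
 * Usage sketch (illustrative)::
 *
 *        err = pcim_enable_device(pdev);
 *        if (err)
 *                return err;
 *        pcim_set_mwi(pdev);
 *
 * pcim_set_mwi() needs the device-managed state that pcim_enable_device()
 * sets up, hence the ordering above.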
*/ int pcim_set_mwi(struct pci_dev *dev) { struct pci_devres *dr; dr = find_pci_dr(dev); if (!dr) return -ENOMEM; dr->mwi = 1; return pci_set_mwi(dev); } EXPORT_SYMBOL(pcim_set_mwi); /** * pci_try_set_mwi - enables memory-write-invalidate PCI transaction * @dev: the PCI device for which MWI is enabled * * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND. * Callers are not required to check the return value. * * RETURNS: An appropriate -ERRNO error value on error, or zero for success. */ int pci_try_set_mwi(struct pci_dev *dev) { #ifdef PCI_DISABLE_MWI return 0; #else return pci_set_mwi(dev); #endif } EXPORT_SYMBOL(pci_try_set_mwi); /** * pci_clear_mwi - disables Memory-Write-Invalidate for device dev * @dev: the PCI device to disable * * Disables PCI Memory-Write-Invalidate transaction on the device */ void pci_clear_mwi(struct pci_dev *dev) { #ifndef PCI_DISABLE_MWI u16 cmd; pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & PCI_COMMAND_INVALIDATE) { cmd &= ~PCI_COMMAND_INVALIDATE; pci_write_config_word(dev, PCI_COMMAND, cmd); } #endif } EXPORT_SYMBOL(pci_clear_mwi); /** * pci_disable_parity - disable parity checking for device * @dev: the PCI device to operate on * * Disable parity checking for device @dev */ void pci_disable_parity(struct pci_dev *dev) { u16 cmd; pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & PCI_COMMAND_PARITY) { cmd &= ~PCI_COMMAND_PARITY; pci_write_config_word(dev, PCI_COMMAND, cmd); } } /** * pci_intx - enables/disables PCI INTx for device dev * @pdev: the PCI device to operate on * @enable: boolean: whether to enable or disable PCI INTx * * Enables/disables PCI INTx for device @pdev */ void pci_intx(struct pci_dev *pdev, int enable) { u16 pci_command, new; pci_read_config_word(pdev, PCI_COMMAND, &pci_command); if (enable) new = pci_command & ~PCI_COMMAND_INTX_DISABLE; else new = pci_command | PCI_COMMAND_INTX_DISABLE; if (new != pci_command) { struct pci_devres *dr; pci_write_config_word(pdev, PCI_COMMAND, new); dr = find_pci_dr(pdev); if (dr && !dr->restore_intx) { dr->restore_intx = 1; dr->orig_intx = !enable; } } } EXPORT_SYMBOL_GPL(pci_intx); static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) { struct pci_bus *bus = dev->bus; bool mask_updated = true; u32 cmd_status_dword; u16 origcmd, newcmd; unsigned long flags; bool irq_pending; /* * We do a single dword read to retrieve both command and status. * Document assumptions that make this possible. */ BUILD_BUG_ON(PCI_COMMAND % 4); BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS); raw_spin_lock_irqsave(&pci_lock, flags); bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword); irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT; /* * Check interrupt status register to see whether our device * triggered the interrupt (when masking) or the next IRQ is * already pending (when unmasking). */ if (mask != irq_pending) { mask_updated = false; goto done; } origcmd = cmd_status_dword; newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE; if (mask) newcmd |= PCI_COMMAND_INTX_DISABLE; if (newcmd != origcmd) bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd); done: raw_spin_unlock_irqrestore(&pci_lock, flags); return mask_updated; } /** * pci_check_and_mask_intx - mask INTx on pending interrupt * @dev: the PCI device to operate on * * Check if the device dev has its INTx line asserted, mask it and return * true in that case. False is returned if no interrupt was pending. 
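 *
 * An interrupt handler for a shared legacy INTx line can use it as
 * (illustrative sketch)::
 *
 *        if (!pci_check_and_mask_intx(pdev))
 *                return IRQ_NONE;
 *        return IRQ_HANDLED;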
*/ bool pci_check_and_mask_intx(struct pci_dev *dev) { return pci_check_and_set_intx_mask(dev, true); } EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); /** * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending * @dev: the PCI device to operate on * * Check if the device dev has its INTx line asserted, unmask it if not and * return true. False is returned and the mask remains active if there was * still an interrupt pending. */ bool pci_check_and_unmask_intx(struct pci_dev *dev) { return pci_check_and_set_intx_mask(dev, false); } EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); /** * pci_wait_for_pending_transaction - wait for pending transaction * @dev: the PCI device to operate on * * Return 0 if transaction is pending 1 otherwise. */ int pci_wait_for_pending_transaction(struct pci_dev *dev) { if (!pci_is_pcie(dev)) return 1; return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA, PCI_EXP_DEVSTA_TRPND); } EXPORT_SYMBOL(pci_wait_for_pending_transaction); /** * pcie_flr - initiate a PCIe function level reset * @dev: device to reset * * Initiate a function level reset unconditionally on @dev without * checking any flags and DEVCAP */ int pcie_flr(struct pci_dev *dev) { if (!pci_wait_for_pending_transaction(dev)) pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n"); pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR); if (dev->imm_ready) return 0; /* * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within * 100ms, but may silently discard requests while the FLR is in * progress. Wait 100ms before trying to access the device. */ msleep(100); return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS); } EXPORT_SYMBOL_GPL(pcie_flr); /** * pcie_reset_flr - initiate a PCIe function level reset * @dev: device to reset * @probe: if true, return 0 if device can be reset this way * * Initiate a function level reset on @dev. */ int pcie_reset_flr(struct pci_dev *dev, bool probe) { if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) return -ENOTTY; if (!(dev->devcap & PCI_EXP_DEVCAP_FLR)) return -ENOTTY; if (probe) return 0; return pcie_flr(dev); } EXPORT_SYMBOL_GPL(pcie_reset_flr); static int pci_af_flr(struct pci_dev *dev, bool probe) { int pos; u8 cap; pos = pci_find_capability(dev, PCI_CAP_ID_AF); if (!pos) return -ENOTTY; if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET) return -ENOTTY; pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap); if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR)) return -ENOTTY; if (probe) return 0; /* * Wait for Transaction Pending bit to clear. A word-aligned test * is used, so we use the control offset rather than status and shift * the test bit to match. */ if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL, PCI_AF_STATUS_TP << 8)) pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n"); pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR); if (dev->imm_ready) return 0; /* * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006, * updated 27 July 2006; a device must complete an FLR within * 100ms, but may silently discard requests while the FLR is in * progress. Wait 100ms before trying to access the device. */ msleep(100); return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS); } /** * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0. * @dev: Device to reset. * @probe: if true, return 0 if the device can be reset this way. 
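 *
 * This is one of several function reset methods implemented in this file; it
 * is normally only attempted when an FLR-style reset is not available for
 * the device.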
* * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is * unset, it will be reinitialized internally when going from PCI_D3hot to * PCI_D0. If that's the case and the device is not in a low-power state * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset. * * NOTE: This causes the caller to sleep for twice the device power transition * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms * by default (i.e. unless the @dev's d3hot_delay field has a different value). * Moreover, only devices in D0 can be reset by this function. */ static int pci_pm_reset(struct pci_dev *dev, bool probe) { u16 csr; if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET) return -ENOTTY; pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr); if (csr & PCI_PM_CTRL_NO_SOFT_RESET) return -ENOTTY; if (probe) return 0; if (dev->current_state != PCI_D0) return -EINVAL; csr &= ~PCI_PM_CTRL_STATE_MASK; csr |= PCI_D3hot; pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); pci_dev_d3_sleep(dev); csr &= ~PCI_PM_CTRL_STATE_MASK; csr |= PCI_D0; pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr); pci_dev_d3_sleep(dev); return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS); } /** * pcie_wait_for_link_status - Wait for link status change * @pdev: Device whose link to wait for. * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE. * @active: Waiting for active or inactive? * * Return 0 if successful, or -ETIMEDOUT if status has not changed within * PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds. */ static int pcie_wait_for_link_status(struct pci_dev *pdev, bool use_lt, bool active) { u16 lnksta_mask, lnksta_match; unsigned long end_jiffies; u16 lnksta; lnksta_mask = use_lt ? PCI_EXP_LNKSTA_LT : PCI_EXP_LNKSTA_DLLLA; lnksta_match = active ? lnksta_mask : 0; end_jiffies = jiffies + msecs_to_jiffies(PCIE_LINK_RETRAIN_TIMEOUT_MS); do { pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta); if ((lnksta & lnksta_mask) == lnksta_match) return 0; msleep(1); } while (time_before(jiffies, end_jiffies)); return -ETIMEDOUT; } /** * pcie_retrain_link - Request a link retrain and wait for it to complete * @pdev: Device whose link to retrain. * @use_lt: Use the LT bit if TRUE, or the DLLLA bit if FALSE, for status. * * Retrain completion status is retrieved from the Link Status Register * according to @use_lt. It is not verified whether the use of the DLLLA * bit is valid. * * Return 0 if successful, or -ETIMEDOUT if training has not completed * within PCIE_LINK_RETRAIN_TIMEOUT_MS milliseconds. */ int pcie_retrain_link(struct pci_dev *pdev, bool use_lt) { int rc; /* * Ensure the updated LNKCTL parameters are used during link * training by checking that there is no ongoing link training to * avoid LTSSM race as recommended in Implementation Note at the * end of PCIe r6.0.1 sec 7.5.3.7. */ rc = pcie_wait_for_link_status(pdev, use_lt, !use_lt); if (rc) return rc; pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); if (pdev->clear_retrain_link) { /* * Due to an erratum in some devices the Retrain Link bit * needs to be cleared again manually to allow the link * training to succeed. */ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_RL); } return pcie_wait_for_link_status(pdev, use_lt, !use_lt); } /** * pcie_wait_for_link_delay - Wait until link is active or inactive * @pdev: Bridge device * @active: waiting for active or inactive? 
* @delay: Delay to wait after link has become active (in ms) * * Use this to wait till link becomes active or inactive. */ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay) { int rc; /* * Some controllers might not implement link active reporting. In this * case, we wait for 1000 ms + any delay requested by the caller. */ if (!pdev->link_active_reporting) { msleep(PCIE_LINK_RETRAIN_TIMEOUT_MS + delay); return true; } /* * PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms, * after which we should expect an link active if the reset was * successful. If so, software must wait a minimum 100ms before sending * configuration requests to devices downstream this port. * * If the link fails to activate, either the device was physically * removed or the link is permanently failed. */ if (active) msleep(20); rc = pcie_wait_for_link_status(pdev, false, active); if (active) { if (rc) rc = pcie_failed_link_retrain(pdev); if (rc) return false; msleep(delay); return true; } if (rc) return false; return true; } /** * pcie_wait_for_link - Wait until link is active or inactive * @pdev: Bridge device * @active: waiting for active or inactive? * * Use this to wait till link becomes active or inactive. */ bool pcie_wait_for_link(struct pci_dev *pdev, bool active) { return pcie_wait_for_link_delay(pdev, active, 100); } /* * Find maximum D3cold delay required by all the devices on the bus. The * spec says 100 ms, but firmware can lower it and we allow drivers to * increase it as well. * * Called with @pci_bus_sem locked for reading. */ static int pci_bus_max_d3cold_delay(const struct pci_bus *bus) { const struct pci_dev *pdev; int min_delay = 100; int max_delay = 0; list_for_each_entry(pdev, &bus->devices, bus_list) { if (pdev->d3cold_delay < min_delay) min_delay = pdev->d3cold_delay; if (pdev->d3cold_delay > max_delay) max_delay = pdev->d3cold_delay; } return max(min_delay, max_delay); } /** * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible * @dev: PCI bridge * @reset_type: reset type in human-readable form * * Handle necessary delays before access to the devices on the secondary * side of the bridge are permitted after D3cold to D0 transition * or Conventional Reset. * * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section * 4.3.2. * * Return 0 on success or -ENOTTY if the first device on the secondary bus * failed to become accessible. */ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) { struct pci_dev *child; int delay; if (pci_dev_is_disconnected(dev)) return 0; if (!pci_is_bridge(dev)) return 0; down_read(&pci_bus_sem); /* * We only deal with devices that are present currently on the bus. * For any hot-added devices the access delay is handled in pciehp * board_added(). In case of ACPI hotplug the firmware is expected * to configure the devices before OS is notified. */ if (!dev->subordinate || list_empty(&dev->subordinate->devices)) { up_read(&pci_bus_sem); return 0; } /* Take d3cold_delay requirements into account */ delay = pci_bus_max_d3cold_delay(dev->subordinate); if (!delay) { up_read(&pci_bus_sem); return 0; } child = list_first_entry(&dev->subordinate->devices, struct pci_dev, bus_list); up_read(&pci_bus_sem); /* * Conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before * accessing the device after reset (that is 1000 ms + 100 ms). 
*/ if (!pci_is_pcie(dev)) { pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay); msleep(1000 + delay); return 0; } /* * For PCIe downstream and root ports that do not support speeds * greater than 5 GT/s need to wait minimum 100 ms. For higher * speeds (gen3) we need to wait first for the data link layer to * become active. * * However, 100 ms is the minimum and the PCIe spec says the * software must allow at least 1s before it can determine that the * device that did not respond is a broken device. Also device can * take longer than that to respond if it indicates so through Request * Retry Status completions. * * Therefore we wait for 100 ms and check for the device presence * until the timeout expires. */ if (!pcie_downstream_port(dev)) return 0; if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) { u16 status; pci_dbg(dev, "waiting %d ms for downstream link\n", delay); msleep(delay); if (!pci_dev_wait(child, reset_type, PCI_RESET_WAIT - delay)) return 0; /* * If the port supports active link reporting we now check * whether the link is active and if not bail out early with * the assumption that the device is not present anymore. */ if (!dev->link_active_reporting) return -ENOTTY; pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &status); if (!(status & PCI_EXP_LNKSTA_DLLLA)) return -ENOTTY; return pci_dev_wait(child, reset_type, PCIE_RESET_READY_POLL_MS - PCI_RESET_WAIT); } pci_dbg(dev, "waiting %d ms for downstream link, after activation\n", delay); if (!pcie_wait_for_link_delay(dev, true, delay)) { /* Did not train, no need to wait any further */ pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n"); return -ENOTTY; } return pci_dev_wait(child, reset_type, PCIE_RESET_READY_POLL_MS - delay); } void pci_reset_secondary_bus(struct pci_dev *dev) { u16 ctrl; pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); ctrl |= PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); /* * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double * this to 2ms to ensure that we meet the minimum requirement. */ msleep(2); ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); } void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) { pci_reset_secondary_bus(dev); } /** * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge. * @dev: Bridge device * * Use the bridge control register to assert reset on the secondary bus. * Devices on the secondary bus are left in power-on state. 
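 *
 * A minimal usage sketch, assuming "bridge" is the port above the devices to
 * be reset and that the caller has already quiesced (and, where needed,
 * saved the state of) everything below it:
 *
 *	int err = pci_bridge_secondary_bus_reset(bridge);
 *
 *	if (err)
 *		pci_err(bridge, "secondary bus reset failed: %d\n", err);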
*/ int pci_bridge_secondary_bus_reset(struct pci_dev *dev) { pcibios_reset_secondary_bus(dev); return pci_bridge_wait_for_secondary_bus(dev, "bus reset"); } EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); static int pci_parent_bus_reset(struct pci_dev *dev, bool probe) { struct pci_dev *pdev; if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) return -ENOTTY; list_for_each_entry(pdev, &dev->bus->devices, bus_list) if (pdev != dev) return -ENOTTY; if (probe) return 0; return pci_bridge_secondary_bus_reset(dev->bus->self); } static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe) { int rc = -ENOTTY; if (!hotplug || !try_module_get(hotplug->owner)) return rc; if (hotplug->ops->reset_slot) rc = hotplug->ops->reset_slot(hotplug, probe); module_put(hotplug->owner); return rc; } static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe) { if (dev->multifunction || dev->subordinate || !dev->slot || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET) return -ENOTTY; return pci_reset_hotplug_slot(dev->slot->hotplug, probe); } static int pci_reset_bus_function(struct pci_dev *dev, bool probe) { int rc; rc = pci_dev_reset_slot_function(dev, probe); if (rc != -ENOTTY) return rc; return pci_parent_bus_reset(dev, probe); } void pci_dev_lock(struct pci_dev *dev) { /* block PM suspend, driver probe, etc. */ device_lock(&dev->dev); pci_cfg_access_lock(dev); } EXPORT_SYMBOL_GPL(pci_dev_lock); /* Return 1 on successful lock, 0 on contention */ int pci_dev_trylock(struct pci_dev *dev) { if (device_trylock(&dev->dev)) { if (pci_cfg_access_trylock(dev)) return 1; device_unlock(&dev->dev); } return 0; } EXPORT_SYMBOL_GPL(pci_dev_trylock); void pci_dev_unlock(struct pci_dev *dev) { pci_cfg_access_unlock(dev); device_unlock(&dev->dev); } EXPORT_SYMBOL_GPL(pci_dev_unlock); static void pci_dev_save_and_disable(struct pci_dev *dev) { const struct pci_error_handlers *err_handler = dev->driver ? dev->driver->err_handler : NULL; /* * dev->driver->err_handler->reset_prepare() is protected against * races with ->remove() by the device lock, which must be held by * the caller. */ if (err_handler && err_handler->reset_prepare) err_handler->reset_prepare(dev); /* * Wake-up device prior to save. PM registers default to D0 after * reset and a simple register restore doesn't reliably return * to a non-D0 state anyway. */ pci_set_power_state(dev, PCI_D0); pci_save_state(dev); /* * Disable the device by clearing the Command register, except for * INTx-disable which is set. This not only disables MMIO and I/O port * BARs, but also prevents the device from being Bus Master, preventing * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3 * compliant devices, INTx-disable prevents legacy interrupts. */ pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); } static void pci_dev_restore(struct pci_dev *dev) { const struct pci_error_handlers *err_handler = dev->driver ? dev->driver->err_handler : NULL; pci_restore_state(dev); /* * dev->driver->err_handler->reset_done() is protected against * races with ->remove() by the device lock, which must be held by * the caller. 
*/ if (err_handler && err_handler->reset_done) err_handler->reset_done(dev); } /* dev->reset_methods[] is a 0-terminated list of indices into this array */ static const struct pci_reset_fn_method pci_reset_fn_methods[] = { { }, { pci_dev_specific_reset, .name = "device_specific" }, { pci_dev_acpi_reset, .name = "acpi" }, { pcie_reset_flr, .name = "flr" }, { pci_af_flr, .name = "af_flr" }, { pci_pm_reset, .name = "pm" }, { pci_reset_bus_function, .name = "bus" }, }; static ssize_t reset_method_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); ssize_t len = 0; int i, m; for (i = 0; i < PCI_NUM_RESET_METHODS; i++) { m = pdev->reset_methods[i]; if (!m) break; len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "", pci_reset_fn_methods[m].name); } if (len) len += sysfs_emit_at(buf, len, "\n"); return len; } static int reset_method_lookup(const char *name) { int m; for (m = 1; m < PCI_NUM_RESET_METHODS; m++) { if (sysfs_streq(name, pci_reset_fn_methods[m].name)) return m; } return 0; /* not found */ } static ssize_t reset_method_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); char *options, *name; int m, n; u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 }; if (sysfs_streq(buf, "")) { pdev->reset_methods[0] = 0; pci_warn(pdev, "All device reset methods disabled by user"); return count; } if (sysfs_streq(buf, "default")) { pci_init_reset_methods(pdev); return count; } options = kstrndup(buf, count, GFP_KERNEL); if (!options) return -ENOMEM; n = 0; while ((name = strsep(&options, " ")) != NULL) { if (sysfs_streq(name, "")) continue; name = strim(name); m = reset_method_lookup(name); if (!m) { pci_err(pdev, "Invalid reset method '%s'", name); goto error; } if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) { pci_err(pdev, "Unsupported reset method '%s'", name); goto error; } if (n == PCI_NUM_RESET_METHODS - 1) { pci_err(pdev, "Too many reset methods\n"); goto error; } reset_methods[n++] = m; } reset_methods[n] = 0; /* Warn if dev-specific supported but not highest priority */ if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 && reset_methods[0] != 1) pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user"); memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods)); kfree(options); return count; error: /* Leave previous methods unchanged */ kfree(options); return -EINVAL; } static DEVICE_ATTR_RW(reset_method); static struct attribute *pci_dev_reset_method_attrs[] = { &dev_attr_reset_method.attr, NULL, }; static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj, struct attribute *a, int n) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); if (!pci_reset_supported(pdev)) return 0; return a->mode; } const struct attribute_group pci_dev_reset_method_attr_group = { .attrs = pci_dev_reset_method_attrs, .is_visible = pci_dev_reset_method_attr_is_visible, }; /** * __pci_reset_function_locked - reset a PCI device function while holding * the @dev mutex lock. * @dev: PCI device to reset * * Some devices allow an individual function to be reset without affecting * other functions in the same device. The PCI device must be responsive * to PCI config space in order to use this function. * * The device function is presumed to be unused and the caller is holding * the device mutex lock when this function is called. 
* * Resetting the device will make the contents of PCI configuration space * random, so any caller of this must be prepared to reinitialise the * device including MSI, bus mastering, BARs, decoding IO and memory spaces, * etc. * * Returns 0 if the device function was successfully reset or negative if the * device doesn't support resetting a single function. */ int __pci_reset_function_locked(struct pci_dev *dev) { int i, m, rc; might_sleep(); /* * A reset method returns -ENOTTY if it doesn't support this device and * we should try the next method. * * If it returns 0 (success), we're finished. If it returns any other * error, we're also finished: this indicates that further reset * mechanisms might be broken on the device. */ for (i = 0; i < PCI_NUM_RESET_METHODS; i++) { m = dev->reset_methods[i]; if (!m) return -ENOTTY; rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET); if (!rc) return 0; if (rc != -ENOTTY) return rc; } return -ENOTTY; } EXPORT_SYMBOL_GPL(__pci_reset_function_locked); /** * pci_init_reset_methods - check whether device can be safely reset * and store supported reset mechanisms. * @dev: PCI device to check for reset mechanisms * * Some devices allow an individual function to be reset without affecting * other functions in the same device. The PCI device must be in D0-D3hot * state. * * Stores reset mechanisms supported by device in reset_methods byte array * which is a member of struct pci_dev. */ void pci_init_reset_methods(struct pci_dev *dev) { int m, i, rc; BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS); might_sleep(); i = 0; for (m = 1; m < PCI_NUM_RESET_METHODS; m++) { rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE); if (!rc) dev->reset_methods[i++] = m; else if (rc != -ENOTTY) break; } dev->reset_methods[i] = 0; } /** * pci_reset_function - quiesce and reset a PCI device function * @dev: PCI device to reset * * Some devices allow an individual function to be reset without affecting * other functions in the same device. The PCI device must be responsive * to PCI config space in order to use this function. * * This function does not just reset the PCI portion of a device, but * clears all the state associated with the device. This function differs * from __pci_reset_function_locked() in that it saves and restores device state * over the reset and takes the PCI device lock. * * Returns 0 if the device function was successfully reset or negative if the * device doesn't support resetting a single function. */ int pci_reset_function(struct pci_dev *dev) { int rc; if (!pci_reset_supported(dev)) return -ENOTTY; pci_dev_lock(dev); pci_dev_save_and_disable(dev); rc = __pci_reset_function_locked(dev); pci_dev_restore(dev); pci_dev_unlock(dev); return rc; } EXPORT_SYMBOL_GPL(pci_reset_function); /** * pci_reset_function_locked - quiesce and reset a PCI device function * @dev: PCI device to reset * * Some devices allow an individual function to be reset without affecting * other functions in the same device. The PCI device must be responsive * to PCI config space in order to use this function. * * This function does not just reset the PCI portion of a device, but * clears all the state associated with the device. This function differs * from __pci_reset_function_locked() in that it saves and restores device state * over the reset. It also differs from pci_reset_function() in that it * requires the PCI device lock to be held. 
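 *
 * A minimal usage sketch, assuming "pdev" is owned by the caller and the
 * device lock is taken around the reset with pci_dev_lock(); callers that do
 * not already hold the lock would normally use pci_reset_function() instead:
 *
 *	int rc;
 *
 *	pci_dev_lock(pdev);
 *	rc = pci_reset_function_locked(pdev);
 *	pci_dev_unlock(pdev);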
* * Returns 0 if the device function was successfully reset or negative if the * device doesn't support resetting a single function. */ int pci_reset_function_locked(struct pci_dev *dev) { int rc; if (!pci_reset_supported(dev)) return -ENOTTY; pci_dev_save_and_disable(dev); rc = __pci_reset_function_locked(dev); pci_dev_restore(dev); return rc; } EXPORT_SYMBOL_GPL(pci_reset_function_locked); /** * pci_try_reset_function - quiesce and reset a PCI device function * @dev: PCI device to reset * * Same as above, except return -EAGAIN if unable to lock device. */ int pci_try_reset_function(struct pci_dev *dev) { int rc; if (!pci_reset_supported(dev)) return -ENOTTY; if (!pci_dev_trylock(dev)) return -EAGAIN; pci_dev_save_and_disable(dev); rc = __pci_reset_function_locked(dev); pci_dev_restore(dev); pci_dev_unlock(dev); return rc; } EXPORT_SYMBOL_GPL(pci_try_reset_function); /* Do any devices on or below this bus prevent a bus reset? */ static bool pci_bus_resettable(struct pci_bus *bus) { struct pci_dev *dev; if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) return false; list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; } return true; } /* Lock devices from the top of the tree down */ static void pci_bus_lock(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { pci_dev_lock(dev); if (dev->subordinate) pci_bus_lock(dev->subordinate); } } /* Unlock devices from the bottom of the tree up */ static void pci_bus_unlock(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->subordinate) pci_bus_unlock(dev->subordinate); pci_dev_unlock(dev); } } /* Return 1 on successful lock, 0 on contention */ static int pci_bus_trylock(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { if (!pci_dev_trylock(dev)) goto unlock; if (dev->subordinate) { if (!pci_bus_trylock(dev->subordinate)) { pci_dev_unlock(dev); goto unlock; } } } return 1; unlock: list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) { if (dev->subordinate) pci_bus_unlock(dev->subordinate); pci_dev_unlock(dev); } return 0; } /* Do any devices on or below this slot prevent a bus reset? 
*/ static bool pci_slot_resettable(struct pci_slot *slot) { struct pci_dev *dev; if (slot->bus->self && (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)) return false; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET || (dev->subordinate && !pci_bus_resettable(dev->subordinate))) return false; } return true; } /* Lock devices from the top of the tree down */ static void pci_slot_lock(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; pci_dev_lock(dev); if (dev->subordinate) pci_bus_lock(dev->subordinate); } } /* Unlock devices from the bottom of the tree up */ static void pci_slot_unlock(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; if (dev->subordinate) pci_bus_unlock(dev->subordinate); pci_dev_unlock(dev); } } /* Return 1 on successful lock, 0 on contention */ static int pci_slot_trylock(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; if (!pci_dev_trylock(dev)) goto unlock; if (dev->subordinate) { if (!pci_bus_trylock(dev->subordinate)) { pci_dev_unlock(dev); goto unlock; } } } return 1; unlock: list_for_each_entry_continue_reverse(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; if (dev->subordinate) pci_bus_unlock(dev->subordinate); pci_dev_unlock(dev); } return 0; } /* * Save and disable devices from the top of the tree down while holding * the @dev mutex lock for the entire tree. */ static void pci_bus_save_and_disable_locked(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { pci_dev_save_and_disable(dev); if (dev->subordinate) pci_bus_save_and_disable_locked(dev->subordinate); } } /* * Restore devices from top of the tree down while holding @dev mutex lock * for the entire tree. Parent bridges need to be restored before we can * get to subordinate devices. */ static void pci_bus_restore_locked(struct pci_bus *bus) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { pci_dev_restore(dev); if (dev->subordinate) pci_bus_restore_locked(dev->subordinate); } } /* * Save and disable devices from the top of the tree down while holding * the @dev mutex lock for the entire tree. */ static void pci_slot_save_and_disable_locked(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; pci_dev_save_and_disable(dev); if (dev->subordinate) pci_bus_save_and_disable_locked(dev->subordinate); } } /* * Restore devices from top of the tree down while holding @dev mutex lock * for the entire tree. Parent bridges need to be restored before we can * get to subordinate devices. 
*/ static void pci_slot_restore_locked(struct pci_slot *slot) { struct pci_dev *dev; list_for_each_entry(dev, &slot->bus->devices, bus_list) { if (!dev->slot || dev->slot != slot) continue; pci_dev_restore(dev); if (dev->subordinate) pci_bus_restore_locked(dev->subordinate); } } static int pci_slot_reset(struct pci_slot *slot, bool probe) { int rc; if (!slot || !pci_slot_resettable(slot)) return -ENOTTY; if (!probe) pci_slot_lock(slot); might_sleep(); rc = pci_reset_hotplug_slot(slot->hotplug, probe); if (!probe) pci_slot_unlock(slot); return rc; } /** * pci_probe_reset_slot - probe whether a PCI slot can be reset * @slot: PCI slot to probe * * Return 0 if slot can be reset, negative if a slot reset is not supported. */ int pci_probe_reset_slot(struct pci_slot *slot) { return pci_slot_reset(slot, PCI_RESET_PROBE); } EXPORT_SYMBOL_GPL(pci_probe_reset_slot); /** * __pci_reset_slot - Try to reset a PCI slot * @slot: PCI slot to reset * * A PCI bus may host multiple slots, each slot may support a reset mechanism * independent of other slots. For instance, some slots may support slot power * control. In the case of a 1:1 bus to slot architecture, this function may * wrap the bus reset to avoid spurious slot related events such as hotplug. * Generally a slot reset should be attempted before a bus reset. All of the * function of the slot and any subordinate buses behind the slot are reset * through this function. PCI config space of all devices in the slot and * behind the slot is saved before and restored after reset. * * Same as above except return -EAGAIN if the slot cannot be locked */ static int __pci_reset_slot(struct pci_slot *slot) { int rc; rc = pci_slot_reset(slot, PCI_RESET_PROBE); if (rc) return rc; if (pci_slot_trylock(slot)) { pci_slot_save_and_disable_locked(slot); might_sleep(); rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET); pci_slot_restore_locked(slot); pci_slot_unlock(slot); } else rc = -EAGAIN; return rc; } static int pci_bus_reset(struct pci_bus *bus, bool probe) { int ret; if (!bus->self || !pci_bus_resettable(bus)) return -ENOTTY; if (probe) return 0; pci_bus_lock(bus); might_sleep(); ret = pci_bridge_secondary_bus_reset(bus->self); pci_bus_unlock(bus); return ret; } /** * pci_bus_error_reset - reset the bridge's subordinate bus * @bridge: The parent device that connects to the bus to reset * * This function will first try to reset the slots on this bus if the method is * available. If slot reset fails or is not available, this will fall back to a * secondary bus reset. */ int pci_bus_error_reset(struct pci_dev *bridge) { struct pci_bus *bus = bridge->subordinate; struct pci_slot *slot; if (!bus) return -ENOTTY; mutex_lock(&pci_slot_mutex); if (list_empty(&bus->slots)) goto bus_reset; list_for_each_entry(slot, &bus->slots, list) if (pci_probe_reset_slot(slot)) goto bus_reset; list_for_each_entry(slot, &bus->slots, list) if (pci_slot_reset(slot, PCI_RESET_DO_RESET)) goto bus_reset; mutex_unlock(&pci_slot_mutex); return 0; bus_reset: mutex_unlock(&pci_slot_mutex); return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET); } /** * pci_probe_reset_bus - probe whether a PCI bus can be reset * @bus: PCI bus to probe * * Return 0 if bus can be reset, negative if a bus reset is not supported. 
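 *
 * A minimal usage sketch, assuming "pdev" is a device owned by the caller
 * and that everything else on its bus can tolerate being reset:
 *
 *	if (pci_probe_reset_bus(pdev->bus) == 0) {
 *		int rc = pci_reset_bus(pdev);
 *
 *		if (rc)
 *			pci_warn(pdev, "slot/bus reset failed: %d\n", rc);
 *	}
 *
 * pci_reset_bus() below prefers a slot reset when the slot supports one and
 * falls back to a secondary bus reset otherwise.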
*/ int pci_probe_reset_bus(struct pci_bus *bus) { return pci_bus_reset(bus, PCI_RESET_PROBE); } EXPORT_SYMBOL_GPL(pci_probe_reset_bus); /** * __pci_reset_bus - Try to reset a PCI bus * @bus: top level PCI bus to reset * * Same as above except return -EAGAIN if the bus cannot be locked */ static int __pci_reset_bus(struct pci_bus *bus) { int rc; rc = pci_bus_reset(bus, PCI_RESET_PROBE); if (rc) return rc; if (pci_bus_trylock(bus)) { pci_bus_save_and_disable_locked(bus); might_sleep(); rc = pci_bridge_secondary_bus_reset(bus->self); pci_bus_restore_locked(bus); pci_bus_unlock(bus); } else rc = -EAGAIN; return rc; } /** * pci_reset_bus - Try to reset a PCI bus * @pdev: top level PCI device to reset via slot/bus * * Same as above except return -EAGAIN if the bus cannot be locked */ int pci_reset_bus(struct pci_dev *pdev) { return (!pci_probe_reset_slot(pdev->slot)) ? __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus); } EXPORT_SYMBOL_GPL(pci_reset_bus); /** * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count * @dev: PCI device to query * * Returns mmrbc: maximum designed memory read count in bytes or * appropriate error value. */ int pcix_get_max_mmrbc(struct pci_dev *dev) { int cap; u32 stat; cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!cap) return -EINVAL; if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) return -EINVAL; return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21); } EXPORT_SYMBOL(pcix_get_max_mmrbc); /** * pcix_get_mmrbc - get PCI-X maximum memory read byte count * @dev: PCI device to query * * Returns mmrbc: maximum memory read count in bytes or appropriate error * value. */ int pcix_get_mmrbc(struct pci_dev *dev) { int cap; u16 cmd; cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!cap) return -EINVAL; if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) return -EINVAL; return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); } EXPORT_SYMBOL(pcix_get_mmrbc); /** * pcix_set_mmrbc - set PCI-X maximum memory read byte count * @dev: PCI device to query * @mmrbc: maximum memory read count in bytes * valid values are 512, 1024, 2048, 4096 * * If possible sets maximum memory read byte count, some bridges have errata * that prevent this. */ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) { int cap; u32 stat, v, o; u16 cmd; if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc)) return -EINVAL; v = ffs(mmrbc) - 10; cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!cap) return -EINVAL; if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) return -EINVAL; if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21) return -E2BIG; if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) return -EINVAL; o = (cmd & PCI_X_CMD_MAX_READ) >> 2; if (o != v) { if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC)) return -EIO; cmd &= ~PCI_X_CMD_MAX_READ; cmd |= v << 2; if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd)) return -EIO; } return 0; } EXPORT_SYMBOL(pcix_set_mmrbc); /** * pcie_get_readrq - get PCI Express read request size * @dev: PCI device to query * * Returns maximum memory read request in bytes or appropriate error value. 
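 *
 * A minimal usage sketch, assuming "pdev" comes from the caller's probe path
 * and that a 512-byte read request size is what the device prefers:
 *
 *	if (pcie_get_readrq(pdev) < 512)
 *		pcie_set_readrq(pdev, 512);
 *
 * pcie_set_readrq() below only accepts power-of-two values between 128 and
 * 4096, and may clamp or reject larger values depending on the bus tuning
 * policy and the host bridge.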
*/ int pcie_get_readrq(struct pci_dev *dev) { u16 ctl; pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); } EXPORT_SYMBOL(pcie_get_readrq); /** * pcie_set_readrq - set PCI Express maximum memory read request * @dev: PCI device to query * @rq: maximum memory read count in bytes * valid values are 128, 256, 512, 1024, 2048, 4096 * * If possible sets maximum memory read request in bytes */ int pcie_set_readrq(struct pci_dev *dev, int rq) { u16 v; int ret; struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) return -EINVAL; /* * If using the "performance" PCIe config, we clamp the read rq * size to the max packet size to keep the host bridge from * generating requests larger than we can cope with. */ if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { int mps = pcie_get_mps(dev); if (mps < rq) rq = mps; } v = (ffs(rq) - 8) << 12; if (bridge->no_inc_mrrs) { int max_mrrs = pcie_get_readrq(dev); if (rq > max_mrrs) { pci_info(dev, "can't set Max_Read_Request_Size to %d; max is %d\n", rq, max_mrrs); return -EINVAL; } } ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, v); return pcibios_err_to_errno(ret); } EXPORT_SYMBOL(pcie_set_readrq); /** * pcie_get_mps - get PCI Express maximum payload size * @dev: PCI device to query * * Returns maximum payload size in bytes */ int pcie_get_mps(struct pci_dev *dev) { u16 ctl; pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5); } EXPORT_SYMBOL(pcie_get_mps); /** * pcie_set_mps - set PCI Express maximum payload size * @dev: PCI device to query * @mps: maximum payload size in bytes * valid values are 128, 256, 512, 1024, 2048, 4096 * * If possible sets maximum payload size */ int pcie_set_mps(struct pci_dev *dev, int mps) { u16 v; int ret; if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) return -EINVAL; v = ffs(mps) - 8; if (v > dev->pcie_mpss) return -EINVAL; v <<= 5; ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_PAYLOAD, v); return pcibios_err_to_errno(ret); } EXPORT_SYMBOL(pcie_set_mps); /** * pcie_bandwidth_available - determine minimum link settings of a PCIe * device and its bandwidth limitation * @dev: PCI device to query * @limiting_dev: storage for device causing the bandwidth limitation * @speed: storage for speed of limiting device * @width: storage for width of limiting device * * Walk up the PCI device chain and find the point where the minimum * bandwidth is available. Return the bandwidth available there and (if * limiting_dev, speed, and width pointers are supplied) information about * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of * raw bandwidth. 
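 *
 * A minimal usage sketch, assuming "pdev" is the caller's device and the
 * caller only wants to log which upstream link is the bottleneck:
 *
 *	struct pci_dev *limit = NULL;
 *	enum pci_bus_speed speed;
 *	enum pcie_link_width width;
 *	u32 bw = pcie_bandwidth_available(pdev, &limit, &speed, &width);
 *
 *	pci_info(pdev, "%u Mb/s available, limited at %s\n",
 *		 bw, limit ? pci_name(limit) : "unknown");
 *
 * For the common "is my device throttled?" report, pcie_print_link_status()
 * below wraps this walk and prints a ready-made message.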
*/ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, enum pci_bus_speed *speed, enum pcie_link_width *width) { u16 lnksta; enum pci_bus_speed next_speed; enum pcie_link_width next_width; u32 bw, next_bw; if (speed) *speed = PCI_SPEED_UNKNOWN; if (width) *width = PCIE_LNK_WIDTH_UNKNOWN; bw = 0; while (dev) { pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT; next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed); /* Check if current device limits the total bandwidth */ if (!bw || next_bw <= bw) { bw = next_bw; if (limiting_dev) *limiting_dev = dev; if (speed) *speed = next_speed; if (width) *width = next_width; } dev = pci_upstream_bridge(dev); } return bw; } EXPORT_SYMBOL(pcie_bandwidth_available); /** * pcie_get_speed_cap - query for the PCI device's link speed capability * @dev: PCI device to query * * Query the PCI device speed capability. Return the maximum link speed * supported by the device. */ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev) { u32 lnkcap2, lnkcap; /* * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18. The * implementation note there recommends using the Supported Link * Speeds Vector in Link Capabilities 2 when supported. * * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software * should use the Supported Link Speeds field in Link Capabilities, * where only 2.5 GT/s and 5.0 GT/s speeds were defined. */ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2); /* PCIe r3.0-compliant */ if (lnkcap2) return PCIE_LNKCAP2_SLS2SPEED(lnkcap2); pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB) return PCIE_SPEED_5_0GT; else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB) return PCIE_SPEED_2_5GT; return PCI_SPEED_UNKNOWN; } EXPORT_SYMBOL(pcie_get_speed_cap); /** * pcie_get_width_cap - query for the PCI device's link width capability * @dev: PCI device to query * * Query the PCI device width capability. Return the maximum link width * supported by the device. */ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev) { u32 lnkcap; pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); if (lnkcap) return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4; return PCIE_LNK_WIDTH_UNKNOWN; } EXPORT_SYMBOL(pcie_get_width_cap); /** * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability * @dev: PCI device * @speed: storage for link speed * @width: storage for link width * * Calculate a PCI device's link bandwidth by querying for its link speed * and width, multiplying them, and applying encoding overhead. The result * is in Mb/s, i.e., megabits/second of raw bandwidth. */ u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, enum pcie_link_width *width) { *speed = pcie_get_speed_cap(dev); *width = pcie_get_width_cap(dev); if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN) return 0; return *width * PCIE_SPEED2MBS_ENC(*speed); } /** * __pcie_print_link_status - Report the PCI device's link speed and width * @dev: PCI device to query * @verbose: Print info even when enough bandwidth is available * * If the available bandwidth at the device is less than the device is * capable of, report the device's maximum possible bandwidth and the * upstream link that limits its performance. 
If @verbose, always print * the available bandwidth, even if the device isn't constrained. */ void __pcie_print_link_status(struct pci_dev *dev, bool verbose) { enum pcie_link_width width, width_cap; enum pci_bus_speed speed, speed_cap; struct pci_dev *limiting_dev = NULL; u32 bw_avail, bw_cap; bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap); bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width); if (bw_avail >= bw_cap && verbose) pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n", bw_cap / 1000, bw_cap % 1000, pci_speed_string(speed_cap), width_cap); else if (bw_avail < bw_cap) pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n", bw_avail / 1000, bw_avail % 1000, pci_speed_string(speed), width, limiting_dev ? pci_name(limiting_dev) : "<unknown>", bw_cap / 1000, bw_cap % 1000, pci_speed_string(speed_cap), width_cap); } /** * pcie_print_link_status - Report the PCI device's link speed and width * @dev: PCI device to query * * Report the available bandwidth at the device. */ void pcie_print_link_status(struct pci_dev *dev) { __pcie_print_link_status(dev, true); } EXPORT_SYMBOL(pcie_print_link_status); /** * pci_select_bars - Make BAR mask from the type of resource * @dev: the PCI device for which BAR mask is made * @flags: resource type mask to be selected * * This helper routine makes bar mask from the type of resource. */ int pci_select_bars(struct pci_dev *dev, unsigned long flags) { int i, bars = 0; for (i = 0; i < PCI_NUM_RESOURCES; i++) if (pci_resource_flags(dev, i) & flags) bars |= (1 << i); return bars; } EXPORT_SYMBOL(pci_select_bars); /* Some architectures require additional programming to enable VGA */ static arch_set_vga_state_t arch_set_vga_state; void __init pci_register_set_vga_state(arch_set_vga_state_t func) { arch_set_vga_state = func; /* NULL disables */ } static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, unsigned int command_bits, u32 flags) { if (arch_set_vga_state) return arch_set_vga_state(dev, decode, command_bits, flags); return 0; } /** * pci_set_vga_state - set VGA decode state on device and parents if requested * @dev: the PCI device * @decode: true = enable decoding, false = disable decoding * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY * @flags: traverse ancestors and change bridges * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE */ int pci_set_vga_state(struct pci_dev *dev, bool decode, unsigned int command_bits, u32 flags) { struct pci_bus *bus; struct pci_dev *bridge; u16 cmd; int rc; WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY))); /* ARCH specific VGA enables */ rc = pci_set_vga_state_arch(dev, decode, command_bits, flags); if (rc) return rc; if (flags & PCI_VGA_STATE_CHANGE_DECODES) { pci_read_config_word(dev, PCI_COMMAND, &cmd); if (decode) cmd |= command_bits; else cmd &= ~command_bits; pci_write_config_word(dev, PCI_COMMAND, cmd); } if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE)) return 0; bus = dev->bus; while (bus) { bridge = bus->self; if (bridge) { pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &cmd); if (decode) cmd |= PCI_BRIDGE_CTL_VGA; else cmd &= ~PCI_BRIDGE_CTL_VGA; pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, cmd); } bus = bus->parent; } return 0; } #ifdef CONFIG_ACPI bool pci_pr3_present(struct pci_dev *pdev) { struct acpi_device *adev; if (acpi_disabled) return false; adev = ACPI_COMPANION(&pdev->dev); if (!adev) return false; return 
adev->power.flags.power_resources && acpi_has_method(adev->handle, "_PR3"); } EXPORT_SYMBOL_GPL(pci_pr3_present); #endif /** * pci_add_dma_alias - Add a DMA devfn alias for a device * @dev: the PCI device for which alias is added * @devfn_from: alias slot and function * @nr_devfns: number of subsequent devfns to alias * * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask * which is used to program permissible bus-devfn source addresses for DMA * requests in an IOMMU. These aliases factor into IOMMU group creation * and are useful for devices generating DMA requests beyond or different * from their logical bus-devfn. Examples include device quirks where the * device simply uses the wrong devfn, as well as non-transparent bridges * where the alias may be a proxy for devices in another domain. * * IOMMU group creation is performed during device discovery or addition, * prior to any potential DMA mapping and therefore prior to driver probing * (especially for userspace assigned devices where IOMMU group definition * cannot be left as a userspace activity). DMA aliases should therefore * be configured via quirks, such as the PCI fixup header quirk. */ void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned int nr_devfns) { int devfn_to; nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from); devfn_to = devfn_from + nr_devfns - 1; if (!dev->dma_alias_mask) dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL); if (!dev->dma_alias_mask) { pci_warn(dev, "Unable to allocate DMA alias mask\n"); return; } bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns); if (nr_devfns == 1) pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n", PCI_SLOT(devfn_from), PCI_FUNC(devfn_from)); else if (nr_devfns > 1) pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n", PCI_SLOT(devfn_from), PCI_FUNC(devfn_from), PCI_SLOT(devfn_to), PCI_FUNC(devfn_to)); } bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2) { return (dev1->dma_alias_mask && test_bit(dev2->devfn, dev1->dma_alias_mask)) || (dev2->dma_alias_mask && test_bit(dev1->devfn, dev2->dma_alias_mask)) || pci_real_dma_dev(dev1) == dev2 || pci_real_dma_dev(dev2) == dev1; } bool pci_device_is_present(struct pci_dev *pdev) { u32 v; /* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */ pdev = pci_physfn(pdev); if (pci_dev_is_disconnected(pdev)) return false; return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0); } EXPORT_SYMBOL_GPL(pci_device_is_present); void pci_ignore_hotplug(struct pci_dev *dev) { struct pci_dev *bridge = dev->bus->self; dev->ignore_hotplug = 1; /* Propagate the "ignore hotplug" setting to the parent bridge. */ if (bridge) bridge->ignore_hotplug = 1; } EXPORT_SYMBOL_GPL(pci_ignore_hotplug); /** * pci_real_dma_dev - Get PCI DMA device for PCI device * @dev: the PCI device that may have a PCI DMA alias * * Permits the platform to provide architecture-specific functionality to * devices needing to alias DMA to another PCI device on another PCI bus. If * the PCI device is on the same bus, it is recommended to use * pci_add_dma_alias(). This is the default implementation. Architecture * implementations can override this. */ struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev) { return dev; } resource_size_t __weak pcibios_default_alignment(void) { return 0; } /* * Arches that don't want to expose struct resource to userland as-is in * sysfs and /proc can implement their own pci_resource_to_user(). 
*/ void __weak pci_resource_to_user(const struct pci_dev *dev, int bar, const struct resource *rsrc, resource_size_t *start, resource_size_t *end) { *start = rsrc->start; *end = rsrc->end; } static char *resource_alignment_param; static DEFINE_SPINLOCK(resource_alignment_lock); /** * pci_specified_resource_alignment - get resource alignment specified by user. * @dev: the PCI device to get * @resize: whether or not to change resources' size when reassigning alignment * * RETURNS: Resource alignment if it is specified. * Zero if it is not specified. */ static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev, bool *resize) { int align_order, count; resource_size_t align = pcibios_default_alignment(); const char *p; int ret; spin_lock(&resource_alignment_lock); p = resource_alignment_param; if (!p || !*p) goto out; if (pci_has_flag(PCI_PROBE_ONLY)) { align = 0; pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n"); goto out; } while (*p) { count = 0; if (sscanf(p, "%d%n", &align_order, &count) == 1 && p[count] == '@') { p += count + 1; if (align_order > 63) { pr_err("PCI: Invalid requested alignment (order %d)\n", align_order); align_order = PAGE_SHIFT; } } else { align_order = PAGE_SHIFT; } ret = pci_dev_str_match(dev, p, &p); if (ret == 1) { *resize = true; align = 1ULL << align_order; break; } else if (ret < 0) { pr_err("PCI: Can't parse resource_alignment parameter: %s\n", p); break; } if (*p != ';' && *p != ',') { /* End of param or invalid format */ break; } p++; } out: spin_unlock(&resource_alignment_lock); return align; } static void pci_request_resource_alignment(struct pci_dev *dev, int bar, resource_size_t align, bool resize) { struct resource *r = &dev->resource[bar]; resource_size_t size; if (!(r->flags & IORESOURCE_MEM)) return; if (r->flags & IORESOURCE_PCI_FIXED) { pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n", bar, r, (unsigned long long)align); return; } size = resource_size(r); if (size >= align) return; /* * Increase the alignment of the resource. There are two ways we * can do this: * * 1) Increase the size of the resource. BARs are aligned on their * size, so when we reallocate space for this resource, we'll * allocate it with the larger alignment. This also prevents * assignment of any other BARs inside the alignment region, so * if we're requesting page alignment, this means no other BARs * will share the page. * * The disadvantage is that this makes the resource larger than * the hardware BAR, which may break drivers that compute things * based on the resource size, e.g., to find registers at a * fixed offset before the end of the BAR. * * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and * set r->start to the desired alignment. By itself this * doesn't prevent other BARs being put inside the alignment * region, but if we realign *every* resource of every device in * the system, none of them will share an alignment region. * * When the user has requested alignment for only some devices via * the "pci=resource_alignment" argument, "resize" is true and we * use the first method. Otherwise we assume we're aligning all * devices and we use the second. 
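 *
 * A worked example of the first method, assuming the device-spec form
 * accepted by pci_dev_str_match() is a domain:bus:device.function string:
 * booting with "pci=resource_alignment=12@0000:03:00.0" makes
 * pci_specified_resource_alignment() parse align_order 12, i.e. a requested
 * alignment of 1ULL << 12 = 4096 bytes for that device's memory BARs, and
 * set "resize" to true, so the resource size is grown as described above.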
*/ pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n", bar, r, (unsigned long long)align); if (resize) { r->start = 0; r->end = align - 1; } else { r->flags &= ~IORESOURCE_SIZEALIGN; r->flags |= IORESOURCE_STARTALIGN; r->start = align; r->end = r->start + size - 1; } r->flags |= IORESOURCE_UNSET; } /* * This function disables memory decoding and releases memory resources * of the device specified by kernel's boot parameter 'pci=resource_alignment='. * It also rounds up size to specified alignment. * Later on, the kernel will assign page-aligned memory resource back * to the device. */ void pci_reassigndev_resource_alignment(struct pci_dev *dev) { int i; struct resource *r; resource_size_t align; u16 command; bool resize = false; /* * VF BARs are read-only zero according to SR-IOV spec r1.1, sec * 3.4.1.11. Their resources are allocated from the space * described by the VF BARx register in the PF's SR-IOV capability. * We can't influence their alignment here. */ if (dev->is_virtfn) return; /* check if specified PCI is target device to reassign */ align = pci_specified_resource_alignment(dev, &resize); if (!align) return; if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL && (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) { pci_warn(dev, "Can't reassign resources to host bridge\n"); return; } pci_read_config_word(dev, PCI_COMMAND, &command); command &= ~PCI_COMMAND_MEMORY; pci_write_config_word(dev, PCI_COMMAND, command); for (i = 0; i <= PCI_ROM_RESOURCE; i++) pci_request_resource_alignment(dev, i, align, resize); /* * Need to disable bridge's resource window, * to enable the kernel to reassign new resource * window later on. */ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) { r = &dev->resource[i]; if (!(r->flags & IORESOURCE_MEM)) continue; r->flags |= IORESOURCE_UNSET; r->end = resource_size(r) - 1; r->start = 0; } pci_disable_bridge_window(dev); } } static ssize_t resource_alignment_show(const struct bus_type *bus, char *buf) { size_t count = 0; spin_lock(&resource_alignment_lock); if (resource_alignment_param) count = sysfs_emit(buf, "%s\n", resource_alignment_param); spin_unlock(&resource_alignment_lock); return count; } static ssize_t resource_alignment_store(const struct bus_type *bus, const char *buf, size_t count) { char *param, *old, *end; if (count >= (PAGE_SIZE - 1)) return -EINVAL; param = kstrndup(buf, count, GFP_KERNEL); if (!param) return -ENOMEM; end = strchr(param, '\n'); if (end) *end = '\0'; spin_lock(&resource_alignment_lock); old = resource_alignment_param; if (strlen(param)) { resource_alignment_param = param; } else { kfree(param); resource_alignment_param = NULL; } spin_unlock(&resource_alignment_lock); kfree(old); return count; } static BUS_ATTR_RW(resource_alignment); static int __init pci_resource_alignment_sysfs_init(void) { return bus_create_file(&pci_bus_type, &bus_attr_resource_alignment); } late_initcall(pci_resource_alignment_sysfs_init); static void pci_no_domains(void) { #ifdef CONFIG_PCI_DOMAINS pci_domains_supported = 0; #endif } #ifdef CONFIG_PCI_DOMAINS_GENERIC static DEFINE_IDA(pci_domain_nr_static_ida); static DEFINE_IDA(pci_domain_nr_dynamic_ida); static void of_pci_reserve_static_domain_nr(void) { struct device_node *np; int domain_nr; for_each_node_by_type(np, "pci") { domain_nr = of_get_pci_domain_nr(np); if (domain_nr < 0) continue; /* * Permanently allocate domain_nr in dynamic_ida * to prevent it from dynamic allocation. 
*/ ida_alloc_range(&pci_domain_nr_dynamic_ida, domain_nr, domain_nr, GFP_KERNEL); } } static int of_pci_bus_find_domain_nr(struct device *parent) { static bool static_domains_reserved = false; int domain_nr; /* On the first call scan device tree for static allocations. */ if (!static_domains_reserved) { of_pci_reserve_static_domain_nr(); static_domains_reserved = true; } if (parent) { /* * If domain is in DT, allocate it in static IDA. This * prevents duplicate static allocations in case of errors * in DT. */ domain_nr = of_get_pci_domain_nr(parent->of_node); if (domain_nr >= 0) return ida_alloc_range(&pci_domain_nr_static_ida, domain_nr, domain_nr, GFP_KERNEL); } /* * If domain was not specified in DT, choose a free ID from dynamic * allocations. All domain numbers from DT are permanently in * dynamic allocations to prevent assigning them to other DT nodes * without static domain. */ return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL); } static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent) { if (bus->domain_nr < 0) return; /* Release domain from IDA where it was allocated. */ if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr) ida_free(&pci_domain_nr_static_ida, bus->domain_nr); else ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr); } int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent) { return acpi_disabled ? of_pci_bus_find_domain_nr(parent) : acpi_pci_bus_find_domain_nr(bus); } void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent) { if (!acpi_disabled) return; of_pci_bus_release_domain_nr(bus, parent); } #endif /** * pci_ext_cfg_avail - can we access extended PCI config space? * * Returns 1 if we can access PCI extended config space (offsets * greater than 0xff). This is the default implementation. Architecture * implementations can override this. 
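 *
 * A minimal sketch of such an override, assuming an architecture that can
 * only reach extended config space once an ECAM window has been mapped
 * ("ecam_mapped" is a hypothetical per-arch flag, not a symbol from this
 * file):
 *
 *	int pci_ext_cfg_avail(void)
 *	{
 *		return ecam_mapped ? 1 : 0;
 *	}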
*/ int __weak pci_ext_cfg_avail(void) { return 1; } void __weak pci_fixup_cardbus(struct pci_bus *bus) { } EXPORT_SYMBOL(pci_fixup_cardbus); static int __init pci_setup(char *str) { while (str) { char *k = strchr(str, ','); if (k) *k++ = 0; if (*str && (str = pcibios_setup(str)) && *str) { if (!strcmp(str, "nomsi")) { pci_no_msi(); } else if (!strncmp(str, "noats", 5)) { pr_info("PCIe: ATS is disabled\n"); pcie_ats_disabled = true; } else if (!strcmp(str, "noaer")) { pci_no_aer(); } else if (!strcmp(str, "earlydump")) { pci_early_dump = true; } else if (!strncmp(str, "realloc=", 8)) { pci_realloc_get_opt(str + 8); } else if (!strncmp(str, "realloc", 7)) { pci_realloc_get_opt("on"); } else if (!strcmp(str, "nodomains")) { pci_no_domains(); } else if (!strncmp(str, "noari", 5)) { pcie_ari_disabled = true; } else if (!strncmp(str, "cbiosize=", 9)) { pci_cardbus_io_size = memparse(str + 9, &str); } else if (!strncmp(str, "cbmemsize=", 10)) { pci_cardbus_mem_size = memparse(str + 10, &str); } else if (!strncmp(str, "resource_alignment=", 19)) { resource_alignment_param = str + 19; } else if (!strncmp(str, "ecrc=", 5)) { pcie_ecrc_get_policy(str + 5); } else if (!strncmp(str, "hpiosize=", 9)) { pci_hotplug_io_size = memparse(str + 9, &str); } else if (!strncmp(str, "hpmmiosize=", 11)) { pci_hotplug_mmio_size = memparse(str + 11, &str); } else if (!strncmp(str, "hpmmioprefsize=", 15)) { pci_hotplug_mmio_pref_size = memparse(str + 15, &str); } else if (!strncmp(str, "hpmemsize=", 10)) { pci_hotplug_mmio_size = memparse(str + 10, &str); pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size; } else if (!strncmp(str, "hpbussize=", 10)) { pci_hotplug_bus_size = simple_strtoul(str + 10, &str, 0); if (pci_hotplug_bus_size > 0xff) pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE; } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { pcie_bus_config = PCIE_BUS_TUNE_OFF; } else if (!strncmp(str, "pcie_bus_safe", 13)) { pcie_bus_config = PCIE_BUS_SAFE; } else if (!strncmp(str, "pcie_bus_perf", 13)) { pcie_bus_config = PCIE_BUS_PERFORMANCE; } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { pcie_bus_config = PCIE_BUS_PEER2PEER; } else if (!strncmp(str, "pcie_scan_all", 13)) { pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS); } else if (!strncmp(str, "disable_acs_redir=", 18)) { disable_acs_redir_param = str + 18; } else { pr_err("PCI: Unknown option `%s'\n", str); } } str = k; } return 0; } early_param("pci", pci_setup); /* * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized * in pci_setup(), above, to point to data in the __initdata section which * will be freed after the init sequence is complete. We can't allocate memory * in pci_setup() because some architectures do not have any memory allocation * service available during an early_param() call. So we allocate memory and * copy the variable here before the init section is freed. * */ static int __init pci_realloc_setup_params(void) { resource_alignment_param = kstrdup(resource_alignment_param, GFP_KERNEL); disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL); return 0; } pure_initcall(pci_realloc_setup_params);
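/*
 * A usage note on the "pci=" early parameter handled by pci_setup() above,
 * given as an illustrative (not exhaustive) example: the option string is
 * split on commas, so a command line such as
 *
 *	pci=noaer,pcie_bus_safe,hpiosize=8K,resource_alignment=12@0000:03:00.0
 *
 * disables AER via pci_no_aer(), selects the PCIE_BUS_SAFE tuning policy,
 * sets pci_hotplug_io_size to 8K via memparse(), and hands the alignment
 * string to pci_specified_resource_alignment() through
 * resource_alignment_param, all through the branches visible in pci_setup().
 */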
linux-master
drivers/pci/pci.c
// SPDX-License-Identifier: GPL-2.0 /* * PCI VPD support * * Copyright (C) 2010 Broadcom Corporation. */ #include <linux/pci.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/sched/signal.h> #include <asm/unaligned.h> #include "pci.h" #define PCI_VPD_LRDT_TAG_SIZE 3 #define PCI_VPD_SRDT_LEN_MASK 0x07 #define PCI_VPD_SRDT_TAG_SIZE 1 #define PCI_VPD_STIN_END 0x0f #define PCI_VPD_INFO_FLD_HDR_SIZE 3 static u16 pci_vpd_lrdt_size(const u8 *lrdt) { return get_unaligned_le16(lrdt + 1); } static u8 pci_vpd_srdt_tag(const u8 *srdt) { return *srdt >> 3; } static u8 pci_vpd_srdt_size(const u8 *srdt) { return *srdt & PCI_VPD_SRDT_LEN_MASK; } static u8 pci_vpd_info_field_size(const u8 *info_field) { return info_field[2]; } /* VPD access through PCI 2.2+ VPD capability */ static struct pci_dev *pci_get_func0_dev(struct pci_dev *dev) { return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); } #define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1) #define PCI_VPD_SZ_INVALID UINT_MAX /** * pci_vpd_size - determine actual size of Vital Product Data * @dev: pci device struct */ static size_t pci_vpd_size(struct pci_dev *dev) { size_t off = 0, size; unsigned char tag, header[1+2]; /* 1 byte tag, 2 bytes length */ while (pci_read_vpd_any(dev, off, 1, header) == 1) { size = 0; if (off == 0 && (header[0] == 0x00 || header[0] == 0xff)) goto error; if (header[0] & PCI_VPD_LRDT) { /* Large Resource Data Type Tag */ if (pci_read_vpd_any(dev, off + 1, 2, &header[1]) != 2) { pci_warn(dev, "failed VPD read at offset %zu\n", off + 1); return off ?: PCI_VPD_SZ_INVALID; } size = pci_vpd_lrdt_size(header); if (off + size > PCI_VPD_MAX_SIZE) goto error; off += PCI_VPD_LRDT_TAG_SIZE + size; } else { /* Short Resource Data Type Tag */ tag = pci_vpd_srdt_tag(header); size = pci_vpd_srdt_size(header); if (off + size > PCI_VPD_MAX_SIZE) goto error; off += PCI_VPD_SRDT_TAG_SIZE + size; if (tag == PCI_VPD_STIN_END) /* End tag descriptor */ return off; } } return off; error: pci_info(dev, "invalid VPD tag %#04x (size %zu) at offset %zu%s\n", header[0], size, off, off == 0 ? "; assume missing optional EEPROM" : ""); return off ?: PCI_VPD_SZ_INVALID; } static bool pci_vpd_available(struct pci_dev *dev, bool check_size) { struct pci_vpd *vpd = &dev->vpd; if (!vpd->cap) return false; if (vpd->len == 0 && check_size) { vpd->len = pci_vpd_size(dev); if (vpd->len == PCI_VPD_SZ_INVALID) { vpd->cap = 0; return false; } } return true; } /* * Wait for last operation to complete. * This code has to spin since there is no other notification from the PCI * hardware. Since the VPD is often implemented by serial attachment to an * EEPROM, it may take many milliseconds to complete. * @set: if true wait for flag to be set, else wait for it to be cleared * * Returns 0 on success, negative values indicate error. */ static int pci_vpd_wait(struct pci_dev *dev, bool set) { struct pci_vpd *vpd = &dev->vpd; unsigned long timeout = jiffies + msecs_to_jiffies(125); unsigned long max_sleep = 16; u16 status; int ret; do { ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR, &status); if (ret < 0) return ret; if (!!(status & PCI_VPD_ADDR_F) == set) return 0; if (time_after(jiffies, timeout)) break; usleep_range(10, max_sleep); if (max_sleep < 1024) max_sleep *= 2; } while (true); pci_warn(dev, "VPD access failed. This is likely a firmware bug on this device. 
Contact the card vendor for a firmware update\n"); return -ETIMEDOUT; } static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count, void *arg, bool check_size) { struct pci_vpd *vpd = &dev->vpd; unsigned int max_len; int ret = 0; loff_t end = pos + count; u8 *buf = arg; if (!pci_vpd_available(dev, check_size)) return -ENODEV; if (pos < 0) return -EINVAL; max_len = check_size ? vpd->len : PCI_VPD_MAX_SIZE; if (pos >= max_len) return 0; if (end > max_len) { end = max_len; count = end - pos; } if (mutex_lock_killable(&vpd->lock)) return -EINTR; while (pos < end) { u32 val; unsigned int i, skip; if (fatal_signal_pending(current)) { ret = -EINTR; break; } ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR, pos & ~3); if (ret < 0) break; ret = pci_vpd_wait(dev, true); if (ret < 0) break; ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val); if (ret < 0) break; skip = pos & 3; for (i = 0; i < sizeof(u32); i++) { if (i >= skip) { *buf++ = val; if (++pos == end) break; } val >>= 8; } } mutex_unlock(&vpd->lock); return ret ? ret : count; } static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count, const void *arg, bool check_size) { struct pci_vpd *vpd = &dev->vpd; unsigned int max_len; const u8 *buf = arg; loff_t end = pos + count; int ret = 0; if (!pci_vpd_available(dev, check_size)) return -ENODEV; if (pos < 0 || (pos & 3) || (count & 3)) return -EINVAL; max_len = check_size ? vpd->len : PCI_VPD_MAX_SIZE; if (end > max_len) return -EINVAL; if (mutex_lock_killable(&vpd->lock)) return -EINTR; while (pos < end) { ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, get_unaligned_le32(buf)); if (ret < 0) break; ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR, pos | PCI_VPD_ADDR_F); if (ret < 0) break; ret = pci_vpd_wait(dev, false); if (ret < 0) break; buf += sizeof(u32); pos += sizeof(u32); } mutex_unlock(&vpd->lock); return ret ? 
ret : count; } void pci_vpd_init(struct pci_dev *dev) { if (dev->vpd.len == PCI_VPD_SZ_INVALID) return; dev->vpd.cap = pci_find_capability(dev, PCI_CAP_ID_VPD); mutex_init(&dev->vpd.lock); } static ssize_t vpd_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj)); struct pci_dev *vpd_dev = dev; ssize_t ret; if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) { vpd_dev = pci_get_func0_dev(dev); if (!vpd_dev) return -ENODEV; } pci_config_pm_runtime_get(vpd_dev); ret = pci_read_vpd(vpd_dev, off, count, buf); pci_config_pm_runtime_put(vpd_dev); if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) pci_dev_put(vpd_dev); return ret; } static ssize_t vpd_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct pci_dev *dev = to_pci_dev(kobj_to_dev(kobj)); struct pci_dev *vpd_dev = dev; ssize_t ret; if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) { vpd_dev = pci_get_func0_dev(dev); if (!vpd_dev) return -ENODEV; } pci_config_pm_runtime_get(vpd_dev); ret = pci_write_vpd(vpd_dev, off, count, buf); pci_config_pm_runtime_put(vpd_dev); if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) pci_dev_put(vpd_dev); return ret; } static BIN_ATTR(vpd, 0600, vpd_read, vpd_write, 0); static struct bin_attribute *vpd_attrs[] = { &bin_attr_vpd, NULL, }; static umode_t vpd_attr_is_visible(struct kobject *kobj, struct bin_attribute *a, int n) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); if (!pdev->vpd.cap) return 0; return a->attr.mode; } const struct attribute_group pci_dev_vpd_attr_group = { .bin_attrs = vpd_attrs, .is_bin_visible = vpd_attr_is_visible, }; void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size) { unsigned int len; void *buf; int cnt; if (!pci_vpd_available(dev, true)) return ERR_PTR(-ENODEV); len = dev->vpd.len; buf = kmalloc(len, GFP_KERNEL); if (!buf) return ERR_PTR(-ENOMEM); cnt = pci_read_vpd(dev, 0, len, buf); if (cnt != len) { kfree(buf); return ERR_PTR(-EIO); } if (size) *size = len; return buf; } EXPORT_SYMBOL_GPL(pci_vpd_alloc); static int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt, unsigned int *size) { int i = 0; /* look for LRDT tags only, end tag is the only SRDT tag */ while (i + PCI_VPD_LRDT_TAG_SIZE <= len && buf[i] & PCI_VPD_LRDT) { unsigned int lrdt_len = pci_vpd_lrdt_size(buf + i); u8 tag = buf[i]; i += PCI_VPD_LRDT_TAG_SIZE; if (tag == rdt) { if (i + lrdt_len > len) lrdt_len = len - i; if (size) *size = lrdt_len; return i; } i += lrdt_len; } return -ENOENT; } int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size) { return pci_vpd_find_tag(buf, len, PCI_VPD_LRDT_ID_STRING, size); } EXPORT_SYMBOL_GPL(pci_vpd_find_id_string); static int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, unsigned int len, const char *kw) { int i; for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) { if (buf[i + 0] == kw[0] && buf[i + 1] == kw[1]) return i; i += PCI_VPD_INFO_FLD_HDR_SIZE + pci_vpd_info_field_size(&buf[i]); } return -ENOENT; } static ssize_t __pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf, bool check_size) { ssize_t ret; if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) { dev = pci_get_func0_dev(dev); if (!dev) return -ENODEV; ret = pci_vpd_read(dev, pos, count, buf, check_size); pci_dev_put(dev); return ret; } return pci_vpd_read(dev, pos, count, buf, check_size); } /** * pci_read_vpd - Read one entry from Vital Product Data * @dev: 
PCI device struct * @pos: offset in VPD space * @count: number of bytes to read * @buf: pointer to where to store result */ ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf) { return __pci_read_vpd(dev, pos, count, buf, true); } EXPORT_SYMBOL(pci_read_vpd); /* Same, but allow to access any address */ ssize_t pci_read_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, void *buf) { return __pci_read_vpd(dev, pos, count, buf, false); } EXPORT_SYMBOL(pci_read_vpd_any); static ssize_t __pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf, bool check_size) { ssize_t ret; if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) { dev = pci_get_func0_dev(dev); if (!dev) return -ENODEV; ret = pci_vpd_write(dev, pos, count, buf, check_size); pci_dev_put(dev); return ret; } return pci_vpd_write(dev, pos, count, buf, check_size); } /** * pci_write_vpd - Write entry to Vital Product Data * @dev: PCI device struct * @pos: offset in VPD space * @count: number of bytes to write * @buf: buffer containing write data */ ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf) { return __pci_write_vpd(dev, pos, count, buf, true); } EXPORT_SYMBOL(pci_write_vpd); /* Same, but allow to access any address */ ssize_t pci_write_vpd_any(struct pci_dev *dev, loff_t pos, size_t count, const void *buf) { return __pci_write_vpd(dev, pos, count, buf, false); } EXPORT_SYMBOL(pci_write_vpd_any); int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len, const char *kw, unsigned int *size) { int ro_start, infokw_start; unsigned int ro_len, infokw_size; ro_start = pci_vpd_find_tag(buf, len, PCI_VPD_LRDT_RO_DATA, &ro_len); if (ro_start < 0) return ro_start; infokw_start = pci_vpd_find_info_keyword(buf, ro_start, ro_len, kw); if (infokw_start < 0) return infokw_start; infokw_size = pci_vpd_info_field_size(buf + infokw_start); infokw_start += PCI_VPD_INFO_FLD_HDR_SIZE; if (infokw_start + infokw_size > len) return -EINVAL; if (size) *size = infokw_size; return infokw_start; } EXPORT_SYMBOL_GPL(pci_vpd_find_ro_info_keyword); int pci_vpd_check_csum(const void *buf, unsigned int len) { const u8 *vpd = buf; unsigned int size; u8 csum = 0; int rv_start; rv_start = pci_vpd_find_ro_info_keyword(buf, len, PCI_VPD_RO_KEYWORD_CHKSUM, &size); if (rv_start == -ENOENT) /* no checksum in VPD */ return 1; else if (rv_start < 0) return rv_start; if (!size) return -EINVAL; while (rv_start >= 0) csum += vpd[rv_start--]; return csum ? -EILSEQ : 0; } EXPORT_SYMBOL_GPL(pci_vpd_check_csum); #ifdef CONFIG_PCI_QUIRKS /* * Quirk non-zero PCI functions to route VPD access through function 0 for * devices that share VPD resources between functions. The functions are * expected to be identical devices. */ static void quirk_f0_vpd_link(struct pci_dev *dev) { struct pci_dev *f0; if (!PCI_FUNC(dev->devfn)) return; f0 = pci_get_func0_dev(dev); if (!f0) return; if (f0->vpd.cap && dev->class == f0->class && dev->vendor == f0->vendor && dev->device == f0->device) dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0; pci_dev_put(f0); } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link); /* * If a device follows the VPD format spec, the PCI core will not read or * write past the VPD End Tag. But some vendors do not follow the VPD * format spec, so we can't tell how much data is safe to access. Devices * may behave unpredictably if we access too much. Blacklist these devices * so we don't touch VPD at all. 
*/ static void quirk_blacklist_vpd(struct pci_dev *dev) { dev->vpd.len = PCI_VPD_SZ_INVALID; pci_warn(dev, FW_BUG "disabling VPD access (can't determine size of non-standard VPD format)\n"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID, quirk_blacklist_vpd); /* * The Amazon Annapurna Labs 0x0031 device id is reused for other non Root Port * device types, so the quirk is registered for the PCI_CLASS_BRIDGE_PCI class. */ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, PCI_CLASS_BRIDGE_PCI, 8, quirk_blacklist_vpd); static void quirk_chelsio_extend_vpd(struct pci_dev *dev) { int chip = (dev->device & 0xf000) >> 12; int func = (dev->device & 0x0f00) >> 8; int prod = (dev->device & 0x00ff) >> 0; /* * If this is a T3-based adapter, there's a 1KB VPD area at offset * 0xc00 which contains the preferred VPD values. If this is a T4 or * later based adapter, the special VPD is at offset 0x400 for the * Physical Functions (the SR-IOV Virtual Functions have no VPD * Capabilities). The PCI VPD Access core routines will normally * compute the size of the VPD by parsing the VPD Data Structure at * offset 0x000. This will result in silent failures when attempting * to accesses these other VPD areas which are beyond those computed * limits. */ if (chip == 0x0 && prod >= 0x20) dev->vpd.len = 8192; else if (chip >= 0x4 && func < 0x8) dev->vpd.len = 2048; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, quirk_chelsio_extend_vpd); #endif
linux-master
drivers/pci/vpd.c
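The exported helpers above (pci_vpd_alloc(), pci_vpd_find_ro_info_keyword()) are intended to be called from device drivers. A minimal sketch of reading the read-only serial-number keyword, assuming a hypothetical driver code path:

/*
 * Sketch of driver-side VPD usage with the helpers defined above.  The
 * function and its caller are hypothetical; pci_vpd_alloc(),
 * pci_vpd_find_ro_info_keyword() and PCI_VPD_RO_KEYWORD_SERIALNO come
 * from the PCI core and <linux/pci.h>.
 */
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_read_serial(struct pci_dev *pdev)
{
	unsigned int vpd_len, kw_len;
	void *vpd;
	char *sn;
	int pos;

	vpd = pci_vpd_alloc(pdev, &vpd_len);	/* reads the whole VPD image */
	if (IS_ERR(vpd))
		return PTR_ERR(vpd);

	pos = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
					   PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
	if (pos < 0) {
		kfree(vpd);
		return pos;
	}

	sn = kmemdup_nul((char *)vpd + pos, kw_len, GFP_KERNEL);	/* NUL-terminate */
	kfree(vpd);
	if (!sn)
		return -ENOMEM;

	pci_info(pdev, "VPD serial number: %s\n", sn);
	kfree(sn);
	return 0;
}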
// SPDX-License-Identifier: GPL-2.0 /* * PCI Virtual Channel support * * Copyright (C) 2013 Red Hat, Inc. All rights reserved. * Author: Alex Williamson <[email protected]> */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pci_regs.h> #include <linux/types.h> #include "pci.h" /** * pci_vc_save_restore_dwords - Save or restore a series of dwords * @dev: device * @pos: starting config space position * @buf: buffer to save to or restore from * @dwords: number of dwords to save/restore * @save: whether to save or restore */ static void pci_vc_save_restore_dwords(struct pci_dev *dev, int pos, u32 *buf, int dwords, bool save) { int i; for (i = 0; i < dwords; i++, buf++) { if (save) pci_read_config_dword(dev, pos + (i * 4), buf); else pci_write_config_dword(dev, pos + (i * 4), *buf); } } /** * pci_vc_load_arb_table - load and wait for VC arbitration table * @dev: device * @pos: starting position of VC capability (VC/VC9/MFVC) * * Set Load VC Arbitration Table bit requesting hardware to apply the VC * Arbitration Table (previously loaded). When the VC Arbitration Table * Status clears, hardware has latched the table into VC arbitration logic. */ static void pci_vc_load_arb_table(struct pci_dev *dev, int pos) { u16 ctrl; pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL, &ctrl); pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL, ctrl | PCI_VC_PORT_CTRL_LOAD_TABLE); if (pci_wait_for_pending(dev, pos + PCI_VC_PORT_STATUS, PCI_VC_PORT_STATUS_TABLE)) return; pci_err(dev, "VC arbitration table failed to load\n"); } /** * pci_vc_load_port_arb_table - Load and wait for VC port arbitration table * @dev: device * @pos: starting position of VC capability (VC/VC9/MFVC) * @res: VC resource number, ie. VCn (0-7) * * Set Load Port Arbitration Table bit requesting hardware to apply the Port * Arbitration Table (previously loaded). When the Port Arbitration Table * Status clears, hardware has latched the table into port arbitration logic. */ static void pci_vc_load_port_arb_table(struct pci_dev *dev, int pos, int res) { int ctrl_pos, status_pos; u32 ctrl; ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF); status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF); pci_read_config_dword(dev, ctrl_pos, &ctrl); pci_write_config_dword(dev, ctrl_pos, ctrl | PCI_VC_RES_CTRL_LOAD_TABLE); if (pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_TABLE)) return; pci_err(dev, "VC%d port arbitration table failed to load\n", res); } /** * pci_vc_enable - Enable virtual channel * @dev: device * @pos: starting position of VC capability (VC/VC9/MFVC) * @res: VC res number, ie. VCn (0-7) * * A VC is enabled by setting the enable bit in matching resource control * registers on both sides of a link. We therefore need to find the opposite * end of the link. To keep this simple we enable from the downstream device. * RC devices do not have an upstream device, nor does it seem that VC9 do * (spec is unclear). Once we find the upstream device, match the VC ID to * get the correct resource, disable and enable on both ends. 
*/ static void pci_vc_enable(struct pci_dev *dev, int pos, int res) { int ctrl_pos, status_pos, id, pos2, evcc, i, ctrl_pos2, status_pos2; u32 ctrl, header, cap1, ctrl2; struct pci_dev *link = NULL; /* Enable VCs from the downstream device */ if (!pci_is_pcie(dev) || !pcie_downstream_port(dev)) return; ctrl_pos = pos + PCI_VC_RES_CTRL + (res * PCI_CAP_VC_PER_VC_SIZEOF); status_pos = pos + PCI_VC_RES_STATUS + (res * PCI_CAP_VC_PER_VC_SIZEOF); pci_read_config_dword(dev, ctrl_pos, &ctrl); id = ctrl & PCI_VC_RES_CTRL_ID; pci_read_config_dword(dev, pos, &header); /* If there is no opposite end of the link, skip to enable */ if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_VC9 || pci_is_root_bus(dev->bus)) goto enable; pos2 = pci_find_ext_capability(dev->bus->self, PCI_EXT_CAP_ID_VC); if (!pos2) goto enable; pci_read_config_dword(dev->bus->self, pos2 + PCI_VC_PORT_CAP1, &cap1); evcc = cap1 & PCI_VC_CAP1_EVCC; /* VC0 is hardwired enabled, so we can start with 1 */ for (i = 1; i < evcc + 1; i++) { ctrl_pos2 = pos2 + PCI_VC_RES_CTRL + (i * PCI_CAP_VC_PER_VC_SIZEOF); status_pos2 = pos2 + PCI_VC_RES_STATUS + (i * PCI_CAP_VC_PER_VC_SIZEOF); pci_read_config_dword(dev->bus->self, ctrl_pos2, &ctrl2); if ((ctrl2 & PCI_VC_RES_CTRL_ID) == id) { link = dev->bus->self; break; } } if (!link) goto enable; /* Disable if enabled */ if (ctrl2 & PCI_VC_RES_CTRL_ENABLE) { ctrl2 &= ~PCI_VC_RES_CTRL_ENABLE; pci_write_config_dword(link, ctrl_pos2, ctrl2); } /* Enable on both ends */ ctrl2 |= PCI_VC_RES_CTRL_ENABLE; pci_write_config_dword(link, ctrl_pos2, ctrl2); enable: ctrl |= PCI_VC_RES_CTRL_ENABLE; pci_write_config_dword(dev, ctrl_pos, ctrl); if (!pci_wait_for_pending(dev, status_pos, PCI_VC_RES_STATUS_NEGO)) pci_err(dev, "VC%d negotiation stuck pending\n", id); if (link && !pci_wait_for_pending(link, status_pos2, PCI_VC_RES_STATUS_NEGO)) pci_err(link, "VC%d negotiation stuck pending\n", id); } /** * pci_vc_do_save_buffer - Size, save, or restore VC state * @dev: device * @pos: starting position of VC capability (VC/VC9/MFVC) * @save_state: buffer for save/restore * @save: if provided a buffer, this indicates what to do with it * * Walking Virtual Channel config space to size, save, or restore it * is complicated, so we do it all from one function to reduce code and * guarantee ordering matches in the buffer. When called with NULL * @save_state, return the size of the necessary save buffer. When called * with a non-NULL @save_state, @save determines whether we save to the * buffer or restore from it. */ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos, struct pci_cap_saved_state *save_state, bool save) { u32 cap1; char evcc, lpevcc, parb_size; int i, len = 0; u8 *buf = save_state ? (u8 *)save_state->cap.data : NULL; /* Sanity check buffer size for save/restore */ if (buf && save_state->cap.size != pci_vc_do_save_buffer(dev, pos, NULL, save)) { pci_err(dev, "VC save buffer size does not match @0x%x\n", pos); return -ENOMEM; } pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP1, &cap1); /* Extended VC Count (not counting VC0) */ evcc = cap1 & PCI_VC_CAP1_EVCC; /* Low Priority Extended VC Count (not counting VC0) */ lpevcc = (cap1 & PCI_VC_CAP1_LPEVCC) >> 4; /* Port Arbitration Table Entry Size (bits) */ parb_size = 1 << ((cap1 & PCI_VC_CAP1_ARB_SIZE) >> 10); /* * Port VC Control Register contains VC Arbitration Select, which * cannot be modified when more than one LPVC is in operation. We * therefore save/restore it first, as only VC0 should be enabled * after device reset. 
*/ if (buf) { if (save) pci_read_config_word(dev, pos + PCI_VC_PORT_CTRL, (u16 *)buf); else pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL, *(u16 *)buf); buf += 4; } len += 4; /* * If we have any Low Priority VCs and a VC Arbitration Table Offset * in Port VC Capability Register 2 then save/restore it next. */ if (lpevcc) { u32 cap2; int vcarb_offset; pci_read_config_dword(dev, pos + PCI_VC_PORT_CAP2, &cap2); vcarb_offset = ((cap2 & PCI_VC_CAP2_ARB_OFF) >> 24) * 16; if (vcarb_offset) { int size, vcarb_phases = 0; if (cap2 & PCI_VC_CAP2_128_PHASE) vcarb_phases = 128; else if (cap2 & PCI_VC_CAP2_64_PHASE) vcarb_phases = 64; else if (cap2 & PCI_VC_CAP2_32_PHASE) vcarb_phases = 32; /* Fixed 4 bits per phase per lpevcc (plus VC0) */ size = ((lpevcc + 1) * vcarb_phases * 4) / 8; if (size && buf) { pci_vc_save_restore_dwords(dev, pos + vcarb_offset, (u32 *)buf, size / 4, save); /* * On restore, we need to signal hardware to * re-load the VC Arbitration Table. */ if (!save) pci_vc_load_arb_table(dev, pos); buf += size; } len += size; } } /* * In addition to each VC Resource Control Register, we may have a * Port Arbitration Table attached to each VC. The Port Arbitration * Table Offset in each VC Resource Capability Register tells us if * it exists. The entry size is global from the Port VC Capability * Register1 above. The number of phases is determined per VC. */ for (i = 0; i < evcc + 1; i++) { u32 cap; int parb_offset; pci_read_config_dword(dev, pos + PCI_VC_RES_CAP + (i * PCI_CAP_VC_PER_VC_SIZEOF), &cap); parb_offset = ((cap & PCI_VC_RES_CAP_ARB_OFF) >> 24) * 16; if (parb_offset) { int size, parb_phases = 0; if (cap & PCI_VC_RES_CAP_256_PHASE) parb_phases = 256; else if (cap & (PCI_VC_RES_CAP_128_PHASE | PCI_VC_RES_CAP_128_PHASE_TB)) parb_phases = 128; else if (cap & PCI_VC_RES_CAP_64_PHASE) parb_phases = 64; else if (cap & PCI_VC_RES_CAP_32_PHASE) parb_phases = 32; size = (parb_size * parb_phases) / 8; if (size && buf) { pci_vc_save_restore_dwords(dev, pos + parb_offset, (u32 *)buf, size / 4, save); buf += size; } len += size; } /* VC Resource Control Register */ if (buf) { int ctrl_pos = pos + PCI_VC_RES_CTRL + (i * PCI_CAP_VC_PER_VC_SIZEOF); if (save) pci_read_config_dword(dev, ctrl_pos, (u32 *)buf); else { u32 tmp, ctrl = *(u32 *)buf; /* * For an FLR case, the VC config may remain. * Preserve enable bit, restore the rest. */ pci_read_config_dword(dev, ctrl_pos, &tmp); tmp &= PCI_VC_RES_CTRL_ENABLE; tmp |= ctrl & ~PCI_VC_RES_CTRL_ENABLE; pci_write_config_dword(dev, ctrl_pos, tmp); /* Load port arbitration table if used */ if (ctrl & PCI_VC_RES_CTRL_ARB_SELECT) pci_vc_load_port_arb_table(dev, pos, i); /* Re-enable if needed */ if ((ctrl ^ tmp) & PCI_VC_RES_CTRL_ENABLE) pci_vc_enable(dev, pos, i); } buf += 4; } len += 4; } return buf ? 0 : len; } static struct { u16 id; const char *name; } vc_caps[] = { { PCI_EXT_CAP_ID_MFVC, "MFVC" }, { PCI_EXT_CAP_ID_VC, "VC" }, { PCI_EXT_CAP_ID_VC9, "VC9" } }; /** * pci_save_vc_state - Save VC state to pre-allocate save buffer * @dev: device * * For each type of VC capability, VC/VC9/MFVC, find the capability and * save it to the pre-allocated save buffer. 
*/ int pci_save_vc_state(struct pci_dev *dev) { int i; for (i = 0; i < ARRAY_SIZE(vc_caps); i++) { int pos, ret; struct pci_cap_saved_state *save_state; pos = pci_find_ext_capability(dev, vc_caps[i].id); if (!pos) continue; save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id); if (!save_state) { pci_err(dev, "%s buffer not found in %s\n", vc_caps[i].name, __func__); return -ENOMEM; } ret = pci_vc_do_save_buffer(dev, pos, save_state, true); if (ret) { pci_err(dev, "%s save unsuccessful %s\n", vc_caps[i].name, __func__); return ret; } } return 0; } /** * pci_restore_vc_state - Restore VC state from save buffer * @dev: device * * For each type of VC capability, VC/VC9/MFVC, find the capability and * restore it from the previously saved buffer. */ void pci_restore_vc_state(struct pci_dev *dev) { int i; for (i = 0; i < ARRAY_SIZE(vc_caps); i++) { int pos; struct pci_cap_saved_state *save_state; pos = pci_find_ext_capability(dev, vc_caps[i].id); save_state = pci_find_saved_ext_cap(dev, vc_caps[i].id); if (!save_state || !pos) continue; pci_vc_do_save_buffer(dev, pos, save_state, false); } } /** * pci_allocate_vc_save_buffers - Allocate save buffers for VC caps * @dev: device * * For each type of VC capability, VC/VC9/MFVC, find the capability, size * it, and allocate a buffer for save/restore. */ void pci_allocate_vc_save_buffers(struct pci_dev *dev) { int i; for (i = 0; i < ARRAY_SIZE(vc_caps); i++) { int len, pos = pci_find_ext_capability(dev, vc_caps[i].id); if (!pos) continue; len = pci_vc_do_save_buffer(dev, pos, NULL, false); if (pci_add_ext_cap_save_buffer(dev, vc_caps[i].id, len)) pci_err(dev, "unable to preallocate %s save buffer\n", vc_caps[i].name); } }
linux-master
drivers/pci/vc.c
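The entry points above are not called by drivers directly; pci_save_state() ends by calling pci_save_vc_state(), and pci_restore_state() calls pci_restore_vc_state() on the way back. A small, hypothetical helper makes that call chain visible:

/*
 * Hypothetical helper showing where the VC save/restore code above runs:
 * bracketing a power transition with pci_save_state()/pci_restore_state()
 * saves and restores the VC/VC9/MFVC capabilities along with everything
 * else.
 */
#include <linux/pci.h>

static void example_save_restore_around_d3(struct pci_dev *pdev)
{
	pci_save_state(pdev);			/* includes pci_save_vc_state() */
	pci_set_power_state(pdev, PCI_D3hot);

	/* ... device sits in D3hot ... */

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);		/* includes pci_restore_vc_state() */
}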
// SPDX-License-Identifier: GPL-2.0 /* * Simple stub driver to reserve a PCI device * * Copyright (C) 2008 Red Hat, Inc. * Author: * Chris Wright * * Usage is simple, allocate a new id to the stub driver and bind the * device to it. For example: * * # echo "8086 10f5" > /sys/bus/pci/drivers/pci-stub/new_id * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind * # echo -n 0000:00:19.0 > /sys/bus/pci/drivers/pci-stub/bind * # ls -l /sys/bus/pci/devices/0000:00:19.0/driver * .../0000:00:19.0/driver -> ../../../bus/pci/drivers/pci-stub */ #include <linux/module.h> #include <linux/pci.h> static char ids[1024] __initdata; module_param_string(ids, ids, sizeof(ids), 0); MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the stub driver, format is " "\"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\"" " and multiple comma separated entries can be specified"); static int pci_stub_probe(struct pci_dev *dev, const struct pci_device_id *id) { pci_info(dev, "claimed by stub\n"); return 0; } static struct pci_driver stub_driver = { .name = "pci-stub", .id_table = NULL, /* only dynamic id's */ .probe = pci_stub_probe, .driver_managed_dma = true, }; static int __init pci_stub_init(void) { char *p, *id; int rc; rc = pci_register_driver(&stub_driver); if (rc) return rc; /* no ids passed actually */ if (ids[0] == '\0') return 0; /* add ids specified in the module parameter */ p = ids; while ((id = strsep(&p, ","))) { unsigned int vendor, device, subvendor = PCI_ANY_ID, subdevice = PCI_ANY_ID, class = 0, class_mask = 0; int fields; if (!strlen(id)) continue; fields = sscanf(id, "%x:%x:%x:%x:%x:%x", &vendor, &device, &subvendor, &subdevice, &class, &class_mask); if (fields < 2) { pr_warn("pci-stub: invalid ID string \"%s\"\n", id); continue; } pr_info("pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n", vendor, device, subvendor, subdevice, class, class_mask); rc = pci_add_dynid(&stub_driver, vendor, device, subvendor, subdevice, class, class_mask, 0); if (rc) pr_warn("pci-stub: failed to add dynamic ID (%d)\n", rc); } return 0; } static void __exit pci_stub_exit(void) { pci_unregister_driver(&stub_driver); } module_init(pci_stub_init); module_exit(pci_stub_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Chris Wright <[email protected]>");
linux-master
drivers/pci/pci-stub.c
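For illustration, a standalone userspace sketch of how the ids= module parameter is tokenized and matched by pci_stub_init() above, using the same strsep()/sscanf() pattern and hypothetical ID strings:

/*
 * Userspace re-creation of the ids= parsing in pci_stub_init(): each
 * comma-separated entry is scanned with the same format string, and
 * entries with fewer than two fields are rejected.
 */
#define _DEFAULT_SOURCE		/* for strsep() */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char ids[] = "8086:10f5,1af4:1000:1af4:0001";	/* hypothetical IDs */
	char *p = ids, *id;

	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = ~0U, subdevice = ~0U,
			     class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;
		fields = sscanf(id, "%x:%x:%x:%x:%x:%x", &vendor, &device,
				&subvendor, &subdevice, &class, &class_mask);
		if (fields < 2) {
			printf("invalid ID string \"%s\"\n", id);
			continue;
		}
		printf("add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n",
		       vendor, device, subvendor, subdevice, class, class_mask);
	}
	return 0;
}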
// SPDX-License-Identifier: GPL-2.0 /* * PCI Express I/O Virtualization (IOV) support * Single Root IOV 1.0 * Address Translation Service 1.0 * * Copyright (C) 2009 Intel Corporation, Yu Zhao <[email protected]> */ #include <linux/pci.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/string.h> #include <linux/delay.h> #include "pci.h" #define VIRTFN_ID_LEN 17 /* "virtfn%u\0" for 2^32 - 1 */ int pci_iov_virtfn_bus(struct pci_dev *dev, int vf_id) { if (!dev->is_physfn) return -EINVAL; return dev->bus->number + ((dev->devfn + dev->sriov->offset + dev->sriov->stride * vf_id) >> 8); } int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id) { if (!dev->is_physfn) return -EINVAL; return (dev->devfn + dev->sriov->offset + dev->sriov->stride * vf_id) & 0xff; } EXPORT_SYMBOL_GPL(pci_iov_virtfn_devfn); int pci_iov_vf_id(struct pci_dev *dev) { struct pci_dev *pf; if (!dev->is_virtfn) return -EINVAL; pf = pci_physfn(dev); return (pci_dev_id(dev) - (pci_dev_id(pf) + pf->sriov->offset)) / pf->sriov->stride; } EXPORT_SYMBOL_GPL(pci_iov_vf_id); /** * pci_iov_get_pf_drvdata - Return the drvdata of a PF * @dev: VF pci_dev * @pf_driver: Device driver required to own the PF * * This must be called from a context that ensures that a VF driver is attached. * The value returned is invalid once the VF driver completes its remove() * callback. * * Locking is achieved by the driver core. A VF driver cannot be probed until * pci_enable_sriov() is called and pci_disable_sriov() does not return until * all VF drivers have completed their remove(). * * The PF driver must call pci_disable_sriov() before it begins to destroy the * drvdata. */ void *pci_iov_get_pf_drvdata(struct pci_dev *dev, struct pci_driver *pf_driver) { struct pci_dev *pf_dev; if (!dev->is_virtfn) return ERR_PTR(-EINVAL); pf_dev = dev->physfn; if (pf_dev->driver != pf_driver) return ERR_PTR(-EINVAL); return pci_get_drvdata(pf_dev); } EXPORT_SYMBOL_GPL(pci_iov_get_pf_drvdata); /* * Per SR-IOV spec sec 3.3.10 and 3.3.11, First VF Offset and VF Stride may * change when NumVFs changes. * * Update iov->offset and iov->stride when NumVFs is written. */ static inline void pci_iov_set_numvfs(struct pci_dev *dev, int nr_virtfn) { struct pci_sriov *iov = dev->sriov; pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn); pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &iov->offset); pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &iov->stride); } /* * The PF consumes one bus number. NumVFs, First VF Offset, and VF Stride * determine how many additional bus numbers will be consumed by VFs. * * Iterate over all valid NumVFs, validate offset and stride, and calculate * the maximum number of bus numbers that could ever be required. 
*/ static int compute_max_vf_buses(struct pci_dev *dev) { struct pci_sriov *iov = dev->sriov; int nr_virtfn, busnr, rc = 0; for (nr_virtfn = iov->total_VFs; nr_virtfn; nr_virtfn--) { pci_iov_set_numvfs(dev, nr_virtfn); if (!iov->offset || (nr_virtfn > 1 && !iov->stride)) { rc = -EIO; goto out; } busnr = pci_iov_virtfn_bus(dev, nr_virtfn - 1); if (busnr > iov->max_VF_buses) iov->max_VF_buses = busnr; } out: pci_iov_set_numvfs(dev, 0); return rc; } static struct pci_bus *virtfn_add_bus(struct pci_bus *bus, int busnr) { struct pci_bus *child; if (bus->number == busnr) return bus; child = pci_find_bus(pci_domain_nr(bus), busnr); if (child) return child; child = pci_add_new_bus(bus, NULL, busnr); if (!child) return NULL; pci_bus_insert_busn_res(child, busnr, busnr); return child; } static void virtfn_remove_bus(struct pci_bus *physbus, struct pci_bus *virtbus) { if (physbus != virtbus && list_empty(&virtbus->devices)) pci_remove_bus(virtbus); } resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) { if (!dev->is_physfn) return 0; return dev->sriov->barsz[resno - PCI_IOV_RESOURCES]; } static void pci_read_vf_config_common(struct pci_dev *virtfn) { struct pci_dev *physfn = virtfn->physfn; /* * Some config registers are the same across all associated VFs. * Read them once from VF0 so we can skip reading them from the * other VFs. * * PCIe r4.0, sec 9.3.4.1, technically doesn't require all VFs to * have the same Revision ID and Subsystem ID, but we assume they * do. */ pci_read_config_dword(virtfn, PCI_CLASS_REVISION, &physfn->sriov->class); pci_read_config_byte(virtfn, PCI_HEADER_TYPE, &physfn->sriov->hdr_type); pci_read_config_word(virtfn, PCI_SUBSYSTEM_VENDOR_ID, &physfn->sriov->subsystem_vendor); pci_read_config_word(virtfn, PCI_SUBSYSTEM_ID, &physfn->sriov->subsystem_device); } int pci_iov_sysfs_link(struct pci_dev *dev, struct pci_dev *virtfn, int id) { char buf[VIRTFN_ID_LEN]; int rc; sprintf(buf, "virtfn%u", id); rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); if (rc) goto failed; rc = sysfs_create_link(&virtfn->dev.kobj, &dev->dev.kobj, "physfn"); if (rc) goto failed1; kobject_uevent(&virtfn->dev.kobj, KOBJ_CHANGE); return 0; failed1: sysfs_remove_link(&dev->dev.kobj, buf); failed: return rc; } #ifdef CONFIG_PCI_MSI static ssize_t sriov_vf_total_msix_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); u32 vf_total_msix = 0; device_lock(dev); if (!pdev->driver || !pdev->driver->sriov_get_vf_total_msix) goto unlock; vf_total_msix = pdev->driver->sriov_get_vf_total_msix(pdev); unlock: device_unlock(dev); return sysfs_emit(buf, "%u\n", vf_total_msix); } static DEVICE_ATTR_RO(sriov_vf_total_msix); static ssize_t sriov_vf_msix_count_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *vf_dev = to_pci_dev(dev); struct pci_dev *pdev = pci_physfn(vf_dev); int val, ret = 0; if (kstrtoint(buf, 0, &val) < 0) return -EINVAL; if (val < 0) return -EINVAL; device_lock(&pdev->dev); if (!pdev->driver || !pdev->driver->sriov_set_msix_vec_count) { ret = -EOPNOTSUPP; goto err_pdev; } device_lock(&vf_dev->dev); if (vf_dev->driver) { /* * A driver is already attached to this VF and has configured * itself based on the current MSI-X vector count. Changing * the vector size could mess up the driver, so block it. 
*/ ret = -EBUSY; goto err_dev; } ret = pdev->driver->sriov_set_msix_vec_count(vf_dev, val); err_dev: device_unlock(&vf_dev->dev); err_pdev: device_unlock(&pdev->dev); return ret ? : count; } static DEVICE_ATTR_WO(sriov_vf_msix_count); #endif static struct attribute *sriov_vf_dev_attrs[] = { #ifdef CONFIG_PCI_MSI &dev_attr_sriov_vf_msix_count.attr, #endif NULL, }; static umode_t sriov_vf_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); struct pci_dev *pdev = to_pci_dev(dev); if (!pdev->is_virtfn) return 0; return a->mode; } const struct attribute_group sriov_vf_dev_attr_group = { .attrs = sriov_vf_dev_attrs, .is_visible = sriov_vf_attrs_are_visible, }; int pci_iov_add_virtfn(struct pci_dev *dev, int id) { int i; int rc = -ENOMEM; u64 size; struct pci_dev *virtfn; struct resource *res; struct pci_sriov *iov = dev->sriov; struct pci_bus *bus; bus = virtfn_add_bus(dev->bus, pci_iov_virtfn_bus(dev, id)); if (!bus) goto failed; virtfn = pci_alloc_dev(bus); if (!virtfn) goto failed0; virtfn->devfn = pci_iov_virtfn_devfn(dev, id); virtfn->vendor = dev->vendor; virtfn->device = iov->vf_device; virtfn->is_virtfn = 1; virtfn->physfn = pci_dev_get(dev); virtfn->no_command_memory = 1; if (id == 0) pci_read_vf_config_common(virtfn); rc = pci_setup_device(virtfn); if (rc) goto failed1; virtfn->dev.parent = dev->dev.parent; virtfn->multifunction = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &dev->resource[i + PCI_IOV_RESOURCES]; if (!res->parent) continue; virtfn->resource[i].name = pci_name(virtfn); virtfn->resource[i].flags = res->flags; size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES); virtfn->resource[i].start = res->start + size * id; virtfn->resource[i].end = virtfn->resource[i].start + size - 1; rc = request_resource(res, &virtfn->resource[i]); BUG_ON(rc); } pci_device_add(virtfn, virtfn->bus); rc = pci_iov_sysfs_link(dev, virtfn, id); if (rc) goto failed1; pci_bus_add_device(virtfn); return 0; failed1: pci_stop_and_remove_bus_device(virtfn); pci_dev_put(dev); failed0: virtfn_remove_bus(dev->bus, bus); failed: return rc; } void pci_iov_remove_virtfn(struct pci_dev *dev, int id) { char buf[VIRTFN_ID_LEN]; struct pci_dev *virtfn; virtfn = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), pci_iov_virtfn_bus(dev, id), pci_iov_virtfn_devfn(dev, id)); if (!virtfn) return; sprintf(buf, "virtfn%u", id); sysfs_remove_link(&dev->dev.kobj, buf); /* * pci_stop_dev() could have been called for this virtfn already, * so the directory for the virtfn may have been removed before. * Double check to avoid spurious sysfs warnings. 
*/ if (virtfn->dev.kobj.sd) sysfs_remove_link(&virtfn->dev.kobj, "physfn"); pci_stop_and_remove_bus_device(virtfn); virtfn_remove_bus(dev->bus, virtfn->bus); /* balance pci_get_domain_bus_and_slot() */ pci_dev_put(virtfn); pci_dev_put(dev); } static ssize_t sriov_totalvfs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return sysfs_emit(buf, "%u\n", pci_sriov_get_totalvfs(pdev)); } static ssize_t sriov_numvfs_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); u16 num_vfs; /* Serialize vs sriov_numvfs_store() so readers see valid num_VFs */ device_lock(&pdev->dev); num_vfs = pdev->sriov->num_VFs; device_unlock(&pdev->dev); return sysfs_emit(buf, "%u\n", num_vfs); } /* * num_vfs > 0; number of VFs to enable * num_vfs = 0; disable all VFs * * Note: SRIOV spec does not allow partial VF * disable, so it's all or none. */ static ssize_t sriov_numvfs_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); int ret = 0; u16 num_vfs; if (kstrtou16(buf, 0, &num_vfs) < 0) return -EINVAL; if (num_vfs > pci_sriov_get_totalvfs(pdev)) return -ERANGE; device_lock(&pdev->dev); if (num_vfs == pdev->sriov->num_VFs) goto exit; /* is PF driver loaded */ if (!pdev->driver) { pci_info(pdev, "no driver bound to device; cannot configure SR-IOV\n"); ret = -ENOENT; goto exit; } /* is PF driver loaded w/callback */ if (!pdev->driver->sriov_configure) { pci_info(pdev, "driver does not support SR-IOV configuration via sysfs\n"); ret = -ENOENT; goto exit; } if (num_vfs == 0) { /* disable VFs */ ret = pdev->driver->sriov_configure(pdev, 0); goto exit; } /* enable VFs */ if (pdev->sriov->num_VFs) { pci_warn(pdev, "%d VFs already enabled. 
Disable before enabling %d VFs\n", pdev->sriov->num_VFs, num_vfs); ret = -EBUSY; goto exit; } ret = pdev->driver->sriov_configure(pdev, num_vfs); if (ret < 0) goto exit; if (ret != num_vfs) pci_warn(pdev, "%d VFs requested; only %d enabled\n", num_vfs, ret); exit: device_unlock(&pdev->dev); if (ret < 0) return ret; return count; } static ssize_t sriov_offset_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return sysfs_emit(buf, "%u\n", pdev->sriov->offset); } static ssize_t sriov_stride_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return sysfs_emit(buf, "%u\n", pdev->sriov->stride); } static ssize_t sriov_vf_device_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return sysfs_emit(buf, "%x\n", pdev->sriov->vf_device); } static ssize_t sriov_drivers_autoprobe_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return sysfs_emit(buf, "%u\n", pdev->sriov->drivers_autoprobe); } static ssize_t sriov_drivers_autoprobe_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct pci_dev *pdev = to_pci_dev(dev); bool drivers_autoprobe; if (kstrtobool(buf, &drivers_autoprobe) < 0) return -EINVAL; pdev->sriov->drivers_autoprobe = drivers_autoprobe; return count; } static DEVICE_ATTR_RO(sriov_totalvfs); static DEVICE_ATTR_RW(sriov_numvfs); static DEVICE_ATTR_RO(sriov_offset); static DEVICE_ATTR_RO(sriov_stride); static DEVICE_ATTR_RO(sriov_vf_device); static DEVICE_ATTR_RW(sriov_drivers_autoprobe); static struct attribute *sriov_pf_dev_attrs[] = { &dev_attr_sriov_totalvfs.attr, &dev_attr_sriov_numvfs.attr, &dev_attr_sriov_offset.attr, &dev_attr_sriov_stride.attr, &dev_attr_sriov_vf_device.attr, &dev_attr_sriov_drivers_autoprobe.attr, #ifdef CONFIG_PCI_MSI &dev_attr_sriov_vf_total_msix.attr, #endif NULL, }; static umode_t sriov_pf_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); if (!dev_is_pf(dev)) return 0; return a->mode; } const struct attribute_group sriov_pf_dev_attr_group = { .attrs = sriov_pf_dev_attrs, .is_visible = sriov_pf_attrs_are_visible, }; int __weak pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs) { return 0; } int __weak pcibios_sriov_disable(struct pci_dev *pdev) { return 0; } static int sriov_add_vfs(struct pci_dev *dev, u16 num_vfs) { unsigned int i; int rc; if (dev->no_vf_scan) return 0; for (i = 0; i < num_vfs; i++) { rc = pci_iov_add_virtfn(dev, i); if (rc) goto failed; } return 0; failed: while (i--) pci_iov_remove_virtfn(dev, i); return rc; } static int sriov_enable(struct pci_dev *dev, int nr_virtfn) { int rc; int i; int nres; u16 initial; struct resource *res; struct pci_dev *pdev; struct pci_sriov *iov = dev->sriov; int bars = 0; int bus; if (!nr_virtfn) return 0; if (iov->num_VFs) return -EINVAL; pci_read_config_word(dev, iov->pos + PCI_SRIOV_INITIAL_VF, &initial); if (initial > iov->total_VFs || (!(iov->cap & PCI_SRIOV_CAP_VFM) && (initial != iov->total_VFs))) return -EIO; if (nr_virtfn < 0 || nr_virtfn > iov->total_VFs || (!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial))) return -EINVAL; nres = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { bars |= (1 << (i + PCI_IOV_RESOURCES)); res = &dev->resource[i + PCI_IOV_RESOURCES]; if (res->parent) nres++; } if (nres != iov->nres) { pci_err(dev, "not enough MMIO resources for 
SR-IOV\n"); return -ENOMEM; } bus = pci_iov_virtfn_bus(dev, nr_virtfn - 1); if (bus > dev->bus->busn_res.end) { pci_err(dev, "can't enable %d VFs (bus %02x out of range of %pR)\n", nr_virtfn, bus, &dev->bus->busn_res); return -ENOMEM; } if (pci_enable_resources(dev, bars)) { pci_err(dev, "SR-IOV: IOV BARS not allocated\n"); return -ENOMEM; } if (iov->link != dev->devfn) { pdev = pci_get_slot(dev->bus, iov->link); if (!pdev) return -ENODEV; if (!pdev->is_physfn) { pci_dev_put(pdev); return -ENOSYS; } rc = sysfs_create_link(&dev->dev.kobj, &pdev->dev.kobj, "dep_link"); pci_dev_put(pdev); if (rc) return rc; } iov->initial_VFs = initial; if (nr_virtfn < initial) initial = nr_virtfn; rc = pcibios_sriov_enable(dev, initial); if (rc) { pci_err(dev, "failure %d from pcibios_sriov_enable()\n", rc); goto err_pcibios; } pci_iov_set_numvfs(dev, nr_virtfn); iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE; pci_cfg_access_lock(dev); pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); msleep(100); pci_cfg_access_unlock(dev); rc = sriov_add_vfs(dev, initial); if (rc) goto err_pcibios; kobject_uevent(&dev->dev.kobj, KOBJ_CHANGE); iov->num_VFs = nr_virtfn; return 0; err_pcibios: iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE); pci_cfg_access_lock(dev); pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); ssleep(1); pci_cfg_access_unlock(dev); pcibios_sriov_disable(dev); if (iov->link != dev->devfn) sysfs_remove_link(&dev->dev.kobj, "dep_link"); pci_iov_set_numvfs(dev, 0); return rc; } static void sriov_del_vfs(struct pci_dev *dev) { struct pci_sriov *iov = dev->sriov; int i; for (i = 0; i < iov->num_VFs; i++) pci_iov_remove_virtfn(dev, i); } static void sriov_disable(struct pci_dev *dev) { struct pci_sriov *iov = dev->sriov; if (!iov->num_VFs) return; sriov_del_vfs(dev); iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE); pci_cfg_access_lock(dev); pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); ssleep(1); pci_cfg_access_unlock(dev); pcibios_sriov_disable(dev); if (iov->link != dev->devfn) sysfs_remove_link(&dev->dev.kobj, "dep_link"); iov->num_VFs = 0; pci_iov_set_numvfs(dev, 0); } static int sriov_init(struct pci_dev *dev, int pos) { int i, bar64; int rc; int nres; u32 pgsz; u16 ctrl, total; struct pci_sriov *iov; struct resource *res; struct pci_dev *pdev; pci_read_config_word(dev, pos + PCI_SRIOV_CTRL, &ctrl); if (ctrl & PCI_SRIOV_CTRL_VFE) { pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, 0); ssleep(1); } ctrl = 0; list_for_each_entry(pdev, &dev->bus->devices, bus_list) if (pdev->is_physfn) goto found; pdev = NULL; if (pci_ari_enabled(dev->bus)) ctrl |= PCI_SRIOV_CTRL_ARI; found: pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl); pci_read_config_word(dev, pos + PCI_SRIOV_TOTAL_VF, &total); if (!total) return 0; pci_read_config_dword(dev, pos + PCI_SRIOV_SUP_PGSIZE, &pgsz); i = PAGE_SHIFT > 12 ? PAGE_SHIFT - 12 : 0; pgsz &= ~((1 << i) - 1); if (!pgsz) return -EIO; pgsz &= ~(pgsz - 1); pci_write_config_dword(dev, pos + PCI_SRIOV_SYS_PGSIZE, pgsz); iov = kzalloc(sizeof(*iov), GFP_KERNEL); if (!iov) return -ENOMEM; nres = 0; for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &dev->resource[i + PCI_IOV_RESOURCES]; /* * If it is already FIXED, don't change it, something * (perhaps EA or header fixups) wants it this way. */ if (res->flags & IORESOURCE_PCI_FIXED) bar64 = (res->flags & IORESOURCE_MEM_64) ? 
1 : 0; else bar64 = __pci_read_base(dev, pci_bar_unknown, res, pos + PCI_SRIOV_BAR + i * 4); if (!res->flags) continue; if (resource_size(res) & (PAGE_SIZE - 1)) { rc = -EIO; goto failed; } iov->barsz[i] = resource_size(res); res->end = res->start + resource_size(res) * total - 1; pci_info(dev, "VF(n) BAR%d space: %pR (contains BAR%d for %d VFs)\n", i, res, i, total); i += bar64; nres++; } iov->pos = pos; iov->nres = nres; iov->ctrl = ctrl; iov->total_VFs = total; iov->driver_max_VFs = total; pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &iov->vf_device); iov->pgsz = pgsz; iov->self = dev; iov->drivers_autoprobe = true; pci_read_config_dword(dev, pos + PCI_SRIOV_CAP, &iov->cap); pci_read_config_byte(dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) iov->link = PCI_DEVFN(PCI_SLOT(dev->devfn), iov->link); if (pdev) iov->dev = pci_dev_get(pdev); else iov->dev = dev; dev->sriov = iov; dev->is_physfn = 1; rc = compute_max_vf_buses(dev); if (rc) goto fail_max_buses; return 0; fail_max_buses: dev->sriov = NULL; dev->is_physfn = 0; failed: for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { res = &dev->resource[i + PCI_IOV_RESOURCES]; res->flags = 0; } kfree(iov); return rc; } static void sriov_release(struct pci_dev *dev) { BUG_ON(dev->sriov->num_VFs); if (dev != dev->sriov->dev) pci_dev_put(dev->sriov->dev); kfree(dev->sriov); dev->sriov = NULL; } static void sriov_restore_state(struct pci_dev *dev) { int i; u16 ctrl; struct pci_sriov *iov = dev->sriov; pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &ctrl); if (ctrl & PCI_SRIOV_CTRL_VFE) return; /* * Restore PCI_SRIOV_CTRL_ARI before pci_iov_set_numvfs() because * it reads offset & stride, which depend on PCI_SRIOV_CTRL_ARI. */ ctrl &= ~PCI_SRIOV_CTRL_ARI; ctrl |= iov->ctrl & PCI_SRIOV_CTRL_ARI; pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, ctrl); for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) pci_update_resource(dev, i + PCI_IOV_RESOURCES); pci_write_config_dword(dev, iov->pos + PCI_SRIOV_SYS_PGSIZE, iov->pgsz); pci_iov_set_numvfs(dev, iov->num_VFs); pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl); if (iov->ctrl & PCI_SRIOV_CTRL_VFE) msleep(100); } /** * pci_iov_init - initialize the IOV capability * @dev: the PCI device * * Returns 0 on success, or negative on failure. */ int pci_iov_init(struct pci_dev *dev) { int pos; if (!pci_is_pcie(dev)) return -ENODEV; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); if (pos) return sriov_init(dev, pos); return -ENODEV; } /** * pci_iov_release - release resources used by the IOV capability * @dev: the PCI device */ void pci_iov_release(struct pci_dev *dev) { if (dev->is_physfn) sriov_release(dev); } /** * pci_iov_remove - clean up SR-IOV state after PF driver is detached * @dev: the PCI device */ void pci_iov_remove(struct pci_dev *dev) { struct pci_sriov *iov = dev->sriov; if (!dev->is_physfn) return; iov->driver_max_VFs = iov->total_VFs; if (iov->num_VFs) pci_warn(dev, "driver left SR-IOV enabled after remove\n"); } /** * pci_iov_update_resource - update a VF BAR * @dev: the PCI device * @resno: the resource number * * Update a VF BAR in the SR-IOV capability of a PF. */ void pci_iov_update_resource(struct pci_dev *dev, int resno) { struct pci_sriov *iov = dev->is_physfn ? 
dev->sriov : NULL; struct resource *res = dev->resource + resno; int vf_bar = resno - PCI_IOV_RESOURCES; struct pci_bus_region region; u16 cmd; u32 new; int reg; /* * The generic pci_restore_bars() path calls this for all devices, * including VFs and non-SR-IOV devices. If this is not a PF, we * have nothing to do. */ if (!iov) return; pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd); if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) { dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n", vf_bar, res); return; } /* * Ignore unimplemented BARs, unused resource slots for 64-bit * BARs, and non-movable resources, e.g., those described via * Enhanced Allocation. */ if (!res->flags) return; if (res->flags & IORESOURCE_UNSET) return; if (res->flags & IORESOURCE_PCI_FIXED) return; pcibios_resource_to_bus(dev->bus, &region, res); new = region.start; new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar; pci_write_config_dword(dev, reg, new); if (res->flags & IORESOURCE_MEM_64) { new = region.start >> 16 >> 16; pci_write_config_dword(dev, reg + 4, new); } } resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev, int resno) { return pci_iov_resource_size(dev, resno); } /** * pci_sriov_resource_alignment - get resource alignment for VF BAR * @dev: the PCI device * @resno: the resource number * * Returns the alignment of the VF BAR found in the SR-IOV capability. * This is not the same as the resource size which is defined as * the VF BAR size multiplied by the number of VFs. The alignment * is just the VF BAR size. */ resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno) { return pcibios_iov_resource_alignment(dev, resno); } /** * pci_restore_iov_state - restore the state of the IOV capability * @dev: the PCI device */ void pci_restore_iov_state(struct pci_dev *dev) { if (dev->is_physfn) sriov_restore_state(dev); } /** * pci_vf_drivers_autoprobe - set PF property drivers_autoprobe for VFs * @dev: the PCI device * @auto_probe: set VF drivers auto probe flag */ void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool auto_probe) { if (dev->is_physfn) dev->sriov->drivers_autoprobe = auto_probe; } /** * pci_iov_bus_range - find bus range used by Virtual Function * @bus: the PCI bus * * Returns max number of buses (exclude current one) used by Virtual * Functions. */ int pci_iov_bus_range(struct pci_bus *bus) { int max = 0; struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { if (!dev->is_physfn) continue; if (dev->sriov->max_VF_buses > max) max = dev->sriov->max_VF_buses; } return max ? max - bus->number : 0; } /** * pci_enable_sriov - enable the SR-IOV capability * @dev: the PCI device * @nr_virtfn: number of virtual functions to enable * * Returns 0 on success, or negative on failure. */ int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn) { might_sleep(); if (!dev->is_physfn) return -ENOSYS; return sriov_enable(dev, nr_virtfn); } EXPORT_SYMBOL_GPL(pci_enable_sriov); /** * pci_disable_sriov - disable the SR-IOV capability * @dev: the PCI device */ void pci_disable_sriov(struct pci_dev *dev) { might_sleep(); if (!dev->is_physfn) return; sriov_disable(dev); } EXPORT_SYMBOL_GPL(pci_disable_sriov); /** * pci_num_vf - return number of VFs associated with a PF device_release_driver * @dev: the PCI device * * Returns number of VFs, or 0 if SR-IOV is not enabled. 
*/ int pci_num_vf(struct pci_dev *dev) { if (!dev->is_physfn) return 0; return dev->sriov->num_VFs; } EXPORT_SYMBOL_GPL(pci_num_vf); /** * pci_vfs_assigned - returns number of VFs are assigned to a guest * @dev: the PCI device * * Returns number of VFs belonging to this device that are assigned to a guest. * If device is not a physical function returns 0. */ int pci_vfs_assigned(struct pci_dev *dev) { struct pci_dev *vfdev; unsigned int vfs_assigned = 0; unsigned short dev_id; /* only search if we are a PF */ if (!dev->is_physfn) return 0; /* * determine the device ID for the VFs, the vendor ID will be the * same as the PF so there is no need to check for that one */ dev_id = dev->sriov->vf_device; /* loop through all the VFs to see if we own any that are assigned */ vfdev = pci_get_device(dev->vendor, dev_id, NULL); while (vfdev) { /* * It is considered assigned if it is a virtual function with * our dev as the physical function and the assigned bit is set */ if (vfdev->is_virtfn && (vfdev->physfn == dev) && pci_is_dev_assigned(vfdev)) vfs_assigned++; vfdev = pci_get_device(dev->vendor, dev_id, vfdev); } return vfs_assigned; } EXPORT_SYMBOL_GPL(pci_vfs_assigned); /** * pci_sriov_set_totalvfs -- reduce the TotalVFs available * @dev: the PCI PF device * @numvfs: number that should be used for TotalVFs supported * * Should be called from PF driver's probe routine with * device's mutex held. * * Returns 0 if PF is an SRIOV-capable device and * value of numvfs valid. If not a PF return -ENOSYS; * if numvfs is invalid return -EINVAL; * if VFs already enabled, return -EBUSY. */ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs) { if (!dev->is_physfn) return -ENOSYS; if (numvfs > dev->sriov->total_VFs) return -EINVAL; /* Shouldn't change if VFs already enabled */ if (dev->sriov->ctrl & PCI_SRIOV_CTRL_VFE) return -EBUSY; dev->sriov->driver_max_VFs = numvfs; return 0; } EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs); /** * pci_sriov_get_totalvfs -- get total VFs supported on this device * @dev: the PCI PF device * * For a PCIe device with SRIOV support, return the PCIe * SRIOV capability value of TotalVFs or the value of driver_max_VFs * if the driver reduced it. Otherwise 0. */ int pci_sriov_get_totalvfs(struct pci_dev *dev) { if (!dev->is_physfn) return 0; return dev->sriov->driver_max_VFs; } EXPORT_SYMBOL_GPL(pci_sriov_get_totalvfs); /** * pci_sriov_configure_simple - helper to configure SR-IOV * @dev: the PCI device * @nr_virtfn: number of virtual functions to enable, 0 to disable * * Enable or disable SR-IOV for devices that don't require any PF setup * before enabling SR-IOV. Return value is negative on error, or number of * VFs allocated on success. */ int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn) { int rc; might_sleep(); if (!dev->is_physfn) return -ENODEV; if (pci_vfs_assigned(dev)) { pci_warn(dev, "Cannot modify SR-IOV while VFs are assigned\n"); return -EPERM; } if (nr_virtfn == 0) { sriov_disable(dev); return 0; } rc = sriov_enable(dev, nr_virtfn); if (rc < 0) return rc; return nr_virtfn; } EXPORT_SYMBOL_GPL(pci_sriov_configure_simple);
linux-master
drivers/pci/iov.c
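A PF driver that needs no setup of its own before VF enablement can wire the sriov_numvfs sysfs attribute defined above directly to pci_sriov_configure_simple(). A minimal sketch with a hypothetical vendor/device ID:

/*
 * Sketch of a PF driver using the SR-IOV helpers exported above.  The
 * driver name and PCI IDs are hypothetical; pci_sriov_configure_simple()
 * handles both enable (nr_virtfn > 0) and disable (nr_virtfn == 0).
 */
#include <linux/module.h>
#include <linux/pci.h>

static int example_pf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pcim_enable_device(pdev);	/* managed enable; no PF setup needed */
}

static const struct pci_device_id example_pf_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical vendor/device */
	{ }
};
MODULE_DEVICE_TABLE(pci, example_pf_ids);

static struct pci_driver example_pf_driver = {
	.name		 = "example-pf",
	.id_table	 = example_pf_ids,
	.probe		 = example_pf_probe,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(example_pf_driver);
MODULE_LICENSE("GPL");

With this in place, writing a VF count to the PF's sriov_numvfs attribute is routed through sriov_numvfs_store() and lands in sriov_enable()/sriov_disable() above.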
// SPDX-License-Identifier: GPL-2.0 /* * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <[email protected]> * (C) Copyright 2007 Novell Inc. */ #include <linux/pci.h> #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/mempolicy.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/sched/isolation.h> #include <linux/cpu.h> #include <linux/pm_runtime.h> #include <linux/suspend.h> #include <linux/kexec.h> #include <linux/of_device.h> #include <linux/acpi.h> #include <linux/dma-map-ops.h> #include <linux/iommu.h> #include "pci.h" #include "pcie/portdrv.h" struct pci_dynid { struct list_head node; struct pci_device_id id; }; /** * pci_add_dynid - add a new PCI device ID to this driver and re-probe devices * @drv: target pci driver * @vendor: PCI vendor ID * @device: PCI device ID * @subvendor: PCI subvendor ID * @subdevice: PCI subdevice ID * @class: PCI class * @class_mask: PCI class mask * @driver_data: private driver data * * Adds a new dynamic pci device ID to this driver and causes the * driver to probe for all devices again. @drv must have been * registered prior to calling this function. * * CONTEXT: * Does GFP_KERNEL allocation. * * RETURNS: * 0 on success, -errno on failure. */ int pci_add_dynid(struct pci_driver *drv, unsigned int vendor, unsigned int device, unsigned int subvendor, unsigned int subdevice, unsigned int class, unsigned int class_mask, unsigned long driver_data) { struct pci_dynid *dynid; dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); if (!dynid) return -ENOMEM; dynid->id.vendor = vendor; dynid->id.device = device; dynid->id.subvendor = subvendor; dynid->id.subdevice = subdevice; dynid->id.class = class; dynid->id.class_mask = class_mask; dynid->id.driver_data = driver_data; spin_lock(&drv->dynids.lock); list_add_tail(&dynid->node, &drv->dynids.list); spin_unlock(&drv->dynids.lock); return driver_attach(&drv->driver); } EXPORT_SYMBOL_GPL(pci_add_dynid); static void pci_free_dynids(struct pci_driver *drv) { struct pci_dynid *dynid, *n; spin_lock(&drv->dynids.lock); list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { list_del(&dynid->node); kfree(dynid); } spin_unlock(&drv->dynids.lock); } /** * pci_match_id - See if a PCI device matches a given pci_id table * @ids: array of PCI device ID structures to search in * @dev: the PCI device structure to match against. * * Used by a driver to check whether a PCI device is in its list of * supported devices. Returns the matching pci_device_id structure or * %NULL if there is no match. * * Deprecated; don't use this as it will not catch any dynamic IDs * that a driver might want to check for. */ const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, struct pci_dev *dev) { if (ids) { while (ids->vendor || ids->subvendor || ids->class_mask) { if (pci_match_one_device(ids, dev)) return ids; ids++; } } return NULL; } EXPORT_SYMBOL(pci_match_id); static const struct pci_device_id pci_device_id_any = { .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }; /** * pci_match_device - See if a device matches a driver's list of IDs * @drv: the PCI driver to match against * @dev: the PCI device structure to match against * * Used by a driver to check whether a PCI device is in its list of * supported devices or in the dynids list, which may have been augmented * via the sysfs "new_id" file. Returns the matching pci_device_id * structure or %NULL if there is no match. 
*/ static const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev) { struct pci_dynid *dynid; const struct pci_device_id *found_id = NULL, *ids; /* When driver_override is set, only bind to the matching driver */ if (dev->driver_override && strcmp(dev->driver_override, drv->name)) return NULL; /* Look at the dynamic ids first, before the static ones */ spin_lock(&drv->dynids.lock); list_for_each_entry(dynid, &drv->dynids.list, node) { if (pci_match_one_device(&dynid->id, dev)) { found_id = &dynid->id; break; } } spin_unlock(&drv->dynids.lock); if (found_id) return found_id; for (ids = drv->id_table; (found_id = pci_match_id(ids, dev)); ids = found_id + 1) { /* * The match table is split based on driver_override. * In case override_only was set, enforce driver_override * matching. */ if (found_id->override_only) { if (dev->driver_override) return found_id; } else { return found_id; } } /* driver_override will always match, send a dummy id */ if (dev->driver_override) return &pci_device_id_any; return NULL; } /** * new_id_store - sysfs frontend to pci_add_dynid() * @driver: target device driver * @buf: buffer for scanning device ID data * @count: input size * * Allow PCI IDs to be added to an existing driver via sysfs. */ static ssize_t new_id_store(struct device_driver *driver, const char *buf, size_t count) { struct pci_driver *pdrv = to_pci_driver(driver); const struct pci_device_id *ids = pdrv->id_table; u32 vendor, device, subvendor = PCI_ANY_ID, subdevice = PCI_ANY_ID, class = 0, class_mask = 0; unsigned long driver_data = 0; int fields; int retval = 0; fields = sscanf(buf, "%x %x %x %x %x %x %lx", &vendor, &device, &subvendor, &subdevice, &class, &class_mask, &driver_data); if (fields < 2) return -EINVAL; if (fields != 7) { struct pci_dev *pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); if (!pdev) return -ENOMEM; pdev->vendor = vendor; pdev->device = device; pdev->subsystem_vendor = subvendor; pdev->subsystem_device = subdevice; pdev->class = class; if (pci_match_device(pdrv, pdev)) retval = -EEXIST; kfree(pdev); if (retval) return retval; } /* Only accept driver_data values that match an existing id_table entry */ if (ids) { retval = -EINVAL; while (ids->vendor || ids->subvendor || ids->class_mask) { if (driver_data == ids->driver_data) { retval = 0; break; } ids++; } if (retval) /* No match */ return retval; } retval = pci_add_dynid(pdrv, vendor, device, subvendor, subdevice, class, class_mask, driver_data); if (retval) return retval; return count; } static DRIVER_ATTR_WO(new_id); /** * remove_id_store - remove a PCI device ID from this driver * @driver: target device driver * @buf: buffer for scanning device ID data * @count: input size * * Removes a dynamic pci device ID to this driver. 
*/ static ssize_t remove_id_store(struct device_driver *driver, const char *buf, size_t count) { struct pci_dynid *dynid, *n; struct pci_driver *pdrv = to_pci_driver(driver); u32 vendor, device, subvendor = PCI_ANY_ID, subdevice = PCI_ANY_ID, class = 0, class_mask = 0; int fields; size_t retval = -ENODEV; fields = sscanf(buf, "%x %x %x %x %x %x", &vendor, &device, &subvendor, &subdevice, &class, &class_mask); if (fields < 2) return -EINVAL; spin_lock(&pdrv->dynids.lock); list_for_each_entry_safe(dynid, n, &pdrv->dynids.list, node) { struct pci_device_id *id = &dynid->id; if ((id->vendor == vendor) && (id->device == device) && (subvendor == PCI_ANY_ID || id->subvendor == subvendor) && (subdevice == PCI_ANY_ID || id->subdevice == subdevice) && !((id->class ^ class) & class_mask)) { list_del(&dynid->node); kfree(dynid); retval = count; break; } } spin_unlock(&pdrv->dynids.lock); return retval; } static DRIVER_ATTR_WO(remove_id); static struct attribute *pci_drv_attrs[] = { &driver_attr_new_id.attr, &driver_attr_remove_id.attr, NULL, }; ATTRIBUTE_GROUPS(pci_drv); struct drv_dev_and_id { struct pci_driver *drv; struct pci_dev *dev; const struct pci_device_id *id; }; static long local_pci_probe(void *_ddi) { struct drv_dev_and_id *ddi = _ddi; struct pci_dev *pci_dev = ddi->dev; struct pci_driver *pci_drv = ddi->drv; struct device *dev = &pci_dev->dev; int rc; /* * Unbound PCI devices are always put in D0, regardless of * runtime PM status. During probe, the device is set to * active and the usage count is incremented. If the driver * supports runtime PM, it should call pm_runtime_put_noidle(), * or any other runtime PM helper function decrementing the usage * count, in its probe routine and pm_runtime_get_noresume() in * its remove routine. */ pm_runtime_get_sync(dev); pci_dev->driver = pci_drv; rc = pci_drv->probe(pci_dev, ddi->id); if (!rc) return rc; if (rc < 0) { pci_dev->driver = NULL; pm_runtime_put_sync(dev); return rc; } /* * Probe function should return < 0 for failure, 0 for success * Treat values > 0 as success, but warn. */ pci_warn(pci_dev, "Driver probe function unexpectedly returned %d\n", rc); return 0; } static bool pci_physfn_is_probed(struct pci_dev *dev) { #ifdef CONFIG_PCI_IOV return dev->is_virtfn && dev->physfn->is_probed; #else return false; #endif } static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, const struct pci_device_id *id) { int error, node, cpu; struct drv_dev_and_id ddi = { drv, dev, id }; /* * Execute driver initialization on node where the device is * attached. This way the driver likely allocates its local memory * on the right node. */ node = dev_to_node(&dev->dev); dev->is_probed = 1; cpu_hotplug_disable(); /* * Prevent nesting work_on_cpu() for the case where a Virtual Function * device is probed from work_on_cpu() of the Physical device. 
*/ if (node < 0 || node >= MAX_NUMNODES || !node_online(node) || pci_physfn_is_probed(dev)) { cpu = nr_cpu_ids; } else { cpumask_var_t wq_domain_mask; if (!zalloc_cpumask_var(&wq_domain_mask, GFP_KERNEL)) { error = -ENOMEM; goto out; } cpumask_and(wq_domain_mask, housekeeping_cpumask(HK_TYPE_WQ), housekeeping_cpumask(HK_TYPE_DOMAIN)); cpu = cpumask_any_and(cpumask_of_node(node), wq_domain_mask); free_cpumask_var(wq_domain_mask); } if (cpu < nr_cpu_ids) error = work_on_cpu(cpu, local_pci_probe, &ddi); else error = local_pci_probe(&ddi); out: dev->is_probed = 0; cpu_hotplug_enable(); return error; } /** * __pci_device_probe - check if a driver wants to claim a specific PCI device * @drv: driver to call to check if it wants the PCI device * @pci_dev: PCI device being probed * * returns 0 on success, else error. * side-effect: pci_dev->driver is set to drv when drv claims pci_dev. */ static int __pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev) { const struct pci_device_id *id; int error = 0; if (drv->probe) { error = -ENODEV; id = pci_match_device(drv, pci_dev); if (id) error = pci_call_probe(drv, pci_dev, id); } return error; } int __weak pcibios_alloc_irq(struct pci_dev *dev) { return 0; } void __weak pcibios_free_irq(struct pci_dev *dev) { } #ifdef CONFIG_PCI_IOV static inline bool pci_device_can_probe(struct pci_dev *pdev) { return (!pdev->is_virtfn || pdev->physfn->sriov->drivers_autoprobe || pdev->driver_override); } #else static inline bool pci_device_can_probe(struct pci_dev *pdev) { return true; } #endif static int pci_device_probe(struct device *dev) { int error; struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = to_pci_driver(dev->driver); if (!pci_device_can_probe(pci_dev)) return -ENODEV; pci_assign_irq(pci_dev); error = pcibios_alloc_irq(pci_dev); if (error < 0) return error; pci_dev_get(pci_dev); error = __pci_device_probe(drv, pci_dev); if (error) { pcibios_free_irq(pci_dev); pci_dev_put(pci_dev); } return error; } static void pci_device_remove(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = pci_dev->driver; if (drv->remove) { pm_runtime_get_sync(dev); drv->remove(pci_dev); pm_runtime_put_noidle(dev); } pcibios_free_irq(pci_dev); pci_dev->driver = NULL; pci_iov_remove(pci_dev); /* Undo the runtime PM settings in local_pci_probe() */ pm_runtime_put_sync(dev); /* * If the device is still on, set the power state as "unknown", * since it might change by the next time we load the driver. */ if (pci_dev->current_state == PCI_D0) pci_dev->current_state = PCI_UNKNOWN; /* * We would love to complain here if pci_dev->is_enabled is set, that * the driver should have called pci_disable_device(), but the * unfortunate fact is there are too many odd BIOS and bridge setups * that don't like drivers doing that all of the time. * Oh well, we can dream of sane hardware when we sleep, no matter how * horrible the crap we have to deal with is when we are awake... */ pci_dev_put(pci_dev); } static void pci_device_shutdown(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = pci_dev->driver; pm_runtime_resume(dev); if (drv && drv->shutdown) drv->shutdown(pci_dev); /* * If this is a kexec reboot, turn off Bus Master bit on the * device to tell it to not continue to do DMA. Don't touch * devices in D3cold or unknown states. * If it is not a kexec reboot, firmware will hit the PCI * devices with big hammer and stop their DMA any way. 
*/ if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot)) pci_clear_master(pci_dev); } #ifdef CONFIG_PM_SLEEP /* Auxiliary functions used for system resume */ /** * pci_restore_standard_config - restore standard config registers of PCI device * @pci_dev: PCI device to handle */ static int pci_restore_standard_config(struct pci_dev *pci_dev) { pci_update_current_state(pci_dev, PCI_UNKNOWN); if (pci_dev->current_state != PCI_D0) { int error = pci_set_power_state(pci_dev, PCI_D0); if (error) return error; } pci_restore_state(pci_dev); pci_pme_restore(pci_dev); return 0; } #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM /* Auxiliary functions used for system resume and run-time resume */ static void pci_pm_default_resume(struct pci_dev *pci_dev) { pci_fixup_device(pci_fixup_resume, pci_dev); pci_enable_wake(pci_dev, PCI_D0, false); } static void pci_pm_power_up_and_verify_state(struct pci_dev *pci_dev) { pci_power_up(pci_dev); pci_update_current_state(pci_dev, PCI_D0); } static void pci_pm_default_resume_early(struct pci_dev *pci_dev) { pci_pm_power_up_and_verify_state(pci_dev); pci_restore_state(pci_dev); pci_pme_restore(pci_dev); } static void pci_pm_bridge_power_up_actions(struct pci_dev *pci_dev) { pci_bridge_wait_for_secondary_bus(pci_dev, "resume"); /* * When powering on a bridge from D3cold, the whole hierarchy may be * powered on into D0uninitialized state, resume them to give them a * chance to suspend again */ pci_resume_bus(pci_dev->subordinate); } #endif /* CONFIG_PM */ #ifdef CONFIG_PM_SLEEP /* * Default "suspend" method for devices that have no driver provided suspend, * or not even a driver at all (second part). */ static void pci_pm_set_unknown_state(struct pci_dev *pci_dev) { /* * mark its power state as "unknown", since we don't know if * e.g. the BIOS will change its device state when we suspend. */ if (pci_dev->current_state == PCI_D0) pci_dev->current_state = PCI_UNKNOWN; } /* * Default "resume" method for devices that have no driver provided resume, * or not even a driver at all (second part). */ static int pci_pm_reenable_device(struct pci_dev *pci_dev) { int retval; /* if the device was enabled before suspend, re-enable */ retval = pci_reenable_device(pci_dev); /* * if the device was busmaster before the suspend, make it busmaster * again */ if (pci_dev->is_busmaster) pci_set_master(pci_dev); return retval; } static int pci_legacy_suspend(struct device *dev, pm_message_t state) { struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = pci_dev->driver; if (drv && drv->suspend) { pci_power_t prev = pci_dev->current_state; int error; error = drv->suspend(pci_dev, state); suspend_report_result(dev, drv->suspend, error); if (error) return error; if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev, "PCI PM: Device state not saved by %pS\n", drv->suspend); } } pci_fixup_device(pci_fixup_suspend, pci_dev); return 0; } static int pci_legacy_suspend_late(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); if (!pci_dev->state_saved) pci_save_state(pci_dev); pci_pm_set_unknown_state(pci_dev); pci_fixup_device(pci_fixup_suspend_late, pci_dev); return 0; } static int pci_legacy_resume(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *drv = pci_dev->driver; pci_fixup_device(pci_fixup_resume, pci_dev); return drv && drv->resume ? 
drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev); } /* Auxiliary functions used by the new power management framework */ static void pci_pm_default_suspend(struct pci_dev *pci_dev) { /* Disable non-bridge devices without PM support */ if (!pci_has_subordinate(pci_dev)) pci_disable_enabled_device(pci_dev); } static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev) { struct pci_driver *drv = pci_dev->driver; bool ret = drv && (drv->suspend || drv->resume); /* * Legacy PM support is used by default, so warn if the new framework is * supported as well. Drivers are supposed to support either the * former, or the latter, but not both at the same time. */ pci_WARN(pci_dev, ret && drv->driver.pm, "device %04x:%04x\n", pci_dev->vendor, pci_dev->device); return ret; } /* New power management framework */ static int pci_pm_prepare(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pm && pm->prepare) { int error = pm->prepare(dev); if (error < 0) return error; if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE)) return 0; } if (pci_dev_need_resume(pci_dev)) return 0; /* * The PME setting needs to be adjusted here in case the direct-complete * optimization is used with respect to this device. */ pci_dev_adjust_pme(pci_dev); return 1; } static void pci_pm_complete(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); pci_dev_complete_resume(pci_dev); pm_generic_complete(dev); /* Resume device if platform firmware has put it in reset-power-on */ if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) { pci_power_t pre_sleep_state = pci_dev->current_state; pci_refresh_power_state(pci_dev); /* * On platforms with ACPI this check may also trigger for * devices sharing power resources if one of those power * resources has been activated as a result of a change of the * power state of another device sharing it. However, in that * case it is also better to resume the device, in general. */ if (pci_dev->current_state < pre_sleep_state) pm_request_resume(dev); } } #else /* !CONFIG_PM_SLEEP */ #define pci_pm_prepare NULL #define pci_pm_complete NULL #endif /* !CONFIG_PM_SLEEP */ #ifdef CONFIG_SUSPEND static void pcie_pme_root_status_cleanup(struct pci_dev *pci_dev) { /* * Some BIOSes forget to clear Root PME Status bits after system * wakeup, which breaks ACPI-based runtime wakeup on PCI Express. * Clear those bits now just in case (shouldn't hurt). */ if (pci_is_pcie(pci_dev) && (pci_pcie_type(pci_dev) == PCI_EXP_TYPE_ROOT_PORT || pci_pcie_type(pci_dev) == PCI_EXP_TYPE_RC_EC)) pcie_clear_root_pme_status(pci_dev); } static int pci_pm_suspend(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; pci_dev->skip_bus_pm = false; /* * Disabling PTM allows some systems, e.g., Intel mobile chips * since Coffee Lake, to enter a lower-power PM state. */ pci_suspend_ptm(pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend(dev, PMSG_SUSPEND); if (!pm) { pci_pm_default_suspend(pci_dev); return 0; } /* * PCI devices suspended at run time may need to be resumed at this * point, because in general it may be necessary to reconfigure them for * system suspend. 
Namely, if the device is expected to wake up the * system from the sleep state, it may have to be reconfigured for this * purpose, or if the device is not expected to wake up the system from * the sleep state, it should be prevented from signaling wakeup events * going forward. * * Also if the driver of the device does not indicate that its system * suspend callbacks can cope with runtime-suspended devices, it is * better to resume the device from runtime suspend here. */ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) || pci_dev_need_resume(pci_dev)) { pm_runtime_resume(dev); pci_dev->state_saved = false; } else { pci_dev_adjust_pme(pci_dev); } if (pm->suspend) { pci_power_t prev = pci_dev->current_state; int error; error = pm->suspend(dev); suspend_report_result(dev, pm->suspend, error); if (error) return error; if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev, "PCI PM: State of device not saved by %pS\n", pm->suspend); } } return 0; } static int pci_pm_suspend_late(struct device *dev) { if (dev_pm_skip_suspend(dev)) return 0; pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev)); return pm_generic_suspend_late(dev); } static int pci_pm_suspend_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (dev_pm_skip_suspend(dev)) return 0; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend_late(dev); if (!pm) { pci_save_state(pci_dev); goto Fixup; } if (pm->suspend_noirq) { pci_power_t prev = pci_dev->current_state; int error; error = pm->suspend_noirq(dev); suspend_report_result(dev, pm->suspend_noirq, error); if (error) return error; if (!pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev, "PCI PM: State of device not saved by %pS\n", pm->suspend_noirq); goto Fixup; } } if (!pci_dev->state_saved) { pci_save_state(pci_dev); /* * If the device is a bridge with a child in D0 below it, * it needs to stay in D0, so check skip_bus_pm to avoid * putting it into a low-power state in that case. */ if (!pci_dev->skip_bus_pm && pci_power_manageable(pci_dev)) pci_prepare_to_sleep(pci_dev); } pci_dbg(pci_dev, "PCI PM: Suspend power state: %s\n", pci_power_name(pci_dev->current_state)); if (pci_dev->current_state == PCI_D0) { pci_dev->skip_bus_pm = true; /* * Per PCI PM r1.2, table 6-1, a bridge must be in D0 if any * downstream device is in D0, so avoid changing the power state * of the parent bridge by setting the skip_bus_pm flag for it. */ if (pci_dev->bus->self) pci_dev->bus->self->skip_bus_pm = true; } if (pci_dev->skip_bus_pm && pm_suspend_no_platform()) { pci_dbg(pci_dev, "PCI PM: Skipped\n"); goto Fixup; } pci_pm_set_unknown_state(pci_dev); /* * Some BIOSes from ASUS have a bug: If a USB EHCI host controller's * PCI COMMAND register isn't 0, the BIOS assumes that the controller * hasn't been quiesced and tries to turn it off. If the controller * is already in D3, this can hang or cause memory corruption. * * Since the value of the COMMAND register doesn't matter once the * device has been suspended, we can safely set it to 0 here. 
*/ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) pci_write_config_word(pci_dev, PCI_COMMAND, 0); Fixup: pci_fixup_device(pci_fixup_suspend_late, pci_dev); /* * If the target system sleep state is suspend-to-idle, it is sufficient * to check whether or not the device's wakeup settings are good for * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause * pci_pm_complete() to take care of fixing up the device's state * anyway, if need be. */ if (device_can_wakeup(dev) && !device_may_wakeup(dev)) dev->power.may_skip_resume = false; return 0; } static int pci_pm_resume_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; pci_power_t prev_state = pci_dev->current_state; bool skip_bus_pm = pci_dev->skip_bus_pm; if (dev_pm_skip_resume(dev)) return 0; /* * In the suspend-to-idle case, devices left in D0 during suspend will * stay in D0, so it is not necessary to restore or update their * configuration here and attempting to put them into D0 again is * pointless, so avoid doing that. */ if (!(skip_bus_pm && pm_suspend_no_platform())) pci_pm_default_resume_early(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); pcie_pme_root_status_cleanup(pci_dev); if (!skip_bus_pm && prev_state == PCI_D3cold) pci_pm_bridge_power_up_actions(pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return 0; if (pm && pm->resume_noirq) return pm->resume_noirq(dev); return 0; } static int pci_pm_resume_early(struct device *dev) { if (dev_pm_skip_resume(dev)) return 0; return pm_generic_resume_early(dev); } static int pci_pm_resume(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; /* * This is necessary for the suspend error path in which resume is * called without restoring the standard config registers of the device. */ if (pci_dev->state_saved) pci_restore_standard_config(pci_dev); pci_resume_ptm(pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume(dev); pci_pm_default_resume(pci_dev); if (pm) { if (pm->resume) return pm->resume(dev); } else { pci_pm_reenable_device(pci_dev); } return 0; } #else /* !CONFIG_SUSPEND */ #define pci_pm_suspend NULL #define pci_pm_suspend_late NULL #define pci_pm_suspend_noirq NULL #define pci_pm_resume NULL #define pci_pm_resume_early NULL #define pci_pm_resume_noirq NULL #endif /* !CONFIG_SUSPEND */ #ifdef CONFIG_HIBERNATE_CALLBACKS static int pci_pm_freeze(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend(dev, PMSG_FREEZE); if (!pm) { pci_pm_default_suspend(pci_dev); return 0; } /* * Resume all runtime-suspended devices before creating a snapshot * image of system memory, because the restore kernel generally cannot * be expected to always handle them consistently and they need to be * put into the runtime-active metastate during system resume anyway, * so it is better to ensure that the state saved in the image will be * always consistent with that. */ pm_runtime_resume(dev); pci_dev->state_saved = false; if (pm->freeze) { int error; error = pm->freeze(dev); suspend_report_result(dev, pm->freeze, error); if (error) return error; } return 0; } static int pci_pm_freeze_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend_late(dev); if (pm && pm->freeze_noirq) { int error; error = pm->freeze_noirq(dev); suspend_report_result(dev, pm->freeze_noirq, error); if (error) return error; } if (!pci_dev->state_saved) pci_save_state(pci_dev); pci_pm_set_unknown_state(pci_dev); return 0; } static int pci_pm_thaw_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; /* * The pm->thaw_noirq() callback assumes the device has been * returned to D0 and its config state has been restored. * * In addition, pci_restore_state() restores MSI-X state in MMIO * space, which requires the device to be in D0, so return it to D0 * in case the driver's "freeze" callbacks put it into a low-power * state. */ pci_pm_power_up_and_verify_state(pci_dev); pci_restore_state(pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return 0; if (pm && pm->thaw_noirq) return pm->thaw_noirq(dev); return 0; } static int pci_pm_thaw(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; int error = 0; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume(dev); if (pm) { if (pm->thaw) error = pm->thaw(dev); } else { pci_pm_reenable_device(pci_dev); } pci_dev->state_saved = false; return error; } static int pci_pm_poweroff(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend(dev, PMSG_HIBERNATE); if (!pm) { pci_pm_default_suspend(pci_dev); return 0; } /* The reason to do that is the same as in pci_pm_suspend(). */ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) || pci_dev_need_resume(pci_dev)) { pm_runtime_resume(dev); pci_dev->state_saved = false; } else { pci_dev_adjust_pme(pci_dev); } if (pm->poweroff) { int error; error = pm->poweroff(dev); suspend_report_result(dev, pm->poweroff, error); if (error) return error; } return 0; } static int pci_pm_poweroff_late(struct device *dev) { if (dev_pm_skip_suspend(dev)) return 0; pci_fixup_device(pci_fixup_suspend, to_pci_dev(dev)); return pm_generic_poweroff_late(dev); } static int pci_pm_poweroff_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; if (dev_pm_skip_suspend(dev)) return 0; if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend_late(dev); if (!pm) { pci_fixup_device(pci_fixup_suspend_late, pci_dev); return 0; } if (pm->poweroff_noirq) { int error; error = pm->poweroff_noirq(dev); suspend_report_result(dev, pm->poweroff_noirq, error); if (error) return error; } if (!pci_dev->state_saved && !pci_has_subordinate(pci_dev)) pci_prepare_to_sleep(pci_dev); /* * The reason for doing this here is the same as for the analogous code * in pci_pm_suspend_noirq(). */ if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) pci_write_config_word(pci_dev, PCI_COMMAND, 0); pci_fixup_device(pci_fixup_suspend_late, pci_dev); return 0; } static int pci_pm_restore_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; pci_pm_default_resume_early(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return 0; if (pm && pm->restore_noirq) return pm->restore_noirq(dev); return 0; } static int pci_pm_restore(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; /* * This is necessary for the hibernation error path in which restore is * called without restoring the standard config registers of the device. */ if (pci_dev->state_saved) pci_restore_standard_config(pci_dev); if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_resume(dev); pci_pm_default_resume(pci_dev); if (pm) { if (pm->restore) return pm->restore(dev); } else { pci_pm_reenable_device(pci_dev); } return 0; } #else /* !CONFIG_HIBERNATE_CALLBACKS */ #define pci_pm_freeze NULL #define pci_pm_freeze_noirq NULL #define pci_pm_thaw NULL #define pci_pm_thaw_noirq NULL #define pci_pm_poweroff NULL #define pci_pm_poweroff_late NULL #define pci_pm_poweroff_noirq NULL #define pci_pm_restore NULL #define pci_pm_restore_noirq NULL #endif /* !CONFIG_HIBERNATE_CALLBACKS */ #ifdef CONFIG_PM static int pci_pm_runtime_suspend(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; pci_power_t prev = pci_dev->current_state; int error; pci_suspend_ptm(pci_dev); /* * If pci_dev->driver is not set (unbound), we leave the device in D0, * but it may go to D3cold when the bridge above it runtime suspends. * Save its config space in case that happens. */ if (!pci_dev->driver) { pci_save_state(pci_dev); return 0; } pci_dev->state_saved = false; if (pm && pm->runtime_suspend) { error = pm->runtime_suspend(dev); /* * -EBUSY and -EAGAIN is used to request the runtime PM core * to schedule a new suspend, so log the event only with debug * log level. */ if (error == -EBUSY || error == -EAGAIN) { pci_dbg(pci_dev, "can't suspend now (%ps returned %d)\n", pm->runtime_suspend, error); return error; } else if (error) { pci_err(pci_dev, "can't suspend (%ps returned %d)\n", pm->runtime_suspend, error); return error; } } pci_fixup_device(pci_fixup_suspend, pci_dev); if (pm && pm->runtime_suspend && !pci_dev->state_saved && pci_dev->current_state != PCI_D0 && pci_dev->current_state != PCI_UNKNOWN) { pci_WARN_ONCE(pci_dev, pci_dev->current_state != prev, "PCI PM: State of device not saved by %pS\n", pm->runtime_suspend); return 0; } if (!pci_dev->state_saved) { pci_save_state(pci_dev); pci_finish_runtime_suspend(pci_dev); } return 0; } static int pci_pm_runtime_resume(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; pci_power_t prev_state = pci_dev->current_state; int error = 0; /* * Restoring config space is necessary even if the device is not bound * to a driver because although we left it in D0, it may have gone to * D3cold when the bridge above it runtime suspended. */ pci_pm_default_resume_early(pci_dev); pci_resume_ptm(pci_dev); if (!pci_dev->driver) return 0; pci_fixup_device(pci_fixup_resume_early, pci_dev); pci_pm_default_resume(pci_dev); if (prev_state == PCI_D3cold) pci_pm_bridge_power_up_actions(pci_dev); if (pm && pm->runtime_resume) error = pm->runtime_resume(dev); return error; } static int pci_pm_runtime_idle(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? 
dev->driver->pm : NULL; /* * If pci_dev->driver is not set (unbound), the device should * always remain in D0 regardless of the runtime PM status */ if (!pci_dev->driver) return 0; if (!pm) return -ENOSYS; if (pm->runtime_idle) return pm->runtime_idle(dev); return 0; } static const struct dev_pm_ops pci_dev_pm_ops = { .prepare = pci_pm_prepare, .complete = pci_pm_complete, .suspend = pci_pm_suspend, .suspend_late = pci_pm_suspend_late, .resume = pci_pm_resume, .resume_early = pci_pm_resume_early, .freeze = pci_pm_freeze, .thaw = pci_pm_thaw, .poweroff = pci_pm_poweroff, .poweroff_late = pci_pm_poweroff_late, .restore = pci_pm_restore, .suspend_noirq = pci_pm_suspend_noirq, .resume_noirq = pci_pm_resume_noirq, .freeze_noirq = pci_pm_freeze_noirq, .thaw_noirq = pci_pm_thaw_noirq, .poweroff_noirq = pci_pm_poweroff_noirq, .restore_noirq = pci_pm_restore_noirq, .runtime_suspend = pci_pm_runtime_suspend, .runtime_resume = pci_pm_runtime_resume, .runtime_idle = pci_pm_runtime_idle, }; #define PCI_PM_OPS_PTR (&pci_dev_pm_ops) #else /* !CONFIG_PM */ #define pci_pm_runtime_suspend NULL #define pci_pm_runtime_resume NULL #define pci_pm_runtime_idle NULL #define PCI_PM_OPS_PTR NULL #endif /* !CONFIG_PM */ /** * __pci_register_driver - register a new pci driver * @drv: the driver structure to register * @owner: owner module of drv * @mod_name: module name string * * Adds the driver structure to the list of registered drivers. * Returns a negative value on error, otherwise 0. * If no error occurred, the driver remains registered even if * no device was claimed during registration. */ int __pci_register_driver(struct pci_driver *drv, struct module *owner, const char *mod_name) { /* initialize common driver fields */ drv->driver.name = drv->name; drv->driver.bus = &pci_bus_type; drv->driver.owner = owner; drv->driver.mod_name = mod_name; drv->driver.groups = drv->groups; drv->driver.dev_groups = drv->dev_groups; spin_lock_init(&drv->dynids.lock); INIT_LIST_HEAD(&drv->dynids.list); /* register with core */ return driver_register(&drv->driver); } EXPORT_SYMBOL(__pci_register_driver); /** * pci_unregister_driver - unregister a pci driver * @drv: the driver structure to unregister * * Deletes the driver structure from the list of registered PCI drivers, * gives it a chance to clean up by calling its remove() function for * each device it was responsible for, and marks those devices as * driverless. */ void pci_unregister_driver(struct pci_driver *drv) { driver_unregister(&drv->driver); pci_free_dynids(drv); } EXPORT_SYMBOL(pci_unregister_driver); static struct pci_driver pci_compat_driver = { .name = "compat" }; /** * pci_dev_driver - get the pci_driver of a device * @dev: the device to query * * Returns the appropriate pci_driver structure or %NULL if there is no * registered driver for the device. */ struct pci_driver *pci_dev_driver(const struct pci_dev *dev) { int i; if (dev->driver) return dev->driver; for (i = 0; i <= PCI_ROM_RESOURCE; i++) if (dev->resource[i].flags & IORESOURCE_BUSY) return &pci_compat_driver; return NULL; } EXPORT_SYMBOL(pci_dev_driver); /** * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure * @dev: the PCI device structure to match against * @drv: the device driver to search for matching PCI device id structures * * Used by a driver to check whether a PCI device present in the * system is in its list of supported devices. Returns the matching * pci_device_id structure or %NULL if there is no match. 
*/ static int pci_bus_match(struct device *dev, struct device_driver *drv) { struct pci_dev *pci_dev = to_pci_dev(dev); struct pci_driver *pci_drv; const struct pci_device_id *found_id; if (!pci_dev->match_driver) return 0; pci_drv = to_pci_driver(drv); found_id = pci_match_device(pci_drv, pci_dev); if (found_id) return 1; return 0; } /** * pci_dev_get - increments the reference count of the pci device structure * @dev: the device being referenced * * Each live reference to a device should be refcounted. * * Drivers for PCI devices should normally record such references in * their probe() methods, when they bind to a device, and release * them by calling pci_dev_put(), in their disconnect() methods. * * A pointer to the device with the incremented reference counter is returned. */ struct pci_dev *pci_dev_get(struct pci_dev *dev) { if (dev) get_device(&dev->dev); return dev; } EXPORT_SYMBOL(pci_dev_get); /** * pci_dev_put - release a use of the pci device structure * @dev: device that's been disconnected * * Must be called when a user of a device is finished with it. When the last * user of the device calls this function, the memory of the device is freed. */ void pci_dev_put(struct pci_dev *dev) { if (dev) put_device(&dev->dev); } EXPORT_SYMBOL(pci_dev_put); static int pci_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct pci_dev *pdev; if (!dev) return -ENODEV; pdev = to_pci_dev(dev); if (add_uevent_var(env, "PCI_CLASS=%04X", pdev->class)) return -ENOMEM; if (add_uevent_var(env, "PCI_ID=%04X:%04X", pdev->vendor, pdev->device)) return -ENOMEM; if (add_uevent_var(env, "PCI_SUBSYS_ID=%04X:%04X", pdev->subsystem_vendor, pdev->subsystem_device)) return -ENOMEM; if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev))) return -ENOMEM; if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X", pdev->vendor, pdev->device, pdev->subsystem_vendor, pdev->subsystem_device, (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), (u8)(pdev->class))) return -ENOMEM; return 0; } #if defined(CONFIG_PCIEAER) || defined(CONFIG_EEH) /** * pci_uevent_ers - emit a uevent during recovery path of PCI device * @pdev: PCI device undergoing error recovery * @err_type: type of error event */ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type) { int idx = 0; char *envp[3]; switch (err_type) { case PCI_ERS_RESULT_NONE: case PCI_ERS_RESULT_CAN_RECOVER: envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY"; envp[idx++] = "DEVICE_ONLINE=0"; break; case PCI_ERS_RESULT_RECOVERED: envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY"; envp[idx++] = "DEVICE_ONLINE=1"; break; case PCI_ERS_RESULT_DISCONNECT: envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY"; envp[idx++] = "DEVICE_ONLINE=0"; break; default: break; } if (idx > 0) { envp[idx++] = NULL; kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp); } } #endif static int pci_bus_num_vf(struct device *dev) { return pci_num_vf(to_pci_dev(dev)); } /** * pci_dma_configure - Setup DMA configuration * @dev: ptr to dev structure * * Function to update PCI devices's DMA configuration using the same * info from the OF node or ACPI node of host bridge's parent (if any). 
*/ static int pci_dma_configure(struct device *dev) { struct pci_driver *driver = to_pci_driver(dev->driver); struct device *bridge; int ret = 0; bridge = pci_get_host_bridge_device(to_pci_dev(dev)); if (IS_ENABLED(CONFIG_OF) && bridge->parent && bridge->parent->of_node) { ret = of_dma_configure(dev, bridge->parent->of_node, true); } else if (has_acpi_companion(bridge)) { struct acpi_device *adev = to_acpi_device_node(bridge->fwnode); ret = acpi_dma_configure(dev, acpi_get_dma_attr(adev)); } pci_put_host_bridge_device(bridge); if (!ret && !driver->driver_managed_dma) { ret = iommu_device_use_default_domain(dev); if (ret) arch_teardown_dma_ops(dev); } return ret; } static void pci_dma_cleanup(struct device *dev) { struct pci_driver *driver = to_pci_driver(dev->driver); if (!driver->driver_managed_dma) iommu_device_unuse_default_domain(dev); } struct bus_type pci_bus_type = { .name = "pci", .match = pci_bus_match, .uevent = pci_uevent, .probe = pci_device_probe, .remove = pci_device_remove, .shutdown = pci_device_shutdown, .dev_groups = pci_dev_groups, .bus_groups = pci_bus_groups, .drv_groups = pci_drv_groups, .pm = PCI_PM_OPS_PTR, .num_vf = pci_bus_num_vf, .dma_configure = pci_dma_configure, .dma_cleanup = pci_dma_cleanup, }; EXPORT_SYMBOL(pci_bus_type); #ifdef CONFIG_PCIEPORTBUS static int pcie_port_bus_match(struct device *dev, struct device_driver *drv) { struct pcie_device *pciedev; struct pcie_port_service_driver *driver; if (drv->bus != &pcie_port_bus_type || dev->bus != &pcie_port_bus_type) return 0; pciedev = to_pcie_device(dev); driver = to_service_driver(drv); if (driver->service != pciedev->service) return 0; if (driver->port_type != PCIE_ANY_PORT && driver->port_type != pci_pcie_type(pciedev->port)) return 0; return 1; } struct bus_type pcie_port_bus_type = { .name = "pci_express", .match = pcie_port_bus_match, }; #endif static int __init pci_driver_init(void) { int ret; ret = bus_register(&pci_bus_type); if (ret) return ret; #ifdef CONFIG_PCIEPORTBUS ret = bus_register(&pcie_port_bus_type); if (ret) return ret; #endif dma_debug_add_bus(&pci_bus_type); return 0; } postcore_initcall(pci_driver_init);
linux-master
drivers/pci/pci-driver.c
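The dispatch code in pci-driver.c above only becomes visible through a struct pci_driver supplied by an individual driver. Below is a minimal sketch, not taken from the tree: the 0x1234/0x5678 IDs and the "demo_pci" name are hypothetical, and pcim_enable_device() is used so that remove() needs no explicit teardown.

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical vendor/device */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

/* Reached via pci_device_probe() -> __pci_device_probe() -> pci_call_probe()
 * once pci_match_device() finds a matching static or dynamic ID. */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pcim_enable_device(pdev);		/* managed: undone on unbind */
	if (err)
		return err;

	pci_set_master(pdev);
	return 0;
}

/* Called from pci_device_remove(); nothing to undo by hand here. */
static void demo_remove(struct pci_dev *pdev)
{
}

static struct pci_driver demo_driver = {
	.name		= "demo_pci",
	.id_table	= demo_ids,
	.probe		= demo_probe,
	.remove		= demo_remove,
};
module_pci_driver(demo_driver);		/* wraps pci_register_driver()/__pci_register_driver() */

MODULE_LICENSE("GPL");

Once such a driver is loaded, an additional ID can be bound at run time through the new_id attribute implemented by new_id_store(), e.g. echo 1234 5679 > /sys/bus/pci/drivers/demo_pci/new_id, which lands in pci_add_dynid() and re-runs driver_attach().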
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2006 Matthew Wilcox <[email protected]> * Copyright (C) 2006-2009 Hewlett-Packard Development Company, L.P. * Alex Chiang <[email protected]> */ #include <linux/kobject.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/err.h> #include "pci.h" struct kset *pci_slots_kset; EXPORT_SYMBOL_GPL(pci_slots_kset); static ssize_t pci_slot_attr_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct pci_slot *slot = to_pci_slot(kobj); struct pci_slot_attribute *attribute = to_pci_slot_attr(attr); return attribute->show ? attribute->show(slot, buf) : -EIO; } static ssize_t pci_slot_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t len) { struct pci_slot *slot = to_pci_slot(kobj); struct pci_slot_attribute *attribute = to_pci_slot_attr(attr); return attribute->store ? attribute->store(slot, buf, len) : -EIO; } static const struct sysfs_ops pci_slot_sysfs_ops = { .show = pci_slot_attr_show, .store = pci_slot_attr_store, }; static ssize_t address_read_file(struct pci_slot *slot, char *buf) { if (slot->number == 0xff) return sysfs_emit(buf, "%04x:%02x\n", pci_domain_nr(slot->bus), slot->bus->number); return sysfs_emit(buf, "%04x:%02x:%02x\n", pci_domain_nr(slot->bus), slot->bus->number, slot->number); } static ssize_t bus_speed_read(enum pci_bus_speed speed, char *buf) { return sysfs_emit(buf, "%s\n", pci_speed_string(speed)); } static ssize_t max_speed_read_file(struct pci_slot *slot, char *buf) { return bus_speed_read(slot->bus->max_bus_speed, buf); } static ssize_t cur_speed_read_file(struct pci_slot *slot, char *buf) { return bus_speed_read(slot->bus->cur_bus_speed, buf); } static void pci_slot_release(struct kobject *kobj) { struct pci_dev *dev; struct pci_slot *slot = to_pci_slot(kobj); dev_dbg(&slot->bus->dev, "dev %02x, released physical slot %s\n", slot->number, pci_slot_name(slot)); down_read(&pci_bus_sem); list_for_each_entry(dev, &slot->bus->devices, bus_list) if (PCI_SLOT(dev->devfn) == slot->number) dev->slot = NULL; up_read(&pci_bus_sem); list_del(&slot->list); kfree(slot); } static struct pci_slot_attribute pci_slot_attr_address = __ATTR(address, S_IRUGO, address_read_file, NULL); static struct pci_slot_attribute pci_slot_attr_max_speed = __ATTR(max_bus_speed, S_IRUGO, max_speed_read_file, NULL); static struct pci_slot_attribute pci_slot_attr_cur_speed = __ATTR(cur_bus_speed, S_IRUGO, cur_speed_read_file, NULL); static struct attribute *pci_slot_default_attrs[] = { &pci_slot_attr_address.attr, &pci_slot_attr_max_speed.attr, &pci_slot_attr_cur_speed.attr, NULL, }; ATTRIBUTE_GROUPS(pci_slot_default); static const struct kobj_type pci_slot_ktype = { .sysfs_ops = &pci_slot_sysfs_ops, .release = &pci_slot_release, .default_groups = pci_slot_default_groups, }; static char *make_slot_name(const char *name) { char *new_name; int len, max, dup; new_name = kstrdup(name, GFP_KERNEL); if (!new_name) return NULL; /* * Make sure we hit the realloc case the first time through the * loop. 'len' will be strlen(name) + 3 at that point which is * enough space for "name-X" and the trailing NUL. 
*/ len = strlen(name) + 2; max = 1; dup = 1; for (;;) { struct kobject *dup_slot; dup_slot = kset_find_obj(pci_slots_kset, new_name); if (!dup_slot) break; kobject_put(dup_slot); if (dup == max) { len++; max *= 10; kfree(new_name); new_name = kmalloc(len, GFP_KERNEL); if (!new_name) break; } sprintf(new_name, "%s-%d", name, dup++); } return new_name; } static int rename_slot(struct pci_slot *slot, const char *name) { int result = 0; char *slot_name; if (strcmp(pci_slot_name(slot), name) == 0) return result; slot_name = make_slot_name(name); if (!slot_name) return -ENOMEM; result = kobject_rename(&slot->kobj, slot_name); kfree(slot_name); return result; } void pci_dev_assign_slot(struct pci_dev *dev) { struct pci_slot *slot; mutex_lock(&pci_slot_mutex); list_for_each_entry(slot, &dev->bus->slots, list) if (PCI_SLOT(dev->devfn) == slot->number) dev->slot = slot; mutex_unlock(&pci_slot_mutex); } static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr) { struct pci_slot *slot; /* We already hold pci_slot_mutex */ list_for_each_entry(slot, &parent->slots, list) if (slot->number == slot_nr) { kobject_get(&slot->kobj); return slot; } return NULL; } /** * pci_create_slot - create or increment refcount for physical PCI slot * @parent: struct pci_bus of parent bridge * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder * @name: user visible string presented in /sys/bus/pci/slots/<name> * @hotplug: set if caller is hotplug driver, NULL otherwise * * PCI slots have first class attributes such as address, speed, width, * and a &struct pci_slot is used to manage them. This interface will * either return a new &struct pci_slot to the caller, or if the pci_slot * already exists, its refcount will be incremented. * * Slots are uniquely identified by a @pci_bus, @slot_nr tuple. * * There are known platforms with broken firmware that assign the same * name to multiple slots. Workaround these broken platforms by renaming * the slots on behalf of the caller. If firmware assigns name N to * multiple slots: * * The first slot is assigned N * The second slot is assigned N-1 * The third slot is assigned N-2 * etc. * * Placeholder slots: * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify * a slot. There is one notable exception - pSeries (rpaphp), where the * @slot_nr cannot be determined until a device is actually inserted into * the slot. In this scenario, the caller may pass -1 for @slot_nr. * * The following semantics are imposed when the caller passes @slot_nr == * -1. First, we no longer check for an existing %struct pci_slot, as there * may be many slots with @slot_nr of -1. The other change in semantics is * user-visible, which is the 'address' parameter presented in sysfs will * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the * %struct pci_bus and bb is the bus number. In other words, the devfn of * the 'placeholder' slot will not be displayed. */ struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, const char *name, struct hotplug_slot *hotplug) { struct pci_dev *dev; struct pci_slot *slot; int err = 0; char *slot_name = NULL; mutex_lock(&pci_slot_mutex); if (slot_nr == -1) goto placeholder; /* * Hotplug drivers are allowed to rename an existing slot, * but only if not already claimed. */ slot = get_slot(parent, slot_nr); if (slot) { if (hotplug) { if ((err = slot->hotplug ? 
-EBUSY : 0) || (err = rename_slot(slot, name))) { kobject_put(&slot->kobj); slot = NULL; goto err; } } goto out; } placeholder: slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) { err = -ENOMEM; goto err; } slot->bus = parent; slot->number = slot_nr; slot->kobj.kset = pci_slots_kset; slot_name = make_slot_name(name); if (!slot_name) { err = -ENOMEM; kfree(slot); goto err; } INIT_LIST_HEAD(&slot->list); list_add(&slot->list, &parent->slots); err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, "%s", slot_name); if (err) { kobject_put(&slot->kobj); goto err; } down_read(&pci_bus_sem); list_for_each_entry(dev, &parent->devices, bus_list) if (PCI_SLOT(dev->devfn) == slot_nr) dev->slot = slot; up_read(&pci_bus_sem); dev_dbg(&parent->dev, "dev %02x, created physical slot %s\n", slot_nr, pci_slot_name(slot)); out: kfree(slot_name); mutex_unlock(&pci_slot_mutex); return slot; err: slot = ERR_PTR(err); goto out; } EXPORT_SYMBOL_GPL(pci_create_slot); /** * pci_destroy_slot - decrement refcount for physical PCI slot * @slot: struct pci_slot to decrement * * %struct pci_slot is refcounted, so destroying them is really easy; we * just call kobject_put on its kobj and let our release methods do the * rest. */ void pci_destroy_slot(struct pci_slot *slot) { dev_dbg(&slot->bus->dev, "dev %02x, dec refcount to %d\n", slot->number, kref_read(&slot->kobj.kref) - 1); mutex_lock(&pci_slot_mutex); kobject_put(&slot->kobj); mutex_unlock(&pci_slot_mutex); } EXPORT_SYMBOL_GPL(pci_destroy_slot); #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) #include <linux/pci_hotplug.h> /** * pci_hp_create_module_link - create symbolic link to hotplug driver module * @pci_slot: struct pci_slot * * Helper function for pci_hotplug_core.c to create symbolic link to * the hotplug driver module. */ void pci_hp_create_module_link(struct pci_slot *pci_slot) { struct hotplug_slot *slot = pci_slot->hotplug; struct kobject *kobj = NULL; int ret; if (!slot || !slot->ops) return; kobj = kset_find_obj(module_kset, slot->mod_name); if (!kobj) return; ret = sysfs_create_link(&pci_slot->kobj, kobj, "module"); if (ret) dev_err(&pci_slot->bus->dev, "Error creating sysfs link (%d)\n", ret); kobject_put(kobj); } EXPORT_SYMBOL_GPL(pci_hp_create_module_link); /** * pci_hp_remove_module_link - remove symbolic link to the hotplug driver * module. * @pci_slot: struct pci_slot * * Helper function for pci_hotplug_core.c to remove symbolic link to * the hotplug driver module. */ void pci_hp_remove_module_link(struct pci_slot *pci_slot) { sysfs_remove_link(&pci_slot->kobj, "module"); } EXPORT_SYMBOL_GPL(pci_hp_remove_module_link); #endif static int pci_slot_init(void) { struct kset *pci_bus_kset; pci_bus_kset = bus_get_kset(&pci_bus_type); pci_slots_kset = kset_create_and_add("slots", NULL, &pci_bus_kset->kobj); if (!pci_slots_kset) { pr_err("PCI: Slot initialization failure\n"); return -ENOMEM; } return 0; } subsys_initcall(pci_slot_init);
linux-master
drivers/pci/slot.c
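The slot kobjects created by slot.c above are normally registered by hotplug controller drivers and firmware glue. A minimal sketch under assumed conditions, with a made-up controller that knows about a single physical slot at device position 3 of a bus it already holds:

#include <linux/err.h>
#include <linux/pci.h>

static struct pci_slot *demo_slot;

static int demo_add_slot(struct pci_bus *bus)
{
	/* "3" is the user-visible name under /sys/bus/pci/slots/; passing
	 * -1 instead of a slot number would create a placeholder slot, and
	 * NULL for the last argument marks this as a plain physical slot. */
	demo_slot = pci_create_slot(bus, 3, "3", NULL);
	if (IS_ERR(demo_slot))
		return PTR_ERR(demo_slot);

	/* address, max_bus_speed and cur_bus_speed attributes now exist */
	return 0;
}

static void demo_remove_slot(void)
{
	/* drops the reference taken by pci_create_slot(); the kobject
	 * release detaches pci_dev->slot pointers and frees the slot */
	pci_destroy_slot(demo_slot);
}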
// SPDX-License-Identifier: GPL-2.0 /* * For architectures where we want to allow direct access to the PCI config * stuff - it would probably be preferable on PCs too, but there people * just do it by hand with the magic northbridge registers. */ #include <linux/errno.h> #include <linux/pci.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/uaccess.h> #include "pci.h" SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn, unsigned long, off, unsigned long, len, void __user *, buf) { struct pci_dev *dev; u8 byte; u16 word; u32 dword; int err, cfg_ret; err = -EPERM; dev = NULL; if (!capable(CAP_SYS_ADMIN)) goto error; err = -ENODEV; dev = pci_get_domain_bus_and_slot(0, bus, dfn); if (!dev) goto error; switch (len) { case 1: cfg_ret = pci_user_read_config_byte(dev, off, &byte); break; case 2: cfg_ret = pci_user_read_config_word(dev, off, &word); break; case 4: cfg_ret = pci_user_read_config_dword(dev, off, &dword); break; default: err = -EINVAL; goto error; } err = -EIO; if (cfg_ret) goto error; switch (len) { case 1: err = put_user(byte, (u8 __user *)buf); break; case 2: err = put_user(word, (u16 __user *)buf); break; case 4: err = put_user(dword, (u32 __user *)buf); break; } pci_dev_put(dev); return err; error: /* ??? XFree86 doesn't even check the return value. They just look for 0xffffffff in the output, since that's what they get instead of a machine check on x86. */ switch (len) { case 1: put_user(-1, (u8 __user *)buf); break; case 2: put_user(-1, (u16 __user *)buf); break; case 4: put_user(-1, (u32 __user *)buf); break; } pci_dev_put(dev); return err; } SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn, unsigned long, off, unsigned long, len, void __user *, buf) { struct pci_dev *dev; u8 byte; u16 word; u32 dword; int err = 0; if (!capable(CAP_SYS_ADMIN) || security_locked_down(LOCKDOWN_PCI_ACCESS)) return -EPERM; dev = pci_get_domain_bus_and_slot(0, bus, dfn); if (!dev) return -ENODEV; switch (len) { case 1: err = get_user(byte, (u8 __user *)buf); if (err) break; err = pci_user_write_config_byte(dev, off, byte); if (err) err = -EIO; break; case 2: err = get_user(word, (u16 __user *)buf); if (err) break; err = pci_user_write_config_word(dev, off, word); if (err) err = -EIO; break; case 4: err = get_user(dword, (u32 __user *)buf); if (err) break; err = pci_user_write_config_dword(dev, off, dword); if (err) err = -EIO; break; default: err = -EINVAL; break; } pci_dev_put(dev); return err; }
linux-master
drivers/pci/syscall.c
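The two syscalls in syscall.c above are only wired up on a few architectures; most user space reads config space through sysfs instead. A hedged userspace sketch, assuming the C library exposes SYS_pciconfig_read and the caller has CAP_SYS_ADMIN, that reads the Vendor ID of device 00:00.0:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
#ifdef SYS_pciconfig_read
	uint16_t vendor = 0xffff;
	/* args: bus, devfn, offset (0 = Vendor ID), length, buffer */
	long ret = syscall(SYS_pciconfig_read, 0UL, 0UL, 0UL, 2UL, &vendor);

	if (ret) {
		perror("pciconfig_read");	/* EPERM, ENODEV, EIO, ... */
		return 1;
	}
	printf("00:00.0 vendor id: 0x%04x\n", vendor);
	return 0;
#else
	fprintf(stderr, "pciconfig_read is not available on this architecture\n");
	return 1;
#endif
}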
// SPDX-License-Identifier: GPL-2.0 /* * PCI searching functions * * Copyright (C) 1993 -- 1997 Drew Eckhardt, Frederic Potter, * David Mosberger-Tang * Copyright (C) 1997 -- 2000 Martin Mares <[email protected]> * Copyright (C) 2003 -- 2004 Greg Kroah-Hartman <[email protected]> */ #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/interrupt.h> #include "pci.h" DECLARE_RWSEM(pci_bus_sem); /* * pci_for_each_dma_alias - Iterate over DMA aliases for a device * @pdev: starting downstream device * @fn: function to call for each alias * @data: opaque data to pass to @fn * * Starting @pdev, walk up the bus calling @fn for each possible alias * of @pdev at the root bus. */ int pci_for_each_dma_alias(struct pci_dev *pdev, int (*fn)(struct pci_dev *pdev, u16 alias, void *data), void *data) { struct pci_bus *bus; int ret; /* * The device may have an explicit alias requester ID for DMA where the * requester is on another PCI bus. */ pdev = pci_real_dma_dev(pdev); ret = fn(pdev, pci_dev_id(pdev), data); if (ret) return ret; /* * If the device is broken and uses an alias requester ID for * DMA, iterate over that too. */ if (unlikely(pdev->dma_alias_mask)) { unsigned int devfn; for_each_set_bit(devfn, pdev->dma_alias_mask, MAX_NR_DEVFNS) { ret = fn(pdev, PCI_DEVID(pdev->bus->number, devfn), data); if (ret) return ret; } } for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) { struct pci_dev *tmp; /* Skip virtual buses */ if (!bus->self) continue; tmp = bus->self; /* stop at bridge where translation unit is associated */ if (tmp->dev_flags & PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT) return ret; /* * PCIe-to-PCI/X bridges alias transactions from downstream * devices using the subordinate bus number (PCI Express to * PCI/PCI-X Bridge Spec, rev 1.0, sec 2.3). For all cases * where the upstream bus is PCI/X we alias to the bridge * (there are various conditions in the previous reference * where the bridge may take ownership of transactions, even * when the secondary interface is PCI-X). */ if (pci_is_pcie(tmp)) { switch (pci_pcie_type(tmp)) { case PCI_EXP_TYPE_ROOT_PORT: case PCI_EXP_TYPE_UPSTREAM: case PCI_EXP_TYPE_DOWNSTREAM: continue; case PCI_EXP_TYPE_PCI_BRIDGE: ret = fn(tmp, PCI_DEVID(tmp->subordinate->number, PCI_DEVFN(0, 0)), data); if (ret) return ret; continue; case PCI_EXP_TYPE_PCIE_BRIDGE: ret = fn(tmp, pci_dev_id(tmp), data); if (ret) return ret; continue; } } else { if (tmp->dev_flags & PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS) ret = fn(tmp, PCI_DEVID(tmp->subordinate->number, PCI_DEVFN(0, 0)), data); else ret = fn(tmp, pci_dev_id(tmp), data); if (ret) return ret; } } return ret; } static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr) { struct pci_bus *child; struct pci_bus *tmp; if (bus->number == busnr) return bus; list_for_each_entry(tmp, &bus->children, node) { child = pci_do_find_bus(tmp, busnr); if (child) return child; } return NULL; } /** * pci_find_bus - locate PCI bus from a given domain and bus number * @domain: number of PCI domain to search * @busnr: number of desired PCI bus * * Given a PCI bus number and domain number, the desired PCI bus is located * in the global list of PCI buses. If the bus is found, a pointer to its * data structure is returned. If no bus is found, %NULL is returned. 
*/ struct pci_bus *pci_find_bus(int domain, int busnr) { struct pci_bus *bus = NULL; struct pci_bus *tmp_bus; while ((bus = pci_find_next_bus(bus)) != NULL) { if (pci_domain_nr(bus) != domain) continue; tmp_bus = pci_do_find_bus(bus, busnr); if (tmp_bus) return tmp_bus; } return NULL; } EXPORT_SYMBOL(pci_find_bus); /** * pci_find_next_bus - begin or continue searching for a PCI bus * @from: Previous PCI bus found, or %NULL for new search. * * Iterates through the list of known PCI buses. A new search is * initiated by passing %NULL as the @from argument. Otherwise if * @from is not %NULL, searches continue from next device on the * global list. */ struct pci_bus *pci_find_next_bus(const struct pci_bus *from) { struct list_head *n; struct pci_bus *b = NULL; down_read(&pci_bus_sem); n = from ? from->node.next : pci_root_buses.next; if (n != &pci_root_buses) b = list_entry(n, struct pci_bus, node); up_read(&pci_bus_sem); return b; } EXPORT_SYMBOL(pci_find_next_bus); /** * pci_get_slot - locate PCI device for a given PCI slot * @bus: PCI bus on which desired PCI device resides * @devfn: encodes number of PCI slot in which the desired PCI * device resides and the logical device number within that slot * in case of multi-function devices. * * Given a PCI bus and slot/function number, the desired PCI device * is located in the list of PCI devices. * If the device is found, its reference count is increased and this * function returns a pointer to its data structure. The caller must * decrement the reference count by calling pci_dev_put(). * If no device is found, %NULL is returned. */ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn) { struct pci_dev *dev; down_read(&pci_bus_sem); list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->devfn == devfn) goto out; } dev = NULL; out: pci_dev_get(dev); up_read(&pci_bus_sem); return dev; } EXPORT_SYMBOL(pci_get_slot); /** * pci_get_domain_bus_and_slot - locate PCI device for a given PCI domain (segment), bus, and slot * @domain: PCI domain/segment on which the PCI device resides. * @bus: PCI bus on which desired PCI device resides * @devfn: encodes number of PCI slot in which the desired PCI device * resides and the logical device number within that slot in case of * multi-function devices. * * Given a PCI domain, bus, and slot/function number, the desired PCI * device is located in the list of PCI devices. If the device is * found, its reference count is increased and this function returns a * pointer to its data structure. The caller must decrement the * reference count by calling pci_dev_put(). If no device is found, * %NULL is returned. */ struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, unsigned int devfn) { struct pci_dev *dev = NULL; for_each_pci_dev(dev) { if (pci_domain_nr(dev->bus) == domain && (dev->bus->number == bus && dev->devfn == devfn)) return dev; } return NULL; } EXPORT_SYMBOL(pci_get_domain_bus_and_slot); static int match_pci_dev_by_id(struct device *dev, const void *data) { struct pci_dev *pdev = to_pci_dev(dev); const struct pci_device_id *id = data; if (pci_match_one_device(id, pdev)) return 1; return 0; } /* * pci_get_dev_by_id - begin or continue searching for a PCI device by id * @id: pointer to struct pci_device_id to match for the device * @from: Previous PCI device found in search, or %NULL for new search. * * Iterates through the list of known PCI devices. 
If a PCI device is found * with a matching id a pointer to its device structure is returned, and the * reference count to the device is incremented. Otherwise, %NULL is returned. * A new search is initiated by passing %NULL as the @from argument. Otherwise * if @from is not %NULL, searches continue from next device on the global * list. The reference count for @from is always decremented if it is not * %NULL. * * This is an internal function for use by the other search functions in * this file. */ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, struct pci_dev *from) { struct device *dev; struct device *dev_start = NULL; struct pci_dev *pdev = NULL; if (from) dev_start = &from->dev; dev = bus_find_device(&pci_bus_type, dev_start, (void *)id, match_pci_dev_by_id); if (dev) pdev = to_pci_dev(dev); pci_dev_put(from); return pdev; } /** * pci_get_subsys - begin or continue searching for a PCI device by vendor/subvendor/device/subdevice id * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids * @ss_vendor: PCI subsystem vendor id to match, or %PCI_ANY_ID to match all vendor ids * @ss_device: PCI subsystem device id to match, or %PCI_ANY_ID to match all device ids * @from: Previous PCI device found in search, or %NULL for new search. * * Iterates through the list of known PCI devices. If a PCI device is found * with a matching @vendor, @device, @ss_vendor and @ss_device, a pointer to its * device structure is returned, and the reference count to the device is * incremented. Otherwise, %NULL is returned. A new search is initiated by * passing %NULL as the @from argument. Otherwise if @from is not %NULL, * searches continue from next device on the global list. * The reference count for @from is always decremented if it is not %NULL. */ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, struct pci_dev *from) { struct pci_device_id id = { .vendor = vendor, .device = device, .subvendor = ss_vendor, .subdevice = ss_device, }; return pci_get_dev_by_id(&id, from); } EXPORT_SYMBOL(pci_get_subsys); /** * pci_get_device - begin or continue searching for a PCI device by vendor/device id * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids * @from: Previous PCI device found in search, or %NULL for new search. * * Iterates through the list of known PCI devices. If a PCI device is * found with a matching @vendor and @device, the reference count to the * device is incremented and a pointer to its device structure is returned. * Otherwise, %NULL is returned. A new search is initiated by passing %NULL * as the @from argument. Otherwise if @from is not %NULL, searches continue * from next device on the global list. The reference count for @from is * always decremented if it is not %NULL. */ struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from) { return pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); } EXPORT_SYMBOL(pci_get_device); /** * pci_get_class - begin or continue searching for a PCI device by class * @class: search for a PCI device with this class designation * @from: Previous PCI device found in search, or %NULL for new search. * * Iterates through the list of known PCI devices. 
If a PCI device is * found with a matching @class, the reference count to the device is * incremented and a pointer to its device structure is returned. * Otherwise, %NULL is returned. * A new search is initiated by passing %NULL as the @from argument. * Otherwise if @from is not %NULL, searches continue from next device * on the global list. The reference count for @from is always decremented * if it is not %NULL. */ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from) { struct pci_device_id id = { .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, .class_mask = PCI_ANY_ID, .class = class, }; return pci_get_dev_by_id(&id, from); } EXPORT_SYMBOL(pci_get_class); /** * pci_dev_present - Returns 1 if device matching the device list is present, 0 if not. * @ids: A pointer to a null terminated list of struct pci_device_id structures * that describe the type of PCI device the caller is trying to find. * * Obvious fact: You do not have a reference to any device that might be found * by this function, so if that device is removed from the system right after * this function is finished, the value will be stale. Use this function to * find devices that are usually built into a system, or for a general hint as * to if another device happens to be present at this specific moment in time. */ int pci_dev_present(const struct pci_device_id *ids) { struct pci_dev *found = NULL; while (ids->vendor || ids->subvendor || ids->class_mask) { found = pci_get_dev_by_id(ids, NULL); if (found) { pci_dev_put(found); return 1; } ids++; } return 0; } EXPORT_SYMBOL(pci_dev_present);
linux-master
drivers/pci/search.c
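/*
 * Illustrative sketch, not part of the kernel tree: a minimal consumer of the
 * search helpers above.  The module name "pci_search_demo" and the choice of
 * PCI_VENDOR_ID_INTEL are hypothetical.  pci_get_device() returns the next
 * match with its reference count raised and drops the reference on @from, so
 * the loop below needs no explicit pci_dev_put() once it runs to completion.
 */
#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id demo_present_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID) },
	{ }
};

static int __init pci_search_demo_init(void)
{
	struct pci_dev *pdev = NULL;

	/* Visit every Intel device; reference handling is done by the API. */
	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pdev)))
		pr_info("pci_search_demo: found %s\n", pci_name(pdev));

	/* Reference-free hint that at least one matching device exists. */
	if (pci_dev_present(demo_present_ids))
		pr_info("pci_search_demo: Intel device(s) present\n");

	return 0;
}

static void __exit pci_search_demo_exit(void)
{
}

module_init(pci_search_demo_init);
module_exit(pci_search_demo_exit);
MODULE_LICENSE("GPL");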
// SPDX-License-Identifier: GPL-2.0 /* pci-pf-stub - simple stub driver for PCI SR-IOV PF device * * This driver is meant to act as a "whitelist" for devices that provide * SR-IOV functionality while at the same time not actually needing a * driver of their own. */ #include <linux/module.h> #include <linux/pci.h> /* * pci_pf_stub_whitelist - White list of devices to bind pci-pf-stub onto * * This table provides the list of IDs this driver is supposed to bind * onto. You could think of this as a list of "quirked" devices where we * are adding support for SR-IOV here since there are no other drivers * that they would be running under. */ static const struct pci_device_id pci_pf_stub_whitelist[] = { { PCI_VDEVICE(AMAZON, 0x0053) }, /* required last entry */ { 0 } }; MODULE_DEVICE_TABLE(pci, pci_pf_stub_whitelist); static int pci_pf_stub_probe(struct pci_dev *dev, const struct pci_device_id *id) { pci_info(dev, "claimed by pci-pf-stub\n"); return 0; } static struct pci_driver pf_stub_driver = { .name = "pci-pf-stub", .id_table = pci_pf_stub_whitelist, .probe = pci_pf_stub_probe, .sriov_configure = pci_sriov_configure_simple, }; module_pci_driver(pf_stub_driver); MODULE_LICENSE("GPL");
linux-master
drivers/pci/pci-pf-stub.c
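/*
 * Illustrative sketch, not part of the kernel tree: a driver that needs more
 * than pci_sriov_configure_simple() can supply its own sriov_configure
 * callback.  The "demo" names and the Red Hat device ID are hypothetical;
 * driver-specific VF bookkeeping would go where the comment indicates.
 */
#include <linux/module.h>
#include <linux/pci.h>

static int demo_sriov_configure(struct pci_dev *dev, int num_vfs)
{
	int rc;

	if (num_vfs == 0) {
		if (pci_vfs_assigned(dev))
			return -EBUSY;	/* VFs still in use by guests */
		pci_disable_sriov(dev);
		return 0;
	}

	/* Driver-specific validation/allocation for num_vfs goes here. */
	rc = pci_enable_sriov(dev, num_vfs);
	if (rc)
		return rc;

	return num_vfs;
}

static int demo_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	return 0;
}

static const struct pci_device_id demo_ids[] = {
	{ PCI_VDEVICE(REDHAT, 0x0001) },	/* hypothetical ID */
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static struct pci_driver demo_pf_driver = {
	.name			= "demo-pf",
	.id_table		= demo_ids,
	.probe			= demo_probe,
	.sriov_configure	= demo_sriov_configure,
};
module_pci_driver(demo_pf_driver);
MODULE_LICENSE("GPL");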
// SPDX-License-Identifier: GPL-2.0 /* * Support routines for initializing a PCI subsystem * * Extruded from code written by * Dave Rusling ([email protected]) * David Mosberger ([email protected]) * David Miller ([email protected]) * * Fixed for multiple PCI buses, 1999 Andrea Arcangeli <[email protected]> * * Nov 2000, Ivan Kokshaysky <[email protected]> * Resource sorting */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/cache.h> #include <linux/slab.h> #include "pci.h" static void pci_std_update_resource(struct pci_dev *dev, int resno) { struct pci_bus_region region; bool disable; u16 cmd; u32 new, check, mask; int reg; struct resource *res = dev->resource + resno; /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */ if (dev->is_virtfn) return; /* * Ignore resources for unimplemented BARs and unused resource slots * for 64 bit BARs. */ if (!res->flags) return; if (res->flags & IORESOURCE_UNSET) return; /* * Ignore non-moveable resources. This might be legacy resources for * which no functional BAR register exists or another important * system resource we shouldn't move around. */ if (res->flags & IORESOURCE_PCI_FIXED) return; pcibios_resource_to_bus(dev->bus, &region, res); new = region.start; if (res->flags & IORESOURCE_IO) { mask = (u32)PCI_BASE_ADDRESS_IO_MASK; new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK; } else if (resno == PCI_ROM_RESOURCE) { mask = PCI_ROM_ADDRESS_MASK; } else { mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK; } if (resno < PCI_ROM_RESOURCE) { reg = PCI_BASE_ADDRESS_0 + 4 * resno; } else if (resno == PCI_ROM_RESOURCE) { /* * Apparently some Matrox devices have ROM BARs that read * as zero when disabled, so don't update ROM BARs unless * they're enabled. See * https://lore.kernel.org/r/[email protected]/ * But we must update ROM BAR for buggy devices where even a * disabled ROM can conflict with other BARs. */ if (!(res->flags & IORESOURCE_ROM_ENABLE) && !dev->rom_bar_overlap) return; reg = dev->rom_base_reg; if (res->flags & IORESOURCE_ROM_ENABLE) new |= PCI_ROM_ADDRESS_ENABLE; } else return; /* * We can't update a 64-bit BAR atomically, so when possible, * disable decoding so that a half-updated BAR won't conflict * with another device. 
*/ disable = (res->flags & IORESOURCE_MEM_64) && !dev->mmio_always_on; if (disable) { pci_read_config_word(dev, PCI_COMMAND, &cmd); pci_write_config_word(dev, PCI_COMMAND, cmd & ~PCI_COMMAND_MEMORY); } pci_write_config_dword(dev, reg, new); pci_read_config_dword(dev, reg, &check); if ((new ^ check) & mask) { pci_err(dev, "BAR %d: error updating (%#010x != %#010x)\n", resno, new, check); } if (res->flags & IORESOURCE_MEM_64) { new = region.start >> 16 >> 16; pci_write_config_dword(dev, reg + 4, new); pci_read_config_dword(dev, reg + 4, &check); if (check != new) { pci_err(dev, "BAR %d: error updating (high %#010x != %#010x)\n", resno, new, check); } } if (disable) pci_write_config_word(dev, PCI_COMMAND, cmd); } void pci_update_resource(struct pci_dev *dev, int resno) { if (resno <= PCI_ROM_RESOURCE) pci_std_update_resource(dev, resno); #ifdef CONFIG_PCI_IOV else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) pci_iov_update_resource(dev, resno); #endif } int pci_claim_resource(struct pci_dev *dev, int resource) { struct resource *res = &dev->resource[resource]; struct resource *root, *conflict; if (res->flags & IORESOURCE_UNSET) { pci_info(dev, "can't claim BAR %d %pR: no address assigned\n", resource, res); return -EINVAL; } /* * If we have a shadow copy in RAM, the PCI device doesn't respond * to the shadow range, so we don't need to claim it, and upstream * bridges don't need to route the range to the device. */ if (res->flags & IORESOURCE_ROM_SHADOW) return 0; root = pci_find_parent_resource(dev, res); if (!root) { pci_info(dev, "can't claim BAR %d %pR: no compatible bridge window\n", resource, res); res->flags |= IORESOURCE_UNSET; return -EINVAL; } conflict = request_resource_conflict(root, res); if (conflict) { pci_info(dev, "can't claim BAR %d %pR: address conflict with %s %pR\n", resource, res, conflict->name, conflict); res->flags |= IORESOURCE_UNSET; return -EBUSY; } return 0; } EXPORT_SYMBOL(pci_claim_resource); void pci_disable_bridge_window(struct pci_dev *dev) { /* MMIO Base/Limit */ pci_write_config_dword(dev, PCI_MEMORY_BASE, 0x0000fff0); /* Prefetchable MMIO Base/Limit */ pci_write_config_dword(dev, PCI_PREF_LIMIT_UPPER32, 0); pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0x0000fff0); pci_write_config_dword(dev, PCI_PREF_BASE_UPPER32, 0xffffffff); } /* * Generic function that returns a value indicating that the device's * original BIOS BAR address was not saved and so is not available for * reinstatement. * * Can be over-ridden by architecture specific code that implements * reinstatement functionality rather than leaving it disabled when * normal allocation attempts fail. */ resource_size_t __weak pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx) { return 0; } static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev, int resno, resource_size_t size) { struct resource *root, *conflict; resource_size_t fw_addr, start, end; fw_addr = pcibios_retrieve_fw_addr(dev, resno); if (!fw_addr) return -ENOMEM; start = res->start; end = res->end; res->start = fw_addr; res->end = res->start + size - 1; res->flags &= ~IORESOURCE_UNSET; root = pci_find_parent_resource(dev, res); if (!root) { /* * If dev is behind a bridge, accesses will only reach it * if res is inside the relevant bridge window. */ if (pci_upstream_bridge(dev)) return -ENXIO; /* * On the root bus, assume the host bridge will forward * everything. 
*/ if (res->flags & IORESOURCE_IO) root = &ioport_resource; else root = &iomem_resource; } pci_info(dev, "BAR %d: trying firmware assignment %pR\n", resno, res); conflict = request_resource_conflict(root, res); if (conflict) { pci_info(dev, "BAR %d: %pR conflicts with %s %pR\n", resno, res, conflict->name, conflict); res->start = start; res->end = end; res->flags |= IORESOURCE_UNSET; return -EBUSY; } return 0; } /* * We don't have to worry about legacy ISA devices, so nothing to do here. * This is marked as __weak because multiple architectures define it; it should * eventually go away. */ resource_size_t __weak pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { return res->start; } static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, int resno, resource_size_t size, resource_size_t align) { struct resource *res = dev->resource + resno; resource_size_t min; int ret; min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; /* * First, try exact prefetching match. Even if a 64-bit * prefetchable bridge window is below 4GB, we can't put a 32-bit * prefetchable resource in it because pbus_size_mem() assumes a * 64-bit window will contain no 32-bit resources. If we assign * things differently than they were sized, not everything will fit. */ ret = pci_bus_alloc_resource(bus, res, size, align, min, IORESOURCE_PREFETCH | IORESOURCE_MEM_64, pcibios_align_resource, dev); if (ret == 0) return 0; /* * If the prefetchable window is only 32 bits wide, we can put * 64-bit prefetchable resources in it. */ if ((res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) == (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) { ret = pci_bus_alloc_resource(bus, res, size, align, min, IORESOURCE_PREFETCH, pcibios_align_resource, dev); if (ret == 0) return 0; } /* * If we didn't find a better match, we can put any memory resource * in a non-prefetchable window. If this resource is 32 bits and * non-prefetchable, the first call already tried the only possibility * so we don't need to try again. */ if (res->flags & (IORESOURCE_PREFETCH | IORESOURCE_MEM_64)) ret = pci_bus_alloc_resource(bus, res, size, align, min, 0, pcibios_align_resource, dev); return ret; } static int _pci_assign_resource(struct pci_dev *dev, int resno, resource_size_t size, resource_size_t min_align) { struct pci_bus *bus; int ret; bus = dev->bus; while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) { if (!bus->parent || !bus->self->transparent) break; bus = bus->parent; } return ret; } int pci_assign_resource(struct pci_dev *dev, int resno) { struct resource *res = dev->resource + resno; resource_size_t align, size; int ret; if (res->flags & IORESOURCE_PCI_FIXED) return 0; res->flags |= IORESOURCE_UNSET; align = pci_resource_alignment(dev, res); if (!align) { pci_info(dev, "BAR %d: can't assign %pR (bogus alignment)\n", resno, res); return -EINVAL; } size = resource_size(res); ret = _pci_assign_resource(dev, resno, size, align); /* * If we failed to assign anything, let's try the address * where firmware left it. That at least has a chance of * working, which is better than just leaving it disabled. 
*/ if (ret < 0) { pci_info(dev, "BAR %d: no space for %pR\n", resno, res); ret = pci_revert_fw_address(res, dev, resno, size); } if (ret < 0) { pci_info(dev, "BAR %d: failed to assign %pR\n", resno, res); return ret; } res->flags &= ~IORESOURCE_UNSET; res->flags &= ~IORESOURCE_STARTALIGN; pci_info(dev, "BAR %d: assigned %pR\n", resno, res); if (resno < PCI_BRIDGE_RESOURCES) pci_update_resource(dev, resno); return 0; } EXPORT_SYMBOL(pci_assign_resource); int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsize, resource_size_t min_align) { struct resource *res = dev->resource + resno; unsigned long flags; resource_size_t new_size; int ret; if (res->flags & IORESOURCE_PCI_FIXED) return 0; flags = res->flags; res->flags |= IORESOURCE_UNSET; if (!res->parent) { pci_info(dev, "BAR %d: can't reassign an unassigned resource %pR\n", resno, res); return -EINVAL; } /* already aligned with min_align */ new_size = resource_size(res) + addsize; ret = _pci_assign_resource(dev, resno, new_size, min_align); if (ret) { res->flags = flags; pci_info(dev, "BAR %d: %pR (failed to expand by %#llx)\n", resno, res, (unsigned long long) addsize); return ret; } res->flags &= ~IORESOURCE_UNSET; res->flags &= ~IORESOURCE_STARTALIGN; pci_info(dev, "BAR %d: reassigned %pR (expanded by %#llx)\n", resno, res, (unsigned long long) addsize); if (resno < PCI_BRIDGE_RESOURCES) pci_update_resource(dev, resno); return 0; } void pci_release_resource(struct pci_dev *dev, int resno) { struct resource *res = dev->resource + resno; pci_info(dev, "BAR %d: releasing %pR\n", resno, res); if (!res->parent) return; release_resource(res); res->end = resource_size(res) - 1; res->start = 0; res->flags |= IORESOURCE_UNSET; } EXPORT_SYMBOL(pci_release_resource); int pci_resize_resource(struct pci_dev *dev, int resno, int size) { struct resource *res = dev->resource + resno; struct pci_host_bridge *host; int old, ret; u32 sizes; u16 cmd; /* Check if we must preserve the firmware's resource assignment */ host = pci_find_host_bridge(dev->bus); if (host->preserve_config) return -ENOTSUPP; /* Make sure the resource isn't assigned before resizing it. */ if (!(res->flags & IORESOURCE_UNSET)) return -EBUSY; pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & PCI_COMMAND_MEMORY) return -EBUSY; sizes = pci_rebar_get_possible_sizes(dev, resno); if (!sizes) return -ENOTSUPP; if (!(sizes & BIT(size))) return -EINVAL; old = pci_rebar_get_current_size(dev, resno); if (old < 0) return old; ret = pci_rebar_set_size(dev, resno, size); if (ret) return ret; res->end = res->start + pci_rebar_size_to_bytes(size) - 1; /* Check if the new config works by trying to assign everything. 
*/ if (dev->bus->self) { ret = pci_reassign_bridge_resources(dev->bus->self, res->flags); if (ret) goto error_resize; } return 0; error_resize: pci_rebar_set_size(dev, resno, old); res->end = res->start + pci_rebar_size_to_bytes(old) - 1; return ret; } EXPORT_SYMBOL(pci_resize_resource); int pci_enable_resources(struct pci_dev *dev, int mask) { u16 cmd, old_cmd; int i; struct resource *r; pci_read_config_word(dev, PCI_COMMAND, &cmd); old_cmd = cmd; pci_dev_for_each_resource(dev, r, i) { if (!(mask & (1 << i))) continue; if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM))) continue; if ((i == PCI_ROM_RESOURCE) && (!(r->flags & IORESOURCE_ROM_ENABLE))) continue; if (r->flags & IORESOURCE_UNSET) { pci_err(dev, "can't enable device: BAR %d %pR not assigned\n", i, r); return -EINVAL; } if (!r->parent) { pci_err(dev, "can't enable device: BAR %d %pR not claimed\n", i, r); return -EINVAL; } if (r->flags & IORESOURCE_IO) cmd |= PCI_COMMAND_IO; if (r->flags & IORESOURCE_MEM) cmd |= PCI_COMMAND_MEMORY; } if (cmd != old_cmd) { pci_info(dev, "enabling device (%04x -> %04x)\n", old_cmd, cmd); pci_write_config_word(dev, PCI_COMMAND, cmd); } return 0; }
linux-master
drivers/pci/setup-res.c
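/*
 * Illustrative sketch, not part of the kernel tree: how a driver might grow a
 * resizable BAR with the helpers above.  The BAR number and byte count are
 * whatever the caller picks; error recovery (restoring the old size when the
 * reassignment fails) is trimmed for brevity.  As pci_resize_resource()
 * enforces, memory decoding must be off and the BAR released first.
 */
#include <linux/pci.h>

static int demo_resize_bar(struct pci_dev *pdev, int bar, resource_size_t bytes)
{
	int size = pci_rebar_bytes_to_size(bytes);
	u16 cmd;
	int ret;

	if (!(pci_rebar_get_possible_sizes(pdev, bar) & BIT(size)))
		return -EINVAL;		/* device cannot offer that size */

	/* Stop memory decoding while the BAR is moved and resized. */
	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(pdev, PCI_COMMAND, cmd & ~PCI_COMMAND_MEMORY);

	pci_release_resource(pdev, bar);
	ret = pci_resize_resource(pdev, bar, size);
	if (!ret)
		ret = pci_assign_resource(pdev, bar);

	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return ret;
}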
// SPDX-License-Identifier: GPL-2.0 /* * From setup-res.c, by: * Dave Rusling ([email protected]) * David Mosberger ([email protected]) * David Miller ([email protected]) * Ivan Kokshaysky ([email protected]) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/of.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include "pci.h" void pci_add_resource_offset(struct list_head *resources, struct resource *res, resource_size_t offset) { struct resource_entry *entry; entry = resource_list_create_entry(res, 0); if (!entry) { pr_err("PCI: can't add host bridge window %pR\n", res); return; } entry->offset = offset; resource_list_add_tail(entry, resources); } EXPORT_SYMBOL(pci_add_resource_offset); void pci_add_resource(struct list_head *resources, struct resource *res) { pci_add_resource_offset(resources, res, 0); } EXPORT_SYMBOL(pci_add_resource); void pci_free_resource_list(struct list_head *resources) { resource_list_free(resources); } EXPORT_SYMBOL(pci_free_resource_list); void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags) { struct pci_bus_resource *bus_res; bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL); if (!bus_res) { dev_err(&bus->dev, "can't add %pR resource\n", res); return; } bus_res->res = res; bus_res->flags = flags; list_add_tail(&bus_res->list, &bus->resources); } struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n) { struct pci_bus_resource *bus_res; if (n < PCI_BRIDGE_RESOURCE_NUM) return bus->resource[n]; n -= PCI_BRIDGE_RESOURCE_NUM; list_for_each_entry(bus_res, &bus->resources, list) { if (n-- == 0) return bus_res->res; } return NULL; } EXPORT_SYMBOL_GPL(pci_bus_resource_n); void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res) { struct pci_bus_resource *bus_res, *tmp; int i; for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { if (bus->resource[i] == res) { bus->resource[i] = NULL; return; } } list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) { if (bus_res->res == res) { list_del(&bus_res->list); kfree(bus_res); return; } } } void pci_bus_remove_resources(struct pci_bus *bus) { int i; struct pci_bus_resource *bus_res, *tmp; for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) bus->resource[i] = NULL; list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) { list_del(&bus_res->list); kfree(bus_res); } } int devm_request_pci_bus_resources(struct device *dev, struct list_head *resources) { struct resource_entry *win; struct resource *parent, *res; int err; resource_list_for_each_entry(win, resources) { res = win->res; switch (resource_type(res)) { case IORESOURCE_IO: parent = &ioport_resource; break; case IORESOURCE_MEM: parent = &iomem_resource; break; default: continue; } err = devm_request_resource(dev, parent, res); if (err) return err; } return 0; } EXPORT_SYMBOL_GPL(devm_request_pci_bus_resources); static struct pci_bus_region pci_32_bit = {0, 0xffffffffULL}; #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT static struct pci_bus_region pci_64_bit = {0, (pci_bus_addr_t) 0xffffffffffffffffULL}; static struct pci_bus_region pci_high = {(pci_bus_addr_t) 0x100000000ULL, (pci_bus_addr_t) 0xffffffffffffffffULL}; #endif /* * @res contains CPU addresses. Clip it so the corresponding bus addresses * on @bus are entirely within @region. This is used to control the bus * addresses of resources we allocate, e.g., we may need a resource that * can be mapped by a 32-bit BAR. 
*/ static void pci_clip_resource_to_region(struct pci_bus *bus, struct resource *res, struct pci_bus_region *region) { struct pci_bus_region r; pcibios_resource_to_bus(bus, &r, res); if (r.start < region->start) r.start = region->start; if (r.end > region->end) r.end = region->end; if (r.end < r.start) res->end = res->start - 1; else pcibios_bus_to_resource(bus, res, &r); } static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res, resource_size_t size, resource_size_t align, resource_size_t min, unsigned long type_mask, resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t), void *alignf_data, struct pci_bus_region *region) { struct resource *r, avail; resource_size_t max; int ret; type_mask |= IORESOURCE_TYPE_BITS; pci_bus_for_each_resource(bus, r) { resource_size_t min_used = min; if (!r) continue; /* type_mask must match */ if ((res->flags ^ r->flags) & type_mask) continue; /* We cannot allocate a non-prefetching resource from a pre-fetching area */ if ((r->flags & IORESOURCE_PREFETCH) && !(res->flags & IORESOURCE_PREFETCH)) continue; avail = *r; pci_clip_resource_to_region(bus, &avail, region); /* * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to * protect badly documented motherboard resources, but if * this is an already-configured bridge window, its start * overrides "min". */ if (avail.start) min_used = avail.start; max = avail.end; /* Don't bother if available space isn't large enough */ if (size > max - min_used + 1) continue; /* Ok, try it out.. */ ret = allocate_resource(r, res, size, min_used, max, align, alignf, alignf_data); if (ret == 0) return 0; } return -ENOMEM; } /** * pci_bus_alloc_resource - allocate a resource from a parent bus * @bus: PCI bus * @res: resource to allocate * @size: size of resource to allocate * @align: alignment of resource to allocate * @min: minimum /proc/iomem address to allocate * @type_mask: IORESOURCE_* type flags * @alignf: resource alignment function * @alignf_data: data argument for resource alignment function * * Given the PCI bus a device resides on, the size, minimum address, * alignment and type, try to find an acceptable resource allocation * for a specific device resource. */ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res, resource_size_t size, resource_size_t align, resource_size_t min, unsigned long type_mask, resource_size_t (*alignf)(void *, const struct resource *, resource_size_t, resource_size_t), void *alignf_data) { #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT int rc; if (res->flags & IORESOURCE_MEM_64) { rc = pci_bus_alloc_from_region(bus, res, size, align, min, type_mask, alignf, alignf_data, &pci_high); if (rc == 0) return 0; return pci_bus_alloc_from_region(bus, res, size, align, min, type_mask, alignf, alignf_data, &pci_64_bit); } #endif return pci_bus_alloc_from_region(bus, res, size, align, min, type_mask, alignf, alignf_data, &pci_32_bit); } EXPORT_SYMBOL(pci_bus_alloc_resource); /* * The @idx resource of @dev should be a PCI-PCI bridge window. If this * resource fits inside a window of an upstream bridge, do nothing. If it * overlaps an upstream window but extends outside it, clip the resource so * it fits completely inside. 
*/ bool pci_bus_clip_resource(struct pci_dev *dev, int idx) { struct pci_bus *bus = dev->bus; struct resource *res = &dev->resource[idx]; struct resource orig_res = *res; struct resource *r; pci_bus_for_each_resource(bus, r) { resource_size_t start, end; if (!r) continue; if (resource_type(res) != resource_type(r)) continue; start = max(r->start, res->start); end = min(r->end, res->end); if (start > end) continue; /* no overlap */ if (res->start == start && res->end == end) return false; /* no change */ res->start = start; res->end = end; res->flags &= ~IORESOURCE_UNSET; orig_res.flags &= ~IORESOURCE_UNSET; pci_info(dev, "%pR clipped to %pR\n", &orig_res, res); return true; } return false; } void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { } void __weak pcibios_bus_add_device(struct pci_dev *pdev) { } /** * pci_bus_add_device - start driver for a single device * @dev: device to add * * This adds add sysfs entries and start device drivers */ void pci_bus_add_device(struct pci_dev *dev) { struct device_node *dn = dev->dev.of_node; int retval; /* * Can not put in pci_device_add yet because resources * are not assigned yet for some devices. */ pcibios_bus_add_device(dev); pci_fixup_device(pci_fixup_final, dev); if (pci_is_bridge(dev)) of_pci_make_dev_node(dev); pci_create_sysfs_dev_files(dev); pci_proc_attach_device(dev); pci_bridge_d3_update(dev); dev->match_driver = !dn || of_device_is_available(dn); retval = device_attach(&dev->dev); if (retval < 0 && retval != -EPROBE_DEFER) pci_warn(dev, "device attach failed (%d)\n", retval); pci_dev_assign_added(dev, true); } EXPORT_SYMBOL_GPL(pci_bus_add_device); /** * pci_bus_add_devices - start driver for PCI devices * @bus: bus to check for new devices * * Start driver for PCI devices and add some sysfs entries. */ void pci_bus_add_devices(const struct pci_bus *bus) { struct pci_dev *dev; struct pci_bus *child; list_for_each_entry(dev, &bus->devices, bus_list) { /* Skip already-added devices */ if (pci_dev_is_added(dev)) continue; pci_bus_add_device(dev); } list_for_each_entry(dev, &bus->devices, bus_list) { /* Skip if device attach failed */ if (!pci_dev_is_added(dev)) continue; child = dev->subordinate; if (child) pci_bus_add_devices(child); } } EXPORT_SYMBOL(pci_bus_add_devices); /** pci_walk_bus - walk devices on/under bus, calling callback. * @top bus whose devices should be walked * @cb callback to be called for each device found * @userdata arbitrary pointer to be passed to callback. * * Walk the given bus, including any bridged devices * on buses under this bus. Call the provided callback * on each device found. * * We check the return of @cb each time. If it returns anything * other than 0, we break out. 
* */ void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata) { struct pci_dev *dev; struct pci_bus *bus; struct list_head *next; int retval; bus = top; down_read(&pci_bus_sem); next = top->devices.next; for (;;) { if (next == &bus->devices) { /* end of this bus, go up or finish */ if (bus == top) break; next = bus->self->bus_list.next; bus = bus->self->bus; continue; } dev = list_entry(next, struct pci_dev, bus_list); if (dev->subordinate) { /* this is a pci-pci bridge, do its devices next */ next = dev->subordinate->devices.next; bus = dev->subordinate; } else next = dev->bus_list.next; retval = cb(dev, userdata); if (retval) break; } up_read(&pci_bus_sem); } EXPORT_SYMBOL_GPL(pci_walk_bus); struct pci_bus *pci_bus_get(struct pci_bus *bus) { if (bus) get_device(&bus->dev); return bus; } void pci_bus_put(struct pci_bus *bus) { if (bus) put_device(&bus->dev); }
linux-master
drivers/pci/bus.c
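/*
 * Illustrative sketch, not part of the kernel tree: pci_walk_bus() visits
 * every device at or below the given bus and stops early if the callback
 * returns non-zero.  Here the callback always returns 0, so the walk simply
 * counts the devices; the "demo" names are hypothetical.
 */
#include <linux/pci.h>

static int demo_count_cb(struct pci_dev *pdev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* keep walking */
}

static unsigned int demo_count_devices(struct pci_bus *bus)
{
	unsigned int count = 0;

	pci_walk_bus(bus, demo_count_cb, &count);
	return count;
}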
// SPDX-License-Identifier: GPL-2.0 /* * PCI Peer 2 Peer DMA support. * * Copyright (c) 2016-2018, Logan Gunthorpe * Copyright (c) 2016-2017, Microsemi Corporation * Copyright (c) 2017, Christoph Hellwig * Copyright (c) 2018, Eideticom Inc. */ #define pr_fmt(fmt) "pci-p2pdma: " fmt #include <linux/ctype.h> #include <linux/dma-map-ops.h> #include <linux/pci-p2pdma.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/genalloc.h> #include <linux/memremap.h> #include <linux/percpu-refcount.h> #include <linux/random.h> #include <linux/seq_buf.h> #include <linux/xarray.h> struct pci_p2pdma { struct gen_pool *pool; bool p2pmem_published; struct xarray map_types; }; struct pci_p2pdma_pagemap { struct dev_pagemap pgmap; struct pci_dev *provider; u64 bus_offset; }; static struct pci_p2pdma_pagemap *to_p2p_pgmap(struct dev_pagemap *pgmap) { return container_of(pgmap, struct pci_p2pdma_pagemap, pgmap); } static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); struct pci_p2pdma *p2pdma; size_t size = 0; rcu_read_lock(); p2pdma = rcu_dereference(pdev->p2pdma); if (p2pdma && p2pdma->pool) size = gen_pool_size(p2pdma->pool); rcu_read_unlock(); return sysfs_emit(buf, "%zd\n", size); } static DEVICE_ATTR_RO(size); static ssize_t available_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); struct pci_p2pdma *p2pdma; size_t avail = 0; rcu_read_lock(); p2pdma = rcu_dereference(pdev->p2pdma); if (p2pdma && p2pdma->pool) avail = gen_pool_avail(p2pdma->pool); rcu_read_unlock(); return sysfs_emit(buf, "%zd\n", avail); } static DEVICE_ATTR_RO(available); static ssize_t published_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); struct pci_p2pdma *p2pdma; bool published = false; rcu_read_lock(); p2pdma = rcu_dereference(pdev->p2pdma); if (p2pdma) published = p2pdma->p2pmem_published; rcu_read_unlock(); return sysfs_emit(buf, "%d\n", published); } static DEVICE_ATTR_RO(published); static int p2pmem_alloc_mmap(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); size_t len = vma->vm_end - vma->vm_start; struct pci_p2pdma *p2pdma; struct percpu_ref *ref; unsigned long vaddr; void *kaddr; int ret; /* prevent private mappings from being established */ if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { pci_info_ratelimited(pdev, "%s: fail, attempted private mapping\n", current->comm); return -EINVAL; } if (vma->vm_pgoff) { pci_info_ratelimited(pdev, "%s: fail, attempted mapping with non-zero offset\n", current->comm); return -EINVAL; } rcu_read_lock(); p2pdma = rcu_dereference(pdev->p2pdma); if (!p2pdma) { ret = -ENODEV; goto out; } kaddr = (void *)gen_pool_alloc_owner(p2pdma->pool, len, (void **)&ref); if (!kaddr) { ret = -ENOMEM; goto out; } /* * vm_insert_page() can sleep, so a reference is taken to mapping * such that rcu_read_unlock() can be done before inserting the * pages */ if (unlikely(!percpu_ref_tryget_live_rcu(ref))) { ret = -ENODEV; goto out_free_mem; } rcu_read_unlock(); for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) { ret = vm_insert_page(vma, vaddr, virt_to_page(kaddr)); if (ret) { gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len); return ret; } percpu_ref_get(ref); put_page(virt_to_page(kaddr)); kaddr += PAGE_SIZE; len -= PAGE_SIZE; } percpu_ref_put(ref); return 0; out_free_mem: 
gen_pool_free(p2pdma->pool, (uintptr_t)kaddr, len); out: rcu_read_unlock(); return ret; } static struct bin_attribute p2pmem_alloc_attr = { .attr = { .name = "allocate", .mode = 0660 }, .mmap = p2pmem_alloc_mmap, /* * Some places where we want to call mmap (ie. python) will check * that the file size is greater than the mmap size before allowing * the mmap to continue. To work around this, just set the size * to be very large. */ .size = SZ_1T, }; static struct attribute *p2pmem_attrs[] = { &dev_attr_size.attr, &dev_attr_available.attr, &dev_attr_published.attr, NULL, }; static struct bin_attribute *p2pmem_bin_attrs[] = { &p2pmem_alloc_attr, NULL, }; static const struct attribute_group p2pmem_group = { .attrs = p2pmem_attrs, .bin_attrs = p2pmem_bin_attrs, .name = "p2pmem", }; static void p2pdma_page_free(struct page *page) { struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap); /* safe to dereference while a reference is held to the percpu ref */ struct pci_p2pdma *p2pdma = rcu_dereference_protected(pgmap->provider->p2pdma, 1); struct percpu_ref *ref; gen_pool_free_owner(p2pdma->pool, (uintptr_t)page_to_virt(page), PAGE_SIZE, (void **)&ref); percpu_ref_put(ref); } static const struct dev_pagemap_ops p2pdma_pgmap_ops = { .page_free = p2pdma_page_free, }; static void pci_p2pdma_release(void *data) { struct pci_dev *pdev = data; struct pci_p2pdma *p2pdma; p2pdma = rcu_dereference_protected(pdev->p2pdma, 1); if (!p2pdma) return; /* Flush and disable pci_alloc_p2p_mem() */ pdev->p2pdma = NULL; synchronize_rcu(); gen_pool_destroy(p2pdma->pool); sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group); xa_destroy(&p2pdma->map_types); } static int pci_p2pdma_setup(struct pci_dev *pdev) { int error = -ENOMEM; struct pci_p2pdma *p2p; p2p = devm_kzalloc(&pdev->dev, sizeof(*p2p), GFP_KERNEL); if (!p2p) return -ENOMEM; xa_init(&p2p->map_types); p2p->pool = gen_pool_create(PAGE_SHIFT, dev_to_node(&pdev->dev)); if (!p2p->pool) goto out; error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev); if (error) goto out_pool_destroy; error = sysfs_create_group(&pdev->dev.kobj, &p2pmem_group); if (error) goto out_pool_destroy; rcu_assign_pointer(pdev->p2pdma, p2p); return 0; out_pool_destroy: gen_pool_destroy(p2p->pool); out: devm_kfree(&pdev->dev, p2p); return error; } static void pci_p2pdma_unmap_mappings(void *data) { struct pci_dev *pdev = data; /* * Removing the alloc attribute from sysfs will call * unmap_mapping_range() on the inode, teardown any existing userspace * mappings and prevent new ones from being created. */ sysfs_remove_file_from_group(&pdev->dev.kobj, &p2pmem_alloc_attr.attr, p2pmem_group.name); } /** * pci_p2pdma_add_resource - add memory for use as p2p memory * @pdev: the device to add the memory to * @bar: PCI BAR to add * @size: size of the memory to add, may be zero to use the whole BAR * @offset: offset into the PCI BAR * * The memory will be given ZONE_DEVICE struct pages so that it may * be used with any DMA request. 
*/ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size, u64 offset) { struct pci_p2pdma_pagemap *p2p_pgmap; struct dev_pagemap *pgmap; struct pci_p2pdma *p2pdma; void *addr; int error; if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) return -EINVAL; if (offset >= pci_resource_len(pdev, bar)) return -EINVAL; if (!size) size = pci_resource_len(pdev, bar) - offset; if (size + offset > pci_resource_len(pdev, bar)) return -EINVAL; if (!pdev->p2pdma) { error = pci_p2pdma_setup(pdev); if (error) return error; } p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL); if (!p2p_pgmap) return -ENOMEM; pgmap = &p2p_pgmap->pgmap; pgmap->range.start = pci_resource_start(pdev, bar) + offset; pgmap->range.end = pgmap->range.start + size - 1; pgmap->nr_range = 1; pgmap->type = MEMORY_DEVICE_PCI_P2PDMA; pgmap->ops = &p2pdma_pgmap_ops; p2p_pgmap->provider = pdev; p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) - pci_resource_start(pdev, bar); addr = devm_memremap_pages(&pdev->dev, pgmap); if (IS_ERR(addr)) { error = PTR_ERR(addr); goto pgmap_free; } error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_unmap_mappings, pdev); if (error) goto pages_free; p2pdma = rcu_dereference_protected(pdev->p2pdma, 1); error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr, pci_bus_address(pdev, bar) + offset, range_len(&pgmap->range), dev_to_node(&pdev->dev), &pgmap->ref); if (error) goto pages_free; pci_info(pdev, "added peer-to-peer DMA memory %#llx-%#llx\n", pgmap->range.start, pgmap->range.end); return 0; pages_free: devm_memunmap_pages(&pdev->dev, pgmap); pgmap_free: devm_kfree(&pdev->dev, pgmap); return error; } EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource); /* * Note this function returns the parent PCI device with a * reference taken. It is the caller's responsibility to drop * the reference. */ static struct pci_dev *find_parent_pci_dev(struct device *dev) { struct device *parent; dev = get_device(dev); while (dev) { if (dev_is_pci(dev)) return to_pci_dev(dev); parent = get_device(dev->parent); put_device(dev); dev = parent; } return NULL; } /* * Check if a PCI bridge has its ACS redirection bits set to redirect P2P * TLPs upstream via ACS. Returns 1 if the packets will be redirected * upstream, 0 otherwise. 
*/ static int pci_bridge_has_acs_redir(struct pci_dev *pdev) { int pos; u16 ctrl; pos = pdev->acs_cap; if (!pos) return 0; pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl); if (ctrl & (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC)) return 1; return 0; } static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev) { if (!buf) return; seq_buf_printf(buf, "%s;", pci_name(pdev)); } static bool cpu_supports_p2pdma(void) { #ifdef CONFIG_X86 struct cpuinfo_x86 *c = &cpu_data(0); /* Any AMD CPU whose family ID is Zen or newer supports p2pdma */ if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17) return true; #endif return false; } static const struct pci_p2pdma_whitelist_entry { unsigned short vendor; unsigned short device; enum { REQ_SAME_HOST_BRIDGE = 1 << 0, } flags; } pci_p2pdma_whitelist[] = { /* Intel Xeon E5/Core i7 */ {PCI_VENDOR_ID_INTEL, 0x3c00, REQ_SAME_HOST_BRIDGE}, {PCI_VENDOR_ID_INTEL, 0x3c01, REQ_SAME_HOST_BRIDGE}, /* Intel Xeon E7 v3/Xeon E5 v3/Core i7 */ {PCI_VENDOR_ID_INTEL, 0x2f00, REQ_SAME_HOST_BRIDGE}, {PCI_VENDOR_ID_INTEL, 0x2f01, REQ_SAME_HOST_BRIDGE}, /* Intel Skylake-E */ {PCI_VENDOR_ID_INTEL, 0x2030, 0}, {PCI_VENDOR_ID_INTEL, 0x2031, 0}, {PCI_VENDOR_ID_INTEL, 0x2032, 0}, {PCI_VENDOR_ID_INTEL, 0x2033, 0}, {PCI_VENDOR_ID_INTEL, 0x2020, 0}, {PCI_VENDOR_ID_INTEL, 0x09a2, 0}, {} }; /* * If the first device on host's root bus is either devfn 00.0 or a PCIe * Root Port, return it. Otherwise return NULL. * * We often use a devfn 00.0 "host bridge" in the pci_p2pdma_whitelist[] * (though there is no PCI/PCIe requirement for such a device). On some * platforms, e.g., Intel Skylake, there is no such host bridge device, and * pci_p2pdma_whitelist[] may contain a Root Port at any devfn. * * This function is similar to pci_get_slot(host->bus, 0), but it does * not take the pci_bus_sem lock since __host_bridge_whitelist() must not * sleep. * * For this to be safe, the caller should hold a reference to a device on the * bridge, which should ensure the host_bridge device will not be freed * or removed from the head of the devices list. */ static struct pci_dev *pci_host_bridge_dev(struct pci_host_bridge *host) { struct pci_dev *root; root = list_first_entry_or_null(&host->bus->devices, struct pci_dev, bus_list); if (!root) return NULL; if (root->devfn == PCI_DEVFN(0, 0)) return root; if (pci_pcie_type(root) == PCI_EXP_TYPE_ROOT_PORT) return root; return NULL; } static bool __host_bridge_whitelist(struct pci_host_bridge *host, bool same_host_bridge, bool warn) { struct pci_dev *root = pci_host_bridge_dev(host); const struct pci_p2pdma_whitelist_entry *entry; unsigned short vendor, device; if (!root) return false; vendor = root->vendor; device = root->device; for (entry = pci_p2pdma_whitelist; entry->vendor; entry++) { if (vendor != entry->vendor || device != entry->device) continue; if (entry->flags & REQ_SAME_HOST_BRIDGE && !same_host_bridge) return false; return true; } if (warn) pci_warn(root, "Host bridge not in P2PDMA whitelist: %04x:%04x\n", vendor, device); return false; } /* * If we can't find a common upstream bridge take a look at the root * complex and compare it to a whitelist of known good hardware. 
*/ static bool host_bridge_whitelist(struct pci_dev *a, struct pci_dev *b, bool warn) { struct pci_host_bridge *host_a = pci_find_host_bridge(a->bus); struct pci_host_bridge *host_b = pci_find_host_bridge(b->bus); if (host_a == host_b) return __host_bridge_whitelist(host_a, true, warn); if (__host_bridge_whitelist(host_a, false, warn) && __host_bridge_whitelist(host_b, false, warn)) return true; return false; } static unsigned long map_types_idx(struct pci_dev *client) { return (pci_domain_nr(client->bus) << 16) | pci_dev_id(client); } /* * Calculate the P2PDMA mapping type and distance between two PCI devices. * * If the two devices are the same PCI function, return * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 0. * * If they are two functions of the same device, return * PCI_P2PDMA_MAP_BUS_ADDR and a distance of 2 (one hop up to the bridge, * then one hop back down to another function of the same device). * * In the case where two devices are connected to the same PCIe switch, * return a distance of 4. This corresponds to the following PCI tree: * * -+ Root Port * \+ Switch Upstream Port * +-+ Switch Downstream Port 0 * + \- Device A * \-+ Switch Downstream Port 1 * \- Device B * * The distance is 4 because we traverse from Device A to Downstream Port 0 * to the common Switch Upstream Port, back down to Downstream Port 1 and * then to Device B. The mapping type returned depends on the ACS * redirection setting of the ports along the path. * * If ACS redirect is set on any port in the path, traffic between the * devices will go through the host bridge, so return * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; otherwise return * PCI_P2PDMA_MAP_BUS_ADDR. * * Any two devices that have a data path that goes through the host bridge * will consult a whitelist. If the host bridge is in the whitelist, return * PCI_P2PDMA_MAP_THRU_HOST_BRIDGE with the distance set to the number of * ports per above. If the device is not in the whitelist, return * PCI_P2PDMA_MAP_NOT_SUPPORTED. */ static enum pci_p2pdma_map_type calc_map_type_and_dist(struct pci_dev *provider, struct pci_dev *client, int *dist, bool verbose) { enum pci_p2pdma_map_type map_type = PCI_P2PDMA_MAP_THRU_HOST_BRIDGE; struct pci_dev *a = provider, *b = client, *bb; bool acs_redirects = false; struct pci_p2pdma *p2pdma; struct seq_buf acs_list; int acs_cnt = 0; int dist_a = 0; int dist_b = 0; char buf[128]; seq_buf_init(&acs_list, buf, sizeof(buf)); /* * Note, we don't need to take references to devices returned by * pci_upstream_bridge() seeing we hold a reference to a child * device which will already hold a reference to the upstream bridge. 
*/ while (a) { dist_b = 0; if (pci_bridge_has_acs_redir(a)) { seq_buf_print_bus_devfn(&acs_list, a); acs_cnt++; } bb = b; while (bb) { if (a == bb) goto check_b_path_acs; bb = pci_upstream_bridge(bb); dist_b++; } a = pci_upstream_bridge(a); dist_a++; } *dist = dist_a + dist_b; goto map_through_host_bridge; check_b_path_acs: bb = b; while (bb) { if (a == bb) break; if (pci_bridge_has_acs_redir(bb)) { seq_buf_print_bus_devfn(&acs_list, bb); acs_cnt++; } bb = pci_upstream_bridge(bb); } *dist = dist_a + dist_b; if (!acs_cnt) { map_type = PCI_P2PDMA_MAP_BUS_ADDR; goto done; } if (verbose) { acs_list.buffer[acs_list.len-1] = 0; /* drop final semicolon */ pci_warn(client, "ACS redirect is set between the client and provider (%s)\n", pci_name(provider)); pci_warn(client, "to disable ACS redirect for this path, add the kernel parameter: pci=disable_acs_redir=%s\n", acs_list.buffer); } acs_redirects = true; map_through_host_bridge: if (!cpu_supports_p2pdma() && !host_bridge_whitelist(provider, client, acs_redirects)) { if (verbose) pci_warn(client, "cannot be used for peer-to-peer DMA as the client and provider (%s) do not share an upstream bridge or whitelisted host bridge\n", pci_name(provider)); map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED; } done: rcu_read_lock(); p2pdma = rcu_dereference(provider->p2pdma); if (p2pdma) xa_store(&p2pdma->map_types, map_types_idx(client), xa_mk_value(map_type), GFP_KERNEL); rcu_read_unlock(); return map_type; } /** * pci_p2pdma_distance_many - Determine the cumulative distance between * a p2pdma provider and the clients in use. * @provider: p2pdma provider to check against the client list * @clients: array of devices to check (NULL-terminated) * @num_clients: number of clients in the array * @verbose: if true, print warnings for devices when we return -1 * * Returns -1 if any of the clients are not compatible, otherwise returns a * positive number where a lower number is the preferable choice. (If there's * one client that's the same as the provider it will return 0, which is best * choice). * * "compatible" means the provider and the clients are either all behind * the same PCI root port or the host bridges connected to each of the devices * are listed in the 'pci_p2pdma_whitelist'. 
*/ int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients, int num_clients, bool verbose) { enum pci_p2pdma_map_type map; bool not_supported = false; struct pci_dev *pci_client; int total_dist = 0; int i, distance; if (num_clients == 0) return -1; for (i = 0; i < num_clients; i++) { pci_client = find_parent_pci_dev(clients[i]); if (!pci_client) { if (verbose) dev_warn(clients[i], "cannot be used for peer-to-peer DMA as it is not a PCI device\n"); return -1; } map = calc_map_type_and_dist(provider, pci_client, &distance, verbose); pci_dev_put(pci_client); if (map == PCI_P2PDMA_MAP_NOT_SUPPORTED) not_supported = true; if (not_supported && !verbose) break; total_dist += distance; } if (not_supported) return -1; return total_dist; } EXPORT_SYMBOL_GPL(pci_p2pdma_distance_many); /** * pci_has_p2pmem - check if a given PCI device has published any p2pmem * @pdev: PCI device to check */ bool pci_has_p2pmem(struct pci_dev *pdev) { struct pci_p2pdma *p2pdma; bool res; rcu_read_lock(); p2pdma = rcu_dereference(pdev->p2pdma); res = p2pdma && p2pdma->p2pmem_published; rcu_read_unlock(); return res; } EXPORT_SYMBOL_GPL(pci_has_p2pmem); /** * pci_p2pmem_find_many - find a peer-to-peer DMA memory device compatible with * the specified list of clients and shortest distance * @clients: array of devices to check (NULL-terminated) * @num_clients: number of client devices in the list * * If multiple devices are behind the same switch, the one "closest" to the * client devices in use will be chosen first. (So if one of the providers is * the same as one of the clients, that provider will be used ahead of any * other providers that are unrelated). If multiple providers are an equal * distance away, one will be chosen at random. * * Returns a pointer to the PCI device with a reference taken (use pci_dev_put * to return the reference) or NULL if no compatible device is found. The * found provider will also be assigned to the client list. */ struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients) { struct pci_dev *pdev = NULL; int distance; int closest_distance = INT_MAX; struct pci_dev **closest_pdevs; int dev_cnt = 0; const int max_devs = PAGE_SIZE / sizeof(*closest_pdevs); int i; closest_pdevs = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!closest_pdevs) return NULL; for_each_pci_dev(pdev) { if (!pci_has_p2pmem(pdev)) continue; distance = pci_p2pdma_distance_many(pdev, clients, num_clients, false); if (distance < 0 || distance > closest_distance) continue; if (distance == closest_distance && dev_cnt >= max_devs) continue; if (distance < closest_distance) { for (i = 0; i < dev_cnt; i++) pci_dev_put(closest_pdevs[i]); dev_cnt = 0; closest_distance = distance; } closest_pdevs[dev_cnt++] = pci_dev_get(pdev); } if (dev_cnt) pdev = pci_dev_get(closest_pdevs[get_random_u32_below(dev_cnt)]); for (i = 0; i < dev_cnt; i++) pci_dev_put(closest_pdevs[i]); kfree(closest_pdevs); return pdev; } EXPORT_SYMBOL_GPL(pci_p2pmem_find_many); /** * pci_alloc_p2pmem - allocate peer-to-peer DMA memory * @pdev: the device to allocate memory from * @size: number of bytes to allocate * * Returns the allocated memory or NULL on error. */ void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size) { void *ret = NULL; struct percpu_ref *ref; struct pci_p2pdma *p2pdma; /* * Pairs with synchronize_rcu() in pci_p2pdma_release() to * ensure pdev->p2pdma is non-NULL for the duration of the * read-lock. 
*/ rcu_read_lock(); p2pdma = rcu_dereference(pdev->p2pdma); if (unlikely(!p2pdma)) goto out; ret = (void *)gen_pool_alloc_owner(p2pdma->pool, size, (void **) &ref); if (!ret) goto out; if (unlikely(!percpu_ref_tryget_live_rcu(ref))) { gen_pool_free(p2pdma->pool, (unsigned long) ret, size); ret = NULL; goto out; } out: rcu_read_unlock(); return ret; } EXPORT_SYMBOL_GPL(pci_alloc_p2pmem); /** * pci_free_p2pmem - free peer-to-peer DMA memory * @pdev: the device the memory was allocated from * @addr: address of the memory that was allocated * @size: number of bytes that were allocated */ void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size) { struct percpu_ref *ref; struct pci_p2pdma *p2pdma = rcu_dereference_protected(pdev->p2pdma, 1); gen_pool_free_owner(p2pdma->pool, (uintptr_t)addr, size, (void **) &ref); percpu_ref_put(ref); } EXPORT_SYMBOL_GPL(pci_free_p2pmem); /** * pci_p2pmem_virt_to_bus - return the PCI bus address for a given virtual * address obtained with pci_alloc_p2pmem() * @pdev: the device the memory was allocated from * @addr: address of the memory that was allocated */ pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr) { struct pci_p2pdma *p2pdma; if (!addr) return 0; p2pdma = rcu_dereference_protected(pdev->p2pdma, 1); if (!p2pdma) return 0; /* * Note: when we added the memory to the pool we used the PCI * bus address as the physical address. So gen_pool_virt_to_phys() * actually returns the bus address despite the misleading name. */ return gen_pool_virt_to_phys(p2pdma->pool, (unsigned long)addr); } EXPORT_SYMBOL_GPL(pci_p2pmem_virt_to_bus); /** * pci_p2pmem_alloc_sgl - allocate peer-to-peer DMA memory in a scatterlist * @pdev: the device to allocate memory from * @nents: the number of SG entries in the list * @length: number of bytes to allocate * * Return: %NULL on error or &struct scatterlist pointer and @nents on success */ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev, unsigned int *nents, u32 length) { struct scatterlist *sg; void *addr; sg = kmalloc(sizeof(*sg), GFP_KERNEL); if (!sg) return NULL; sg_init_table(sg, 1); addr = pci_alloc_p2pmem(pdev, length); if (!addr) goto out_free_sg; sg_set_buf(sg, addr, length); *nents = 1; return sg; out_free_sg: kfree(sg); return NULL; } EXPORT_SYMBOL_GPL(pci_p2pmem_alloc_sgl); /** * pci_p2pmem_free_sgl - free a scatterlist allocated by pci_p2pmem_alloc_sgl() * @pdev: the device to allocate memory from * @sgl: the allocated scatterlist */ void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl) { struct scatterlist *sg; int count; for_each_sg(sgl, sg, INT_MAX, count) { if (!sg) break; pci_free_p2pmem(pdev, sg_virt(sg), sg->length); } kfree(sgl); } EXPORT_SYMBOL_GPL(pci_p2pmem_free_sgl); /** * pci_p2pmem_publish - publish the peer-to-peer DMA memory for use by * other devices with pci_p2pmem_find() * @pdev: the device with peer-to-peer DMA memory to publish * @publish: set to true to publish the memory, false to unpublish it * * Published memory can be used by other PCI device drivers for * peer-2-peer DMA operations. Non-published memory is reserved for * exclusive use of the device driver that registers the peer-to-peer * memory. 
*/ void pci_p2pmem_publish(struct pci_dev *pdev, bool publish) { struct pci_p2pdma *p2pdma; rcu_read_lock(); p2pdma = rcu_dereference(pdev->p2pdma); if (p2pdma) p2pdma->p2pmem_published = publish; rcu_read_unlock(); } EXPORT_SYMBOL_GPL(pci_p2pmem_publish); static enum pci_p2pdma_map_type pci_p2pdma_map_type(struct dev_pagemap *pgmap, struct device *dev) { enum pci_p2pdma_map_type type = PCI_P2PDMA_MAP_NOT_SUPPORTED; struct pci_dev *provider = to_p2p_pgmap(pgmap)->provider; struct pci_dev *client; struct pci_p2pdma *p2pdma; int dist; if (!provider->p2pdma) return PCI_P2PDMA_MAP_NOT_SUPPORTED; if (!dev_is_pci(dev)) return PCI_P2PDMA_MAP_NOT_SUPPORTED; client = to_pci_dev(dev); rcu_read_lock(); p2pdma = rcu_dereference(provider->p2pdma); if (p2pdma) type = xa_to_value(xa_load(&p2pdma->map_types, map_types_idx(client))); rcu_read_unlock(); if (type == PCI_P2PDMA_MAP_UNKNOWN) return calc_map_type_and_dist(provider, client, &dist, true); return type; } /** * pci_p2pdma_map_segment - map an sg segment determining the mapping type * @state: State structure that should be declared outside of the for_each_sg() * loop and initialized to zero. * @dev: DMA device that's doing the mapping operation * @sg: scatterlist segment to map * * This is a helper to be used by non-IOMMU dma_map_sg() implementations where * the sg segment is the same for the page_link and the dma_address. * * Attempt to map a single segment in an SGL with the PCI bus address. * The segment must point to a PCI P2PDMA page and thus must be * wrapped in a is_pci_p2pdma_page(sg_page(sg)) check. * * Returns the type of mapping used and maps the page if the type is * PCI_P2PDMA_MAP_BUS_ADDR. */ enum pci_p2pdma_map_type pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev, struct scatterlist *sg) { if (state->pgmap != sg_page(sg)->pgmap) { state->pgmap = sg_page(sg)->pgmap; state->map = pci_p2pdma_map_type(state->pgmap, dev); state->bus_off = to_p2p_pgmap(state->pgmap)->bus_offset; } if (state->map == PCI_P2PDMA_MAP_BUS_ADDR) { sg->dma_address = sg_phys(sg) + state->bus_off; sg_dma_len(sg) = sg->length; sg_dma_mark_bus_address(sg); } return state->map; } /** * pci_p2pdma_enable_store - parse a configfs/sysfs attribute store * to enable p2pdma * @page: contents of the value to be stored * @p2p_dev: returns the PCI device that was selected to be used * (if one was specified in the stored value) * @use_p2pdma: returns whether to enable p2pdma or not * * Parses an attribute value to decide whether to enable p2pdma. * The value can select a PCI device (using its full BDF device * name) or a boolean (in any format kstrtobool() accepts). A false * value disables p2pdma, a true value expects the caller * to automatically find a compatible device and specifying a PCI device * expects the caller to use the specific provider. * * pci_p2pdma_enable_show() should be used as the show operation for * the attribute. 
* * Returns 0 on success */ int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev, bool *use_p2pdma) { struct device *dev; dev = bus_find_device_by_name(&pci_bus_type, NULL, page); if (dev) { *use_p2pdma = true; *p2p_dev = to_pci_dev(dev); if (!pci_has_p2pmem(*p2p_dev)) { pci_err(*p2p_dev, "PCI device has no peer-to-peer memory: %s\n", page); pci_dev_put(*p2p_dev); return -ENODEV; } return 0; } else if ((page[0] == '0' || page[0] == '1') && !iscntrl(page[1])) { /* * If the user enters a PCI device that doesn't exist * like "0000:01:00.1", we don't want kstrtobool to think * it's a '0' when it's clearly not what the user wanted. * So we require 0's and 1's to be exactly one character. */ } else if (!kstrtobool(page, use_p2pdma)) { return 0; } pr_err("No such PCI device: %.*s\n", (int)strcspn(page, "\n"), page); return -ENODEV; } EXPORT_SYMBOL_GPL(pci_p2pdma_enable_store); /** * pci_p2pdma_enable_show - show a configfs/sysfs attribute indicating * whether p2pdma is enabled * @page: contents of the stored value * @p2p_dev: the selected p2p device (NULL if no device is selected) * @use_p2pdma: whether p2pdma has been enabled * * Attributes that use pci_p2pdma_enable_store() should use this function * to show the value of the attribute. * * Returns 0 on success */ ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev, bool use_p2pdma) { if (!use_p2pdma) return sprintf(page, "0\n"); if (!p2p_dev) return sprintf(page, "1\n"); return sprintf(page, "%s\n", pci_name(p2p_dev)); } EXPORT_SYMBOL_GPL(pci_p2pdma_enable_show);
linux-master
drivers/pci/p2pdma.c
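/*
 * Illustrative sketch, not part of the kernel tree: a consumer of the P2PDMA
 * API above.  "clients" would normally be the DMA-capable struct devices
 * (e.g. an NVMe drive and an RDMA NIC) that will touch the buffer; the
 * function name and the 4 KiB size are hypothetical.
 */
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>
#include <linux/sizes.h>

static int demo_use_p2pmem(struct device **clients, int num_clients)
{
	struct pci_dev *provider;
	pci_bus_addr_t bus_addr;
	void *buf;

	/* Pick the published p2pmem provider closest to all clients. */
	provider = pci_p2pmem_find_many(clients, num_clients);
	if (!provider)
		return -ENODEV;

	buf = pci_alloc_p2pmem(provider, SZ_4K);
	if (!buf) {
		pci_dev_put(provider);
		return -ENOMEM;
	}

	/* Bus address to program into a peer device's DMA engine. */
	bus_addr = pci_p2pmem_virt_to_bus(provider, buf);
	pci_info(provider, "p2p buffer at bus address %#llx\n",
		 (unsigned long long)bus_addr);

	pci_free_p2pmem(provider, buf, SZ_4K);
	pci_dev_put(provider);
	return 0;
}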
// SPDX-License-Identifier: GPL-2.0 /* * Support routines for initializing a PCI subsystem * * Extruded from code written by * Dave Rusling ([email protected]) * David Mosberger ([email protected]) * David Miller ([email protected]) */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/cache.h> #include "pci.h" void pci_assign_irq(struct pci_dev *dev) { u8 pin; u8 slot = -1; int irq = 0; struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus); if (!(hbrg->map_irq)) { pci_dbg(dev, "runtime IRQ mapping not provided by arch\n"); return; } /* * If this device is not on the primary bus, we need to figure out * which interrupt pin it will come in on. We know which slot it * will come in on because that slot is where the bridge is. Each * time the interrupt line passes through a PCI-PCI bridge we must * apply the swizzle function. */ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); /* Cope with illegal. */ if (pin > 4) pin = 1; if (pin) { /* Follow the chain of bridges, swizzling as we go. */ if (hbrg->swizzle_irq) slot = (*(hbrg->swizzle_irq))(dev, &pin); /* * If a swizzling function is not used, map_irq() must * ignore slot. */ irq = (*(hbrg->map_irq))(dev, slot, pin); if (irq == -1) irq = 0; } dev->irq = irq; pci_dbg(dev, "assign IRQ: got %d\n", dev->irq); /* * Always tell the device, so the driver knows what is the real IRQ * to use; the device does not use it. */ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); }
linux-master
drivers/pci/setup-irq.c
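/*
 * Illustrative sketch, not part of the kernel tree: pci_assign_irq() only
 * does something if the host bridge provides map_irq (and, usually,
 * swizzle_irq).  A DT-based host-bridge driver typically wires in the stock
 * helpers as below; the "demo" name is hypothetical and
 * of_irq_parse_and_map_pci() is assumed to be available (CONFIG_OF_IRQ).
 */
#include <linux/pci.h>
#include <linux/of_pci.h>

static void demo_setup_irq_hooks(struct pci_host_bridge *bridge)
{
	/* Standard INTx swizzle across PCI-PCI bridges ... */
	bridge->swizzle_irq = pci_common_swizzle;
	/* ... and the DT interrupt-map lookup at the root bus. */
	bridge->map_irq = of_irq_parse_and_map_pci;
}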
// SPDX-License-Identifier: GPL-2.0 /* * Export the firmware instance and label associated with a PCI device to * sysfs * * Copyright (C) 2010 Dell Inc. * by Narendra K <[email protected]>, * Jordan Hargrave <[email protected]> * * PCI Firmware Specification Revision 3.1 section 4.6.7 (DSM for Naming a * PCI or PCI Express Device Under Operating Systems) defines an instance * number and string name. This code retrieves them and exports them to sysfs. * If the system firmware does not provide the ACPI _DSM (Device Specific * Method), then the SMBIOS type 41 instance number and string is exported to * sysfs. * * SMBIOS defines type 41 for onboard pci devices. This code retrieves * the instance number and string from the type 41 record and exports * it to sysfs. * * Please see https://linux.dell.com/files/biosdevname/ for more * information. */ #include <linux/dmi.h> #include <linux/sysfs.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/module.h> #include <linux/device.h> #include <linux/nls.h> #include <linux/acpi.h> #include <linux/pci-acpi.h> #include "pci.h" static bool device_has_acpi_name(struct device *dev) { #ifdef CONFIG_ACPI acpi_handle handle = ACPI_HANDLE(dev); if (!handle) return false; return acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2, 1 << DSM_PCI_DEVICE_NAME); #else return false; #endif } #ifdef CONFIG_DMI enum smbios_attr_enum { SMBIOS_ATTR_NONE = 0, SMBIOS_ATTR_LABEL_SHOW, SMBIOS_ATTR_INSTANCE_SHOW, }; static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, enum smbios_attr_enum attribute) { const struct dmi_device *dmi; struct dmi_dev_onboard *donboard; int domain_nr = pci_domain_nr(pdev->bus); int bus = pdev->bus->number; int devfn = pdev->devfn; dmi = NULL; while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, NULL, dmi)) != NULL) { donboard = dmi->device_data; if (donboard && donboard->segment == domain_nr && donboard->bus == bus && donboard->devfn == devfn) { if (buf) { if (attribute == SMBIOS_ATTR_INSTANCE_SHOW) return sysfs_emit(buf, "%d\n", donboard->instance); else if (attribute == SMBIOS_ATTR_LABEL_SHOW) return sysfs_emit(buf, "%s\n", dmi->name); } return strlen(dmi->name); } } return 0; } static ssize_t smbios_label_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return find_smbios_instance_string(pdev, buf, SMBIOS_ATTR_LABEL_SHOW); } static struct device_attribute dev_attr_smbios_label = __ATTR(label, 0444, smbios_label_show, NULL); static ssize_t index_show(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev = to_pci_dev(dev); return find_smbios_instance_string(pdev, buf, SMBIOS_ATTR_INSTANCE_SHOW); } static DEVICE_ATTR_RO(index); static struct attribute *smbios_attrs[] = { &dev_attr_smbios_label.attr, &dev_attr_index.attr, NULL, }; static umode_t smbios_attr_is_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); struct pci_dev *pdev = to_pci_dev(dev); if (device_has_acpi_name(dev)) return 0; if (!find_smbios_instance_string(pdev, NULL, SMBIOS_ATTR_NONE)) return 0; return a->mode; } const struct attribute_group pci_dev_smbios_attr_group = { .attrs = smbios_attrs, .is_visible = smbios_attr_is_visible, }; #endif #ifdef CONFIG_ACPI enum acpi_attr_enum { ACPI_ATTR_LABEL_SHOW, ACPI_ATTR_INDEX_SHOW, }; static int dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf) { int len; len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer, obj->buffer.length, UTF16_LITTLE_ENDIAN, 
buf, PAGE_SIZE - 1); buf[len++] = '\n'; return len; } static int dsm_get_label(struct device *dev, char *buf, enum acpi_attr_enum attr) { acpi_handle handle = ACPI_HANDLE(dev); union acpi_object *obj, *tmp; int len = 0; if (!handle) return -1; obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 0x2, DSM_PCI_DEVICE_NAME, NULL); if (!obj) return -1; tmp = obj->package.elements; if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 && tmp[0].type == ACPI_TYPE_INTEGER && (tmp[1].type == ACPI_TYPE_STRING || tmp[1].type == ACPI_TYPE_BUFFER)) { /* * The second string element is optional even when * this _DSM is implemented; when not implemented, * this entry must return a null string. */ if (attr == ACPI_ATTR_INDEX_SHOW) { len = sysfs_emit(buf, "%llu\n", tmp->integer.value); } else if (attr == ACPI_ATTR_LABEL_SHOW) { if (tmp[1].type == ACPI_TYPE_STRING) len = sysfs_emit(buf, "%s\n", tmp[1].string.pointer); else if (tmp[1].type == ACPI_TYPE_BUFFER) len = dsm_label_utf16s_to_utf8s(tmp + 1, buf); } } ACPI_FREE(obj); return len > 0 ? len : -1; } static ssize_t label_show(struct device *dev, struct device_attribute *attr, char *buf) { return dsm_get_label(dev, buf, ACPI_ATTR_LABEL_SHOW); } static DEVICE_ATTR_RO(label); static ssize_t acpi_index_show(struct device *dev, struct device_attribute *attr, char *buf) { return dsm_get_label(dev, buf, ACPI_ATTR_INDEX_SHOW); } static DEVICE_ATTR_RO(acpi_index); static struct attribute *acpi_attrs[] = { &dev_attr_label.attr, &dev_attr_acpi_index.attr, NULL, }; static umode_t acpi_attr_is_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = kobj_to_dev(kobj); if (!device_has_acpi_name(dev)) return 0; return a->mode; } const struct attribute_group pci_dev_acpi_attr_group = { .attrs = acpi_attrs, .is_visible = acpi_attr_is_visible, }; #endif
linux-master
drivers/pci/pci-label.c
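The pci-label.c record above exports per-device "label", "index" and "acpi_index" attributes through sysfs, sourced from the ACPI _DSM naming method or an SMBIOS type 41 record. Below is a minimal userspace sketch that reads those attributes for one device; the BDF "0000:00:1f.0" is a made-up example address, and any of the attributes may simply be absent when the firmware provides neither naming source for that device.

#include <stdio.h>
#include <string.h>

/* Read one sysfs attribute of a PCI device into buf; returns 0 on success. */
static int read_pci_attr(const char *bdf, const char *attr, char *buf, size_t len)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/bus/pci/devices/%s/%s", bdf, attr);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* attribute not exported for this device */
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	buf[strcspn(buf, "\n")] = '\0';
	fclose(f);
	return 0;
}

int main(void)
{
	const char *bdf = "0000:00:1f.0";	/* hypothetical device address */
	char val[128];

	if (!read_pci_attr(bdf, "label", val, sizeof(val)))
		printf("%s label: %s\n", bdf, val);
	if (!read_pci_attr(bdf, "index", val, sizeof(val)))
		printf("%s index: %s\n", bdf, val);
	if (!read_pci_attr(bdf, "acpi_index", val, sizeof(val)))
		printf("%s acpi_index: %s\n", bdf, val);
	return 0;
}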
// SPDX-License-Identifier: GPL-2.0 /* * PCI Express I/O Virtualization (IOV) support * Address Translation Service 1.0 * Page Request Interface added by Joerg Roedel <[email protected]> * PASID support added by Joerg Roedel <[email protected]> * * Copyright (C) 2009 Intel Corporation, Yu Zhao <[email protected]> * Copyright (C) 2011 Advanced Micro Devices, */ #include <linux/export.h> #include <linux/pci-ats.h> #include <linux/pci.h> #include <linux/slab.h> #include "pci.h" void pci_ats_init(struct pci_dev *dev) { int pos; if (pci_ats_disabled()) return; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS); if (!pos) return; dev->ats_cap = pos; } /** * pci_ats_supported - check if the device can use ATS * @dev: the PCI device * * Returns true if the device supports ATS and is allowed to use it, false * otherwise. */ bool pci_ats_supported(struct pci_dev *dev) { if (!dev->ats_cap) return false; return (dev->untrusted == 0); } EXPORT_SYMBOL_GPL(pci_ats_supported); /** * pci_enable_ats - enable the ATS capability * @dev: the PCI device * @ps: the IOMMU page shift * * Returns 0 on success, or negative on failure. */ int pci_enable_ats(struct pci_dev *dev, int ps) { u16 ctrl; struct pci_dev *pdev; if (!pci_ats_supported(dev)) return -EINVAL; if (WARN_ON(dev->ats_enabled)) return -EBUSY; if (ps < PCI_ATS_MIN_STU) return -EINVAL; /* * Note that enabling ATS on a VF fails unless it's already enabled * with the same STU on the PF. */ ctrl = PCI_ATS_CTRL_ENABLE; if (dev->is_virtfn) { pdev = pci_physfn(dev); if (pdev->ats_stu != ps) return -EINVAL; } else { dev->ats_stu = ps; ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU); } pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl); dev->ats_enabled = 1; return 0; } EXPORT_SYMBOL_GPL(pci_enable_ats); /** * pci_disable_ats - disable the ATS capability * @dev: the PCI device */ void pci_disable_ats(struct pci_dev *dev) { u16 ctrl; if (WARN_ON(!dev->ats_enabled)) return; pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, &ctrl); ctrl &= ~PCI_ATS_CTRL_ENABLE; pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl); dev->ats_enabled = 0; } EXPORT_SYMBOL_GPL(pci_disable_ats); void pci_restore_ats_state(struct pci_dev *dev) { u16 ctrl; if (!dev->ats_enabled) return; ctrl = PCI_ATS_CTRL_ENABLE; if (!dev->is_virtfn) ctrl |= PCI_ATS_CTRL_STU(dev->ats_stu - PCI_ATS_MIN_STU); pci_write_config_word(dev, dev->ats_cap + PCI_ATS_CTRL, ctrl); } /** * pci_ats_queue_depth - query the ATS Invalidate Queue Depth * @dev: the PCI device * * Returns the queue depth on success, or negative on failure. * * The ATS spec uses 0 in the Invalidate Queue Depth field to * indicate that the function can accept 32 Invalidate Request. * But here we use the `real' values (i.e. 1~32) for the Queue * Depth; and 0 indicates the function shares the Queue with * other functions (doesn't exclusively own a Queue). */ int pci_ats_queue_depth(struct pci_dev *dev) { u16 cap; if (!dev->ats_cap) return -EINVAL; if (dev->is_virtfn) return 0; pci_read_config_word(dev, dev->ats_cap + PCI_ATS_CAP, &cap); return PCI_ATS_CAP_QDEP(cap) ? PCI_ATS_CAP_QDEP(cap) : PCI_ATS_MAX_QDEP; } /** * pci_ats_page_aligned - Return Page Aligned Request bit status. * @pdev: the PCI device * * Returns 1, if the Untranslated Addresses generated by the device * are always aligned or 0 otherwise. * * Per PCIe spec r4.0, sec 10.5.1.2, if the Page Aligned Request bit * is set, it indicates the Untranslated Addresses generated by the * device are always aligned to a 4096 byte boundary. 
*/ int pci_ats_page_aligned(struct pci_dev *pdev) { u16 cap; if (!pdev->ats_cap) return 0; pci_read_config_word(pdev, pdev->ats_cap + PCI_ATS_CAP, &cap); if (cap & PCI_ATS_CAP_PAGE_ALIGNED) return 1; return 0; } #ifdef CONFIG_PCI_PRI void pci_pri_init(struct pci_dev *pdev) { u16 status; pdev->pri_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); if (!pdev->pri_cap) return; pci_read_config_word(pdev, pdev->pri_cap + PCI_PRI_STATUS, &status); if (status & PCI_PRI_STATUS_PASID) pdev->pasid_required = 1; } /** * pci_enable_pri - Enable PRI capability * @pdev: PCI device structure * @reqs: outstanding requests * * Returns 0 on success, negative value on error */ int pci_enable_pri(struct pci_dev *pdev, u32 reqs) { u16 control, status; u32 max_requests; int pri = pdev->pri_cap; /* * VFs must not implement the PRI Capability. If their PF * implements PRI, it is shared by the VFs, so if the PF PRI is * enabled, it is also enabled for the VF. */ if (pdev->is_virtfn) { if (pci_physfn(pdev)->pri_enabled) return 0; return -EINVAL; } if (WARN_ON(pdev->pri_enabled)) return -EBUSY; if (!pri) return -EINVAL; pci_read_config_word(pdev, pri + PCI_PRI_STATUS, &status); if (!(status & PCI_PRI_STATUS_STOPPED)) return -EBUSY; pci_read_config_dword(pdev, pri + PCI_PRI_MAX_REQ, &max_requests); reqs = min(max_requests, reqs); pdev->pri_reqs_alloc = reqs; pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs); control = PCI_PRI_CTRL_ENABLE; pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control); pdev->pri_enabled = 1; return 0; } /** * pci_disable_pri - Disable PRI capability * @pdev: PCI device structure * * Only clears the enabled-bit, regardless of its former value */ void pci_disable_pri(struct pci_dev *pdev) { u16 control; int pri = pdev->pri_cap; /* VFs share the PF PRI */ if (pdev->is_virtfn) return; if (WARN_ON(!pdev->pri_enabled)) return; if (!pri) return; pci_read_config_word(pdev, pri + PCI_PRI_CTRL, &control); control &= ~PCI_PRI_CTRL_ENABLE; pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control); pdev->pri_enabled = 0; } EXPORT_SYMBOL_GPL(pci_disable_pri); /** * pci_restore_pri_state - Restore PRI * @pdev: PCI device structure */ void pci_restore_pri_state(struct pci_dev *pdev) { u16 control = PCI_PRI_CTRL_ENABLE; u32 reqs = pdev->pri_reqs_alloc; int pri = pdev->pri_cap; if (pdev->is_virtfn) return; if (!pdev->pri_enabled) return; if (!pri) return; pci_write_config_dword(pdev, pri + PCI_PRI_ALLOC_REQ, reqs); pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control); } /** * pci_reset_pri - Resets device's PRI state * @pdev: PCI device structure * * The PRI capability must be disabled before this function is called. * Returns 0 on success, negative value on error. */ int pci_reset_pri(struct pci_dev *pdev) { u16 control; int pri = pdev->pri_cap; if (pdev->is_virtfn) return 0; if (WARN_ON(pdev->pri_enabled)) return -EBUSY; if (!pri) return -EINVAL; control = PCI_PRI_CTRL_RESET; pci_write_config_word(pdev, pri + PCI_PRI_CTRL, control); return 0; } /** * pci_prg_resp_pasid_required - Return PRG Response PASID Required bit * status. * @pdev: PCI device structure * * Returns 1 if PASID is required in PRG Response Message, 0 otherwise. */ int pci_prg_resp_pasid_required(struct pci_dev *pdev) { if (pdev->is_virtfn) pdev = pci_physfn(pdev); return pdev->pasid_required; } /** * pci_pri_supported - Check if PRI is supported. * @pdev: PCI device structure * * Returns true if PRI capability is present, false otherwise. 
*/ bool pci_pri_supported(struct pci_dev *pdev) { /* VFs share the PF PRI */ if (pci_physfn(pdev)->pri_cap) return true; return false; } EXPORT_SYMBOL_GPL(pci_pri_supported); #endif /* CONFIG_PCI_PRI */ #ifdef CONFIG_PCI_PASID void pci_pasid_init(struct pci_dev *pdev) { pdev->pasid_cap = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); } /** * pci_enable_pasid - Enable the PASID capability * @pdev: PCI device structure * @features: Features to enable * * Returns 0 on success, negative value on error. This function checks * whether the features are actually supported by the device and returns * an error if not. */ int pci_enable_pasid(struct pci_dev *pdev, int features) { u16 control, supported; int pasid = pdev->pasid_cap; /* * VFs must not implement the PASID Capability, but if a PF * supports PASID, its VFs share the PF PASID configuration. */ if (pdev->is_virtfn) { if (pci_physfn(pdev)->pasid_enabled) return 0; return -EINVAL; } if (WARN_ON(pdev->pasid_enabled)) return -EBUSY; if (!pdev->eetlp_prefix_path && !pdev->pasid_no_tlp) return -EINVAL; if (!pasid) return -EINVAL; if (!pci_acs_path_enabled(pdev, NULL, PCI_ACS_RR | PCI_ACS_UF)) return -EINVAL; pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported); supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV; /* User wants to enable anything unsupported? */ if ((supported & features) != features) return -EINVAL; control = PCI_PASID_CTRL_ENABLE | features; pdev->pasid_features = features; pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control); pdev->pasid_enabled = 1; return 0; } EXPORT_SYMBOL_GPL(pci_enable_pasid); /** * pci_disable_pasid - Disable the PASID capability * @pdev: PCI device structure */ void pci_disable_pasid(struct pci_dev *pdev) { u16 control = 0; int pasid = pdev->pasid_cap; /* VFs share the PF PASID configuration */ if (pdev->is_virtfn) return; if (WARN_ON(!pdev->pasid_enabled)) return; if (!pasid) return; pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control); pdev->pasid_enabled = 0; } EXPORT_SYMBOL_GPL(pci_disable_pasid); /** * pci_restore_pasid_state - Restore PASID capabilities * @pdev: PCI device structure */ void pci_restore_pasid_state(struct pci_dev *pdev) { u16 control; int pasid = pdev->pasid_cap; if (pdev->is_virtfn) return; if (!pdev->pasid_enabled) return; if (!pasid) return; control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features; pci_write_config_word(pdev, pasid + PCI_PASID_CTRL, control); } /** * pci_pasid_features - Check which PASID features are supported * @pdev: PCI device structure * * Returns a negative value when no PASI capability is present. * Otherwise is returns a bitmask with supported features. Current * features reported are: * PCI_PASID_CAP_EXEC - Execute permission supported * PCI_PASID_CAP_PRIV - Privileged mode supported */ int pci_pasid_features(struct pci_dev *pdev) { u16 supported; int pasid; if (pdev->is_virtfn) pdev = pci_physfn(pdev); pasid = pdev->pasid_cap; if (!pasid) return -EINVAL; pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported); supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV; return supported; } EXPORT_SYMBOL_GPL(pci_pasid_features); #define PASID_NUMBER_SHIFT 8 #define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT) /** * pci_max_pasids - Get maximum number of PASIDs supported by device * @pdev: PCI device structure * * Returns negative value when PASID capability is not present. * Otherwise it returns the number of supported PASIDs. 
*/ int pci_max_pasids(struct pci_dev *pdev) { u16 supported; int pasid; if (pdev->is_virtfn) pdev = pci_physfn(pdev); pasid = pdev->pasid_cap; if (!pasid) return -EINVAL; pci_read_config_word(pdev, pasid + PCI_PASID_CAP, &supported); supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT; return (1 << supported); } EXPORT_SYMBOL_GPL(pci_max_pasids); #endif /* CONFIG_PCI_PASID */
linux-master
drivers/pci/ats.c
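The ats.c record above provides the ATS, PRI and PASID enable/disable helpers that IOMMU drivers call. The fragment below is a hypothetical kernel-side sketch (the function name and flow are illustrative, not taken from an in-tree driver) showing the intended call order using only helpers defined in that file; it assumes CONFIG_PCI_PRI and CONFIG_PCI_PASID are enabled and keeps error handling to the essentials.

#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Hypothetical helper: turn on ATS, PRI and PASID for one function. */
static int example_enable_svm_features(struct pci_dev *pdev)
{
	int ret;

	if (!pci_ats_supported(pdev))
		return -ENODEV;

	/* The STU is passed as a page shift; PAGE_SHIFT keeps PF and VFs consistent. */
	ret = pci_enable_ats(pdev, PAGE_SHIFT);
	if (ret)
		return ret;

	/* Request 32 outstanding page requests; the core clamps to the HW maximum. */
	ret = pci_enable_pri(pdev, 32);
	if (ret)
		goto disable_ats;

	/* No optional PASID features (exec/priv) requested here. */
	ret = pci_enable_pasid(pdev, 0);
	if (ret)
		goto disable_pri;

	dev_info(&pdev->dev, "ATS queue depth %d, up to %d PASIDs\n",
		 pci_ats_queue_depth(pdev), pci_max_pasids(pdev));
	return 0;

disable_pri:
	pci_disable_pri(pdev);
disable_ats:
	pci_disable_ats(pdev);
	return ret;
}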
// SPDX-License-Identifier: GPL-2.0+ /* * PCI <-> OF mapping helpers * * Copyright 2011 IBM Corp. */ #define pr_fmt(fmt) "PCI: OF: " fmt #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include "pci.h" #ifdef CONFIG_PCI /** * pci_set_of_node - Find and set device's DT device_node * @dev: the PCI device structure to fill * * Returns 0 on success with of_node set or when no device is described in the * DT. Returns -ENODEV if the device is present, but disabled in the DT. */ int pci_set_of_node(struct pci_dev *dev) { struct device_node *node; if (!dev->bus->dev.of_node) return 0; node = of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn); if (!node) return 0; device_set_node(&dev->dev, of_fwnode_handle(node)); return 0; } void pci_release_of_node(struct pci_dev *dev) { of_node_put(dev->dev.of_node); device_set_node(&dev->dev, NULL); } void pci_set_bus_of_node(struct pci_bus *bus) { struct device_node *node; if (bus->self == NULL) { node = pcibios_get_phb_of_node(bus); } else { node = of_node_get(bus->self->dev.of_node); if (node && of_property_read_bool(node, "external-facing")) bus->self->external_facing = true; } device_set_node(&bus->dev, of_fwnode_handle(node)); } void pci_release_bus_of_node(struct pci_bus *bus) { of_node_put(bus->dev.of_node); device_set_node(&bus->dev, NULL); } struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus) { /* This should only be called for PHBs */ if (WARN_ON(bus->self || bus->parent)) return NULL; /* * Look for a node pointer in either the intermediary device we * create above the root bus or its own parent. Normally only * the later is populated. */ if (bus->bridge->of_node) return of_node_get(bus->bridge->of_node); if (bus->bridge->parent && bus->bridge->parent->of_node) return of_node_get(bus->bridge->parent->of_node); return NULL; } struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus) { #ifdef CONFIG_IRQ_DOMAIN struct irq_domain *d; if (!bus->dev.of_node) return NULL; /* Start looking for a phandle to an MSI controller. */ d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI); if (d) return d; /* * If we don't have an msi-parent property, look for a domain * directly attached to the host bridge. */ d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI); if (d) return d; return irq_find_host(bus->dev.of_node); #else return NULL; #endif } bool pci_host_of_has_msi_map(struct device *dev) { if (dev && dev->of_node) return of_get_property(dev->of_node, "msi-map", NULL); return false; } static inline int __of_pci_pci_compare(struct device_node *node, unsigned int data) { int devfn; devfn = of_pci_get_devfn(node); if (devfn < 0) return 0; return devfn == data; } struct device_node *of_pci_find_child_device(struct device_node *parent, unsigned int devfn) { struct device_node *node, *node2; for_each_child_of_node(parent, node) { if (__of_pci_pci_compare(node, devfn)) return node; /* * Some OFs create a parent node "multifunc-device" as * a fake root for all functions of a multi-function * device we go down them as well. 
*/ if (of_node_name_eq(node, "multifunc-device")) { for_each_child_of_node(node, node2) { if (__of_pci_pci_compare(node2, devfn)) { of_node_put(node); return node2; } } } } return NULL; } EXPORT_SYMBOL_GPL(of_pci_find_child_device); /** * of_pci_get_devfn() - Get device and function numbers for a device node * @np: device node * * Parses a standard 5-cell PCI resource and returns an 8-bit value that can * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device * and function numbers respectively. On error a negative error code is * returned. */ int of_pci_get_devfn(struct device_node *np) { u32 reg[5]; int error; error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg)); if (error) return error; return (reg[0] >> 8) & 0xff; } EXPORT_SYMBOL_GPL(of_pci_get_devfn); /** * of_pci_parse_bus_range() - parse the bus-range property of a PCI device * @node: device node * @res: address to a struct resource to return the bus-range * * Returns 0 on success or a negative error-code on failure. */ int of_pci_parse_bus_range(struct device_node *node, struct resource *res) { u32 bus_range[2]; int error; error = of_property_read_u32_array(node, "bus-range", bus_range, ARRAY_SIZE(bus_range)); if (error) return error; res->name = node->name; res->start = bus_range[0]; res->end = bus_range[1]; res->flags = IORESOURCE_BUS; return 0; } EXPORT_SYMBOL_GPL(of_pci_parse_bus_range); /** * of_get_pci_domain_nr - Find the host bridge domain number * of the given device node. * @node: Device tree node with the domain information. * * This function will try to obtain the host bridge domain number by finding * a property called "linux,pci-domain" of the given device node. * * Return: * * > 0 - On success, an associated domain number. * * -EINVAL - The property "linux,pci-domain" does not exist. * * -ENODATA - The linux,pci-domain" property does not have value. * * -EOVERFLOW - Invalid "linux,pci-domain" property value. * * Returns the associated domain number from DT in the range [0-0xffff], or * a negative value if the required property is not found. */ int of_get_pci_domain_nr(struct device_node *node) { u32 domain; int error; error = of_property_read_u32(node, "linux,pci-domain", &domain); if (error) return error; return (u16)domain; } EXPORT_SYMBOL_GPL(of_get_pci_domain_nr); /** * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only * is present and valid */ void of_pci_check_probe_only(void) { u32 val; int ret; ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val); if (ret) { if (ret == -ENODATA || ret == -EOVERFLOW) pr_warn("linux,pci-probe-only without valid value, ignoring\n"); return; } if (val) pci_add_flags(PCI_PROBE_ONLY); else pci_clear_flags(PCI_PROBE_ONLY); pr_info("PROBE_ONLY %s\n", val ? "enabled" : "disabled"); } EXPORT_SYMBOL_GPL(of_pci_check_probe_only); /** * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI * host bridge resources from DT * @dev: host bridge device * @busno: bus number associated with the bridge root bus * @bus_max: maximum number of buses for this bridge * @resources: list where the range of resources will be added after DT parsing * @ib_resources: list where the range of inbound resources (with addresses * from 'dma-ranges') will be added after DT parsing * @io_base: pointer to a variable that will contain on return the physical * address for the start of the I/O range. Can be NULL if the caller doesn't * expect I/O ranges to be present in the device tree. 
* * This function will parse the "ranges" property of a PCI host bridge device * node and setup the resource mapping based on its content. It is expected * that the property conforms with the Power ePAPR document. * * It returns zero if the range parsing has been successful or a standard error * value if it failed. */ static int devm_of_pci_get_host_bridge_resources(struct device *dev, unsigned char busno, unsigned char bus_max, struct list_head *resources, struct list_head *ib_resources, resource_size_t *io_base) { struct device_node *dev_node = dev->of_node; struct resource *res, tmp_res; struct resource *bus_range; struct of_pci_range range; struct of_pci_range_parser parser; const char *range_type; int err; if (io_base) *io_base = (resource_size_t)OF_BAD_ADDR; bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL); if (!bus_range) return -ENOMEM; dev_info(dev, "host bridge %pOF ranges:\n", dev_node); err = of_pci_parse_bus_range(dev_node, bus_range); if (err) { bus_range->start = busno; bus_range->end = bus_max; bus_range->flags = IORESOURCE_BUS; dev_info(dev, " No bus range found for %pOF, using %pR\n", dev_node, bus_range); } else { if (bus_range->end > bus_range->start + bus_max) bus_range->end = bus_range->start + bus_max; } pci_add_resource(resources, bus_range); /* Check for ranges property */ err = of_pci_range_parser_init(&parser, dev_node); if (err) return 0; dev_dbg(dev, "Parsing ranges property...\n"); for_each_of_pci_range(&parser, &range) { /* Read next ranges element */ if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO) range_type = "IO"; else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM) range_type = "MEM"; else range_type = "err"; dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n", range_type, range.cpu_addr, range.cpu_addr + range.size - 1, range.pci_addr); /* * If we failed translation or got a zero-sized region * then skip this range */ if (range.cpu_addr == OF_BAD_ADDR || range.size == 0) continue; err = of_pci_range_to_resource(&range, dev_node, &tmp_res); if (err) continue; res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL); if (!res) { err = -ENOMEM; goto failed; } if (resource_type(res) == IORESOURCE_IO) { if (!io_base) { dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n", dev_node); err = -EINVAL; goto failed; } if (*io_base != (resource_size_t)OF_BAD_ADDR) dev_warn(dev, "More than one I/O resource converted for %pOF. 
CPU base address for old range lost!\n", dev_node); *io_base = range.cpu_addr; } else if (resource_type(res) == IORESOURCE_MEM) { res->flags &= ~IORESOURCE_MEM_64; } pci_add_resource_offset(resources, res, res->start - range.pci_addr); } /* Check for dma-ranges property */ if (!ib_resources) return 0; err = of_pci_dma_range_parser_init(&parser, dev_node); if (err) return 0; dev_dbg(dev, "Parsing dma-ranges property...\n"); for_each_of_pci_range(&parser, &range) { /* * If we failed translation or got a zero-sized region * then skip this range */ if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) || range.cpu_addr == OF_BAD_ADDR || range.size == 0) continue; dev_info(dev, " %6s %#012llx..%#012llx -> %#012llx\n", "IB MEM", range.cpu_addr, range.cpu_addr + range.size - 1, range.pci_addr); err = of_pci_range_to_resource(&range, dev_node, &tmp_res); if (err) continue; res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL); if (!res) { err = -ENOMEM; goto failed; } pci_add_resource_offset(ib_resources, res, res->start - range.pci_addr); } return 0; failed: pci_free_resource_list(resources); return err; } #if IS_ENABLED(CONFIG_OF_IRQ) /** * of_irq_parse_pci - Resolve the interrupt for a PCI device * @pdev: the device whose interrupt is to be resolved * @out_irq: structure of_phandle_args filled by this function * * This function resolves the PCI interrupt for a given PCI device. If a * device-node exists for a given pci_dev, it will use normal OF tree * walking. If not, it will implement standard swizzling and walk up the * PCI tree until an device-node is found, at which point it will finish * resolving using the OF tree walking. */ static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) { struct device_node *dn, *ppnode = NULL; struct pci_dev *ppdev; __be32 laddr[3]; u8 pin; int rc; /* * Check if we have a device node, if yes, fallback to standard * device tree parsing */ dn = pci_device_to_OF_node(pdev); if (dn) { rc = of_irq_parse_one(dn, 0, out_irq); if (!rc) return rc; } /* * Ok, we don't, time to have fun. Let's start by building up an * interrupt spec. we assume #interrupt-cells is 1, which is standard * for PCI. If you do different, then don't use that routine. */ rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); if (rc != 0) goto err; /* No pin, exit with no error message. */ if (pin == 0) return -ENODEV; /* Local interrupt-map in the device node? Use it! */ if (of_property_present(dn, "interrupt-map")) { pin = pci_swizzle_interrupt_pin(pdev, pin); ppnode = dn; } /* Now we walk up the PCI tree */ while (!ppnode) { /* Get the pci_dev of our parent */ ppdev = pdev->bus->self; /* Ouch, it's a host bridge... */ if (ppdev == NULL) { ppnode = pci_bus_to_OF_node(pdev->bus); /* No node for host bridge ? give up */ if (ppnode == NULL) { rc = -EINVAL; goto err; } } else { /* We found a P2P bridge, check if it has a node */ ppnode = pci_device_to_OF_node(ppdev); } /* * Ok, we have found a parent with a device-node, hand over to * the OF parsing code. * We build a unit address from the linux device to be used for * resolution. Note that we use the linux bus number which may * not match your firmware bus numbering. * Fortunately, in most cases, interrupt-map-mask doesn't * include the bus number as part of the matching. * You should still be careful about that though if you intend * to rely on this function (you ship a firmware that doesn't * create device nodes for all PCI devices). 
*/ if (ppnode) break; /* * We can only get here if we hit a P2P bridge with no node; * let's do standard swizzling and try again */ pin = pci_swizzle_interrupt_pin(pdev, pin); pdev = ppdev; } out_irq->np = ppnode; out_irq->args_count = 1; out_irq->args[0] = pin; laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8)); laddr[1] = laddr[2] = cpu_to_be32(0); rc = of_irq_parse_raw(laddr, out_irq); if (rc) goto err; return 0; err: if (rc == -ENOENT) { dev_warn(&pdev->dev, "%s: no interrupt-map found, INTx interrupts not available\n", __func__); pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n", __func__); } else { dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc); } return rc; } /** * of_irq_parse_and_map_pci() - Decode a PCI IRQ from the device tree and map to a VIRQ * @dev: The PCI device needing an IRQ * @slot: PCI slot number; passed when used as map_irq callback. Unused * @pin: PCI IRQ pin number; passed when used as map_irq callback. Unused * * @slot and @pin are unused, but included in the function so that this * function can be used directly as the map_irq callback to * pci_assign_irq() and struct pci_host_bridge.map_irq pointer */ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) { struct of_phandle_args oirq; int ret; ret = of_irq_parse_pci(dev, &oirq); if (ret) return 0; /* Proper return code 0 == NO_IRQ */ return irq_create_of_mapping(&oirq); } EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci); #endif /* CONFIG_OF_IRQ */ static int pci_parse_request_of_pci_ranges(struct device *dev, struct pci_host_bridge *bridge) { int err, res_valid = 0; resource_size_t iobase; struct resource_entry *win, *tmp; INIT_LIST_HEAD(&bridge->windows); INIT_LIST_HEAD(&bridge->dma_ranges); err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows, &bridge->dma_ranges, &iobase); if (err) return err; err = devm_request_pci_bus_resources(dev, &bridge->windows); if (err) return err; resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { struct resource *res = win->res; switch (resource_type(res)) { case IORESOURCE_IO: err = devm_pci_remap_iospace(dev, res, iobase); if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, res); resource_list_destroy_entry(win); } break; case IORESOURCE_MEM: res_valid |= !(res->flags & IORESOURCE_PREFETCH); if (!(res->flags & IORESOURCE_PREFETCH)) if (upper_32_bits(resource_size(res))) dev_warn(dev, "Memory resource size exceeds max for 32 bits\n"); break; } } if (!res_valid) dev_warn(dev, "non-prefetchable memory resource required\n"); return 0; } int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge) { if (!dev->of_node) return 0; bridge->swizzle_irq = pci_common_swizzle; bridge->map_irq = of_irq_parse_and_map_pci; return pci_parse_request_of_pci_ranges(dev, bridge); } #ifdef CONFIG_PCI_DYNAMIC_OF_NODES void of_pci_remove_node(struct pci_dev *pdev) { struct device_node *np; np = pci_device_to_OF_node(pdev); if (!np || !of_node_check_flag(np, OF_DYNAMIC)) return; pdev->dev.of_node = NULL; of_changeset_revert(np->data); of_changeset_destroy(np->data); of_node_put(np); } void of_pci_make_dev_node(struct pci_dev *pdev) { struct device_node *ppnode, *np = NULL; const char *pci_type; struct of_changeset *cset; const char *name; int ret; /* * If there is already a device tree node linked to this device, * return immediately. 
*/ if (pci_device_to_OF_node(pdev)) return; /* Check if there is device tree node for parent device */ if (!pdev->bus->self) ppnode = pdev->bus->dev.of_node; else ppnode = pdev->bus->self->dev.of_node; if (!ppnode) return; if (pci_is_bridge(pdev)) pci_type = "pci"; else pci_type = "dev"; name = kasprintf(GFP_KERNEL, "%s@%x,%x", pci_type, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)); if (!name) return; cset = kmalloc(sizeof(*cset), GFP_KERNEL); if (!cset) goto failed; of_changeset_init(cset); np = of_changeset_create_node(cset, ppnode, name); if (!np) goto failed; np->data = cset; ret = of_pci_add_properties(pdev, cset, np); if (ret) goto failed; ret = of_changeset_apply(cset); if (ret) goto failed; pdev->dev.of_node = np; kfree(name); return; failed: if (np) of_node_put(np); kfree(name); } #endif #endif /* CONFIG_PCI */ /** * of_pci_get_max_link_speed - Find the maximum link speed of the given device node. * @node: Device tree node with the maximum link speed information. * * This function will try to find the limitation of link speed by finding * a property called "max-link-speed" of the given device node. * * Return: * * > 0 - On success, a maximum link speed. * * -EINVAL - Invalid "max-link-speed" property value, or failure to access * the property of the device tree node. * * Returns the associated max link speed from DT, or a negative value if the * required property is not found or is invalid. */ int of_pci_get_max_link_speed(struct device_node *node) { u32 max_link_speed; if (of_property_read_u32(node, "max-link-speed", &max_link_speed) || max_link_speed == 0 || max_link_speed > 4) return -EINVAL; return max_link_speed; } EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed); /** * of_pci_get_slot_power_limit - Parses the "slot-power-limit-milliwatt" * property. * * @node: device tree node with the slot power limit information * @slot_power_limit_value: pointer where the value should be stored in PCIe * Slot Capabilities Register format * @slot_power_limit_scale: pointer where the scale should be stored in PCIe * Slot Capabilities Register format * * Returns the slot power limit in milliwatts and if @slot_power_limit_value * and @slot_power_limit_scale pointers are non-NULL, fills in the value and * scale in format used by PCIe Slot Capabilities Register. * * If the property is not found or is invalid, returns 0. 
*/ u32 of_pci_get_slot_power_limit(struct device_node *node, u8 *slot_power_limit_value, u8 *slot_power_limit_scale) { u32 slot_power_limit_mw; u8 value, scale; if (of_property_read_u32(node, "slot-power-limit-milliwatt", &slot_power_limit_mw)) slot_power_limit_mw = 0; /* Calculate Slot Power Limit Value and Slot Power Limit Scale */ if (slot_power_limit_mw == 0) { value = 0x00; scale = 0; } else if (slot_power_limit_mw <= 255) { value = slot_power_limit_mw; scale = 3; } else if (slot_power_limit_mw <= 255*10) { value = slot_power_limit_mw / 10; scale = 2; slot_power_limit_mw = slot_power_limit_mw / 10 * 10; } else if (slot_power_limit_mw <= 255*100) { value = slot_power_limit_mw / 100; scale = 1; slot_power_limit_mw = slot_power_limit_mw / 100 * 100; } else if (slot_power_limit_mw <= 239*1000) { value = slot_power_limit_mw / 1000; scale = 0; slot_power_limit_mw = slot_power_limit_mw / 1000 * 1000; } else if (slot_power_limit_mw < 250*1000) { value = 0xEF; scale = 0; slot_power_limit_mw = 239*1000; } else if (slot_power_limit_mw <= 600*1000) { value = 0xF0 + (slot_power_limit_mw / 1000 - 250) / 25; scale = 0; slot_power_limit_mw = slot_power_limit_mw / (1000*25) * (1000*25); } else { value = 0xFE; scale = 0; slot_power_limit_mw = 600*1000; } if (slot_power_limit_value) *slot_power_limit_value = value; if (slot_power_limit_scale) *slot_power_limit_scale = scale; return slot_power_limit_mw; } EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit);
linux-master
drivers/pci/of.c
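of.c above supplies the device-tree helpers a PCI host-bridge driver uses at probe time. The sketch below is a hypothetical probe fragment that calls only helpers visible in that file (of_get_pci_domain_nr(), of_pci_get_max_link_speed(), of_pci_get_slot_power_limit()); it assumes the code lives under drivers/pci/controller/ so the drivers/pci-internal declarations in "../pci.h" are reachable, and the messages are purely illustrative.

#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include "../pci.h"	/* assumed location: a drivers/pci controller driver */

/* Hypothetical: report optional PCI properties of a host-bridge node. */
static void example_parse_bridge_props(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	u8 slot_value, slot_scale;
	u32 slot_mw;
	int domain, speed;

	domain = of_get_pci_domain_nr(node);
	if (domain >= 0)
		dev_info(&pdev->dev, "linux,pci-domain: %d\n", domain);

	speed = of_pci_get_max_link_speed(node);
	if (speed > 0)
		dev_info(&pdev->dev, "max-link-speed: Gen%d\n", speed);

	/* Returns 0 (with value/scale 0) when the property is absent or invalid. */
	slot_mw = of_pci_get_slot_power_limit(node, &slot_value, &slot_scale);
	if (slot_mw)
		dev_info(&pdev->dev, "slot power limit %u mW (value %#x, scale %u)\n",
			 slot_mw, slot_value, slot_scale);
}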
// SPDX-License-Identifier: GPL-2.0 /* * Procfs interface for the PCI bus * * Copyright (c) 1997--1999 Martin Mares <[email protected]> */ #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/capability.h> #include <linux/uaccess.h> #include <linux/security.h> #include <asm/byteorder.h> #include "pci.h" static int proc_initialized; /* = 0 */ static loff_t proc_bus_pci_lseek(struct file *file, loff_t off, int whence) { struct pci_dev *dev = pde_data(file_inode(file)); return fixed_size_llseek(file, off, whence, dev->cfg_size); } static ssize_t proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { struct pci_dev *dev = pde_data(file_inode(file)); unsigned int pos = *ppos; unsigned int cnt, size; /* * Normal users can read only the standardized portion of the * configuration space as several chips lock up when trying to read * undefined locations (think of Intel PIIX4 as a typical example). */ if (capable(CAP_SYS_ADMIN)) size = dev->cfg_size; else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) size = 128; else size = 64; if (pos >= size) return 0; if (nbytes >= size) nbytes = size; if (pos + nbytes > size) nbytes = size - pos; cnt = nbytes; if (!access_ok(buf, cnt)) return -EINVAL; pci_config_pm_runtime_get(dev); if ((pos & 1) && cnt) { unsigned char val; pci_user_read_config_byte(dev, pos, &val); __put_user(val, buf); buf++; pos++; cnt--; } if ((pos & 3) && cnt > 2) { unsigned short val; pci_user_read_config_word(dev, pos, &val); __put_user(cpu_to_le16(val), (__le16 __user *) buf); buf += 2; pos += 2; cnt -= 2; } while (cnt >= 4) { unsigned int val; pci_user_read_config_dword(dev, pos, &val); __put_user(cpu_to_le32(val), (__le32 __user *) buf); buf += 4; pos += 4; cnt -= 4; cond_resched(); } if (cnt >= 2) { unsigned short val; pci_user_read_config_word(dev, pos, &val); __put_user(cpu_to_le16(val), (__le16 __user *) buf); buf += 2; pos += 2; cnt -= 2; } if (cnt) { unsigned char val; pci_user_read_config_byte(dev, pos, &val); __put_user(val, buf); pos++; } pci_config_pm_runtime_put(dev); *ppos = pos; return nbytes; } static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, loff_t *ppos) { struct inode *ino = file_inode(file); struct pci_dev *dev = pde_data(ino); int pos = *ppos; int size = dev->cfg_size; int cnt, ret; ret = security_locked_down(LOCKDOWN_PCI_ACCESS); if (ret) return ret; if (pos >= size) return 0; if (nbytes >= size) nbytes = size; if (pos + nbytes > size) nbytes = size - pos; cnt = nbytes; if (!access_ok(buf, cnt)) return -EINVAL; pci_config_pm_runtime_get(dev); if ((pos & 1) && cnt) { unsigned char val; __get_user(val, buf); pci_user_write_config_byte(dev, pos, val); buf++; pos++; cnt--; } if ((pos & 3) && cnt > 2) { __le16 val; __get_user(val, (__le16 __user *) buf); pci_user_write_config_word(dev, pos, le16_to_cpu(val)); buf += 2; pos += 2; cnt -= 2; } while (cnt >= 4) { __le32 val; __get_user(val, (__le32 __user *) buf); pci_user_write_config_dword(dev, pos, le32_to_cpu(val)); buf += 4; pos += 4; cnt -= 4; } if (cnt >= 2) { __le16 val; __get_user(val, (__le16 __user *) buf); pci_user_write_config_word(dev, pos, le16_to_cpu(val)); buf += 2; pos += 2; cnt -= 2; } if (cnt) { unsigned char val; __get_user(val, buf); pci_user_write_config_byte(dev, pos, val); pos++; } pci_config_pm_runtime_put(dev); *ppos = pos; i_size_write(ino, dev->cfg_size); return nbytes; } #ifdef HAVE_PCI_MMAP struct 
pci_filp_private { enum pci_mmap_state mmap_state; int write_combine; }; #endif /* HAVE_PCI_MMAP */ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct pci_dev *dev = pde_data(file_inode(file)); #ifdef HAVE_PCI_MMAP struct pci_filp_private *fpriv = file->private_data; #endif /* HAVE_PCI_MMAP */ int ret = 0; ret = security_locked_down(LOCKDOWN_PCI_ACCESS); if (ret) return ret; switch (cmd) { case PCIIOC_CONTROLLER: ret = pci_domain_nr(dev->bus); break; #ifdef HAVE_PCI_MMAP case PCIIOC_MMAP_IS_IO: if (!arch_can_pci_mmap_io()) return -EINVAL; fpriv->mmap_state = pci_mmap_io; break; case PCIIOC_MMAP_IS_MEM: fpriv->mmap_state = pci_mmap_mem; break; case PCIIOC_WRITE_COMBINE: if (arch_can_pci_mmap_wc()) { if (arg) fpriv->write_combine = 1; else fpriv->write_combine = 0; break; } /* If arch decided it can't, fall through... */ fallthrough; #endif /* HAVE_PCI_MMAP */ default: ret = -EINVAL; break; } return ret; } #ifdef HAVE_PCI_MMAP static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma) { struct pci_dev *dev = pde_data(file_inode(file)); struct pci_filp_private *fpriv = file->private_data; resource_size_t start, end; int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM; if (!capable(CAP_SYS_RAWIO) || security_locked_down(LOCKDOWN_PCI_ACCESS)) return -EPERM; if (fpriv->mmap_state == pci_mmap_io) { if (!arch_can_pci_mmap_io()) return -EINVAL; res_bit = IORESOURCE_IO; } /* Make sure the caller is mapping a real resource for this device */ for (i = 0; i < PCI_STD_NUM_BARS; i++) { if (dev->resource[i].flags & res_bit && pci_mmap_fits(dev, i, vma, PCI_MMAP_PROCFS)) break; } if (i >= PCI_STD_NUM_BARS) return -ENODEV; if (fpriv->mmap_state == pci_mmap_mem && fpriv->write_combine) { if (dev->resource[i].flags & IORESOURCE_PREFETCH) write_combine = 1; else return -EINVAL; } if (dev->resource[i].flags & IORESOURCE_MEM && iomem_is_exclusive(dev->resource[i].start)) return -EINVAL; pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); /* Adjust vm_pgoff to be the offset within the resource */ vma->vm_pgoff -= start >> PAGE_SHIFT; ret = pci_mmap_resource_range(dev, i, vma, fpriv->mmap_state, write_combine); if (ret < 0) return ret; return 0; } static int proc_bus_pci_open(struct inode *inode, struct file *file) { struct pci_filp_private *fpriv = kmalloc(sizeof(*fpriv), GFP_KERNEL); if (!fpriv) return -ENOMEM; fpriv->mmap_state = pci_mmap_io; fpriv->write_combine = 0; file->private_data = fpriv; file->f_mapping = iomem_get_mapping(); return 0; } static int proc_bus_pci_release(struct inode *inode, struct file *file) { kfree(file->private_data); file->private_data = NULL; return 0; } #endif /* HAVE_PCI_MMAP */ static const struct proc_ops proc_bus_pci_ops = { .proc_lseek = proc_bus_pci_lseek, .proc_read = proc_bus_pci_read, .proc_write = proc_bus_pci_write, .proc_ioctl = proc_bus_pci_ioctl, #ifdef CONFIG_COMPAT .proc_compat_ioctl = proc_bus_pci_ioctl, #endif #ifdef HAVE_PCI_MMAP .proc_open = proc_bus_pci_open, .proc_release = proc_bus_pci_release, .proc_mmap = proc_bus_pci_mmap, #ifdef HAVE_ARCH_PCI_GET_UNMAPPED_AREA .proc_get_unmapped_area = get_pci_unmapped_area, #endif /* HAVE_ARCH_PCI_GET_UNMAPPED_AREA */ #endif /* HAVE_PCI_MMAP */ }; /* iterator */ static void *pci_seq_start(struct seq_file *m, loff_t *pos) { struct pci_dev *dev = NULL; loff_t n = *pos; for_each_pci_dev(dev) { if (!n--) break; } return dev; } static void *pci_seq_next(struct seq_file *m, void *v, loff_t *pos) { struct pci_dev *dev = v; (*pos)++; dev = 
pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev); return dev; } static void pci_seq_stop(struct seq_file *m, void *v) { if (v) { struct pci_dev *dev = v; pci_dev_put(dev); } } static int show_device(struct seq_file *m, void *v) { const struct pci_dev *dev = v; const struct pci_driver *drv; int i; if (dev == NULL) return 0; drv = pci_dev_driver(dev); seq_printf(m, "%02x%02x\t%04x%04x\t%x", dev->bus->number, dev->devfn, dev->vendor, dev->device, dev->irq); /* only print standard and ROM resources to preserve compatibility */ for (i = 0; i <= PCI_ROM_RESOURCE; i++) { resource_size_t start, end; pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); seq_printf(m, "\t%16llx", (unsigned long long)(start | (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); } for (i = 0; i <= PCI_ROM_RESOURCE; i++) { resource_size_t start, end; pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); seq_printf(m, "\t%16llx", dev->resource[i].start < dev->resource[i].end ? (unsigned long long)(end - start) + 1 : 0); } seq_putc(m, '\t'); if (drv) seq_puts(m, drv->name); seq_putc(m, '\n'); return 0; } static const struct seq_operations proc_bus_pci_devices_op = { .start = pci_seq_start, .next = pci_seq_next, .stop = pci_seq_stop, .show = show_device }; static struct proc_dir_entry *proc_bus_pci_dir; int pci_proc_attach_device(struct pci_dev *dev) { struct pci_bus *bus = dev->bus; struct proc_dir_entry *e; char name[16]; if (!proc_initialized) return -EACCES; if (!bus->procdir) { if (pci_proc_domain(bus)) { sprintf(name, "%04x:%02x", pci_domain_nr(bus), bus->number); } else { sprintf(name, "%02x", bus->number); } bus->procdir = proc_mkdir(name, proc_bus_pci_dir); if (!bus->procdir) return -ENOMEM; } sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir, &proc_bus_pci_ops, dev); if (!e) return -ENOMEM; proc_set_size(e, dev->cfg_size); dev->procent = e; return 0; } int pci_proc_detach_device(struct pci_dev *dev) { proc_remove(dev->procent); dev->procent = NULL; return 0; } int pci_proc_detach_bus(struct pci_bus *bus) { proc_remove(bus->procdir); return 0; } static int __init pci_proc_init(void) { struct pci_dev *dev = NULL; proc_bus_pci_dir = proc_mkdir("bus/pci", NULL); proc_create_seq("devices", 0, proc_bus_pci_dir, &proc_bus_pci_devices_op); proc_initialized = 1; for_each_pci_dev(dev) pci_proc_attach_device(dev); return 0; } device_initcall(pci_proc_init);
linux-master
drivers/pci/proc.c
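proc.c above builds the /proc/bus/pci hierarchy, including the "devices" listing whose leading columns are bus+devfn, vendor+device and IRQ, all in hex (see show_device()). The short userspace sketch below decodes just those leading columns; it assumes only that the procfs file exists and is readable on the running kernel.

#include <stdio.h>

int main(void)
{
	unsigned int busdevfn, vendev, irq;
	char line[1024];
	FILE *f = fopen("/proc/bus/pci/devices", "r");

	if (!f) {
		perror("/proc/bus/pci/devices");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Leading format written by show_device(): "%02x%02x\t%04x%04x\t%x" */
		if (sscanf(line, "%x %x %x", &busdevfn, &vendev, &irq) != 3)
			continue;
		printf("%02x:%02x.%u vendor %04x device %04x irq %u\n",
		       (busdevfn >> 8) & 0xff,	/* bus number */
		       (busdevfn >> 3) & 0x1f,	/* PCI_SLOT(devfn) */
		       busdevfn & 0x07,		/* PCI_FUNC(devfn) */
		       vendev >> 16, vendev & 0xffff, irq);
	}
	fclose(f);
	return 0;
}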
// SPDX-License-Identifier: GPL-2.0 /* * This file contains work-arounds for many known PCI hardware bugs. * Devices present only on certain architectures (host bridges et cetera) * should be handled in arch-specific code. * * Note: any quirks for hotpluggable devices must _NOT_ be declared __init. * * Copyright (c) 1999 Martin Mares <[email protected]> * * Init/reset quirks for USB host controllers should be in the USB quirks * file, where their drivers can use them. */ #include <linux/bitfield.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/isa-dma.h> /* isa_dma_bridge_buggy */ #include <linux/init.h> #include <linux/delay.h> #include <linux/acpi.h> #include <linux/dmi.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/ktime.h> #include <linux/mm.h> #include <linux/nvme.h> #include <linux/platform_data/x86/apple.h> #include <linux/pm_runtime.h> #include <linux/suspend.h> #include <linux/switchtec.h> #include "pci.h" /* * Retrain the link of a downstream PCIe port by hand if necessary. * * This is needed at least where a downstream port of the ASMedia ASM2824 * Gen 3 switch is wired to the upstream port of the Pericom PI7C9X2G304 * Gen 2 switch, and observed with the Delock Riser Card PCI Express x1 > * 2 x PCIe x1 device, P/N 41433, plugged into the SiFive HiFive Unmatched * board. * * In such a configuration the switches are supposed to negotiate the link * speed of preferably 5.0GT/s, falling back to 2.5GT/s. However the link * continues switching between the two speeds indefinitely and the data * link layer never reaches the active state, with link training reported * repeatedly active ~84% of the time. Forcing the target link speed to * 2.5GT/s with the upstream ASM2824 device makes the two switches talk to * each other correctly however. And more interestingly retraining with a * higher target link speed afterwards lets the two successfully negotiate * 5.0GT/s. * * With the ASM2824 we can rely on the otherwise optional Data Link Layer * Link Active status bit and in the failed link training scenario it will * be off along with the Link Bandwidth Management Status indicating that * hardware has changed the link speed or width in an attempt to correct * unreliable link operation. For a port that has been left unconnected * both bits will be clear. So use this information to detect the problem * rather than polling the Link Training bit and watching out for flips or * at least the active status. * * Since the exact nature of the problem isn't known and in principle this * could trigger where an ASM2824 device is downstream rather upstream, * apply this erratum workaround to any downstream ports as long as they * support Link Active reporting and have the Link Control 2 register. * Restrict the speed to 2.5GT/s then with the Target Link Speed field, * request a retrain and wait 200ms for the data link to go up. * * If this turns out successful and we know by the Vendor:Device ID it is * safe to do so, then lift the restriction, letting the devices negotiate * a higher speed. Also check for a similar 2.5GT/s speed restriction the * firmware may have already arranged and lift it with ports that already * report their data link being up. * * Return TRUE if the link has been successfully retrained, otherwise FALSE. 
*/ bool pcie_failed_link_retrain(struct pci_dev *dev) { static const struct pci_device_id ids[] = { { PCI_VDEVICE(ASMEDIA, 0x2824) }, /* ASMedia ASM2824 */ {} }; u16 lnksta, lnkctl2; if (!pci_is_pcie(dev) || !pcie_downstream_port(dev) || !pcie_cap_has_lnkctl2(dev) || !dev->link_active_reporting) return false; pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &lnkctl2); pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); if ((lnksta & (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_DLLLA)) == PCI_EXP_LNKSTA_LBMS) { pci_info(dev, "broken device, retraining non-functional downstream link at 2.5GT/s\n"); lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; lnkctl2 |= PCI_EXP_LNKCTL2_TLS_2_5GT; pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); if (pcie_retrain_link(dev, false)) { pci_info(dev, "retraining failed\n"); return false; } pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); } if ((lnksta & PCI_EXP_LNKSTA_DLLLA) && (lnkctl2 & PCI_EXP_LNKCTL2_TLS) == PCI_EXP_LNKCTL2_TLS_2_5GT && pci_match_id(ids, dev)) { u32 lnkcap; pci_info(dev, "removing 2.5GT/s downstream link speed restriction\n"); pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap); lnkctl2 &= ~PCI_EXP_LNKCTL2_TLS; lnkctl2 |= lnkcap & PCI_EXP_LNKCAP_SLS; pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, lnkctl2); if (pcie_retrain_link(dev, false)) { pci_info(dev, "retraining failed\n"); return false; } } return true; } static ktime_t fixup_debug_start(struct pci_dev *dev, void (*fn)(struct pci_dev *dev)) { if (initcall_debug) pci_info(dev, "calling %pS @ %i\n", fn, task_pid_nr(current)); return ktime_get(); } static void fixup_debug_report(struct pci_dev *dev, ktime_t calltime, void (*fn)(struct pci_dev *dev)) { ktime_t delta, rettime; unsigned long long duration; rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = (unsigned long long) ktime_to_ns(delta) >> 10; if (initcall_debug || duration > 10000) pci_info(dev, "%pS took %lld usecs\n", fn, duration); } static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) { ktime_t calltime; for (; f < end; f++) if ((f->class == (u32) (dev->class >> f->class_shift) || f->class == (u32) PCI_ANY_ID) && (f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { void (*hook)(struct pci_dev *dev); #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS hook = offset_to_ptr(&f->hook_offset); #else hook = f->hook; #endif calltime = fixup_debug_start(dev, hook); hook(dev); fixup_debug_report(dev, calltime, hook); } } extern struct pci_fixup __start_pci_fixups_early[]; extern struct pci_fixup __end_pci_fixups_early[]; extern struct pci_fixup __start_pci_fixups_header[]; extern struct pci_fixup __end_pci_fixups_header[]; extern struct pci_fixup __start_pci_fixups_final[]; extern struct pci_fixup __end_pci_fixups_final[]; extern struct pci_fixup __start_pci_fixups_enable[]; extern struct pci_fixup __end_pci_fixups_enable[]; extern struct pci_fixup __start_pci_fixups_resume[]; extern struct pci_fixup __end_pci_fixups_resume[]; extern struct pci_fixup __start_pci_fixups_resume_early[]; extern struct pci_fixup __end_pci_fixups_resume_early[]; extern struct pci_fixup __start_pci_fixups_suspend[]; extern struct pci_fixup __end_pci_fixups_suspend[]; extern struct pci_fixup __start_pci_fixups_suspend_late[]; extern struct pci_fixup __end_pci_fixups_suspend_late[]; static bool pci_apply_fixup_final_quirks; void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { struct pci_fixup *start, *end; switch 
(pass) { case pci_fixup_early: start = __start_pci_fixups_early; end = __end_pci_fixups_early; break; case pci_fixup_header: start = __start_pci_fixups_header; end = __end_pci_fixups_header; break; case pci_fixup_final: if (!pci_apply_fixup_final_quirks) return; start = __start_pci_fixups_final; end = __end_pci_fixups_final; break; case pci_fixup_enable: start = __start_pci_fixups_enable; end = __end_pci_fixups_enable; break; case pci_fixup_resume: start = __start_pci_fixups_resume; end = __end_pci_fixups_resume; break; case pci_fixup_resume_early: start = __start_pci_fixups_resume_early; end = __end_pci_fixups_resume_early; break; case pci_fixup_suspend: start = __start_pci_fixups_suspend; end = __end_pci_fixups_suspend; break; case pci_fixup_suspend_late: start = __start_pci_fixups_suspend_late; end = __end_pci_fixups_suspend_late; break; default: /* stupid compiler warning, you would think with an enum... */ return; } pci_do_fixups(dev, start, end); } EXPORT_SYMBOL(pci_fixup_device); static int __init pci_apply_final_quirks(void) { struct pci_dev *dev = NULL; u8 cls = 0; u8 tmp; if (pci_cache_line_size) pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2); pci_apply_fixup_final_quirks = true; for_each_pci_dev(dev) { pci_fixup_device(pci_fixup_final, dev); /* * If arch hasn't set it explicitly yet, use the CLS * value shared by all PCI devices. If there's a * mismatch, fall back to the default value. */ if (!pci_cache_line_size) { pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &tmp); if (!cls) cls = tmp; if (!tmp || cls == tmp) continue; pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n", cls << 2, tmp << 2, pci_dfl_cache_line_size << 2); pci_cache_line_size = pci_dfl_cache_line_size; } } if (!pci_cache_line_size) { pr_info("PCI: CLS %u bytes, default %u\n", cls << 2, pci_dfl_cache_line_size << 2); pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size; } return 0; } fs_initcall_sync(pci_apply_final_quirks); /* * Decoding should be disabled for a PCI device during BAR sizing to avoid * conflict. But doing so may cause problems on host bridge and perhaps other * key system devices. For devices that need to have mmio decoding always-on, * we need to set the dev->mmio_always_on bit. */ static void quirk_mmio_always_on(struct pci_dev *dev) { dev->mmio_always_on = 1; } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_BRIDGE_HOST, 8, quirk_mmio_always_on); /* * The Mellanox Tavor device gives false positive parity errors. Disable * parity error reporting. */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR, pci_disable_parity); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE, pci_disable_parity); /* * Deal with broken BIOSes that neglect to enable passive release, * which can cause problems in combination with the 82441FX/PPro MTRRs */ static void quirk_passive_release(struct pci_dev *dev) { struct pci_dev *d = NULL; unsigned char dlc; /* * We have to make sure a particular bit is set in the PIIX3 * ISA bridge, so we have to go out and find it. 
*/ while ((d = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, d))) { pci_read_config_byte(d, 0x82, &dlc); if (!(dlc & 1<<1)) { pci_info(d, "PIIX3: Enabling Passive Release\n"); dlc |= 1<<1; pci_write_config_byte(d, 0x82, dlc); } } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_passive_release); #ifdef CONFIG_X86_32 /* * The VIA VP2/VP3/MVP3 seem to have some 'features'. There may be a * workaround but VIA don't answer queries. If you happen to have good * contacts at VIA ask them for me please -- Alan * * This appears to be BIOS not version dependent. So presumably there is a * chipset level fix. */ static void quirk_isa_dma_hangs(struct pci_dev *dev) { if (!isa_dma_bridge_buggy) { isa_dma_bridge_buggy = 1; pci_info(dev, "Activating ISA DMA hang workarounds\n"); } } /* * It's not totally clear which chipsets are the problematic ones. We know * 82C586 and 82C596 variants are affected. */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371SB_0, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); #endif #ifdef CONFIG_HAS_IOPORT /* * Intel NM10 "Tiger Point" LPC PM1a_STS.BM_STS must be clear * for some HT machines to use C4 w/o hanging. 
*/ static void quirk_tigerpoint_bm_sts(struct pci_dev *dev) { u32 pmbase; u16 pm1a; pci_read_config_dword(dev, 0x40, &pmbase); pmbase = pmbase & 0xff80; pm1a = inw(pmbase); if (pm1a & 0x10) { pci_info(dev, FW_BUG "Tiger Point LPC.BM_STS cleared\n"); outw(0x10, pmbase); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TGP_LPC, quirk_tigerpoint_bm_sts); #endif /* Chipsets where PCI->PCI transfers vanish or hang */ static void quirk_nopcipci(struct pci_dev *dev) { if ((pci_pci_problems & PCIPCI_FAIL) == 0) { pci_info(dev, "Disabling direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_FAIL; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, quirk_nopcipci); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496, quirk_nopcipci); static void quirk_nopciamd(struct pci_dev *dev) { u8 rev; pci_read_config_byte(dev, 0x08, &rev); if (rev == 0x13) { /* Erratum 24 */ pci_info(dev, "Chipset erratum: Disabling direct PCI/AGP transfers\n"); pci_pci_problems |= PCIAGP_FAIL; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8151_0, quirk_nopciamd); /* Triton requires workarounds to be used by the drivers */ static void quirk_triton(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_TRITON) == 0) { pci_info(dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_TRITON; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437, quirk_triton); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82437VX, quirk_triton); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439, quirk_triton); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82439TX, quirk_triton); /* * VIA Apollo KT133 needs PCI latency patch * Made according to a Windows driver-based patch by George E. Breese; * see PCI Latency Adjust on http://www.viahardware.com/download/viatweak.shtm * Also see http://www.au-ja.org/review-kt133a-1-en.phtml for the info on * which Mr Breese based his work. * * Updated based on further information from the site and also on * information provided by VIA */ static void quirk_vialatency(struct pci_dev *dev) { struct pci_dev *p; u8 busarb; /* * Ok, we have a potential problem chipset here. Now see if we have * a buggy southbridge. */ p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, NULL); if (p != NULL) { /* * 0x40 - 0x4f == 686B, 0x10 - 0x2f == 686A; * thanks Dan Hollis. * Check for buggy part revisions */ if (p->revision < 0x40 || p->revision > 0x42) goto exit; } else { p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, NULL); if (p == NULL) /* No problem parts */ goto exit; /* Check for buggy part revisions */ if (p->revision < 0x10 || p->revision > 0x12) goto exit; } /* * Ok we have the problem. Now set the PCI master grant to occur * every master grant. The apparent bug is that under high PCI load * (quite common in Linux of course) you can get data loss when the * CPU is held off the bus for 3 bus master requests. This happens * to include the IDE controllers.... * * VIA only apply this fix when an SB Live! is present but under * both Linux and Windows this isn't enough, and we have seen * corruption without SB Live! but with things like 3 UDMA IDE * controllers. So we ignore that bit of the VIA recommendation.. 
*/ pci_read_config_byte(dev, 0x76, &busarb); /* * Set bit 4 and bit 5 of byte 76 to 0x01 * "Master priority rotation on every PCI master grant" */ busarb &= ~(1<<5); busarb |= (1<<4); pci_write_config_byte(dev, 0x76, busarb); pci_info(dev, "Applying VIA southbridge workaround\n"); exit: pci_dev_put(p); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency); /* Must restore this on a resume from RAM */ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8363_0, quirk_vialatency); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8371_1, quirk_vialatency); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8361, quirk_vialatency); /* VIA Apollo VP3 needs ETBF on BT848/878 */ static void quirk_viaetbf(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_VIAETBF) == 0) { pci_info(dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_VIAETBF; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_viaetbf); static void quirk_vsfx(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_VSFX) == 0) { pci_info(dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_VSFX; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576, quirk_vsfx); /* * ALi Magik requires workarounds to be used by the drivers that DMA to AGP * space. Latency must be set to 0xA and Triton workaround applied too. * [Info kindly provided by ALi] */ static void quirk_alimagik(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_ALIMAGIK) == 0) { pci_info(dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_ALIMAGIK|PCIPCI_TRITON; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1647, quirk_alimagik); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1651, quirk_alimagik); /* Natoma has some interesting boundary conditions with Zoran stuff at least */ static void quirk_natoma(struct pci_dev *dev) { if ((pci_pci_problems&PCIPCI_NATOMA) == 0) { pci_info(dev, "Limiting direct PCI/PCI transfers\n"); pci_pci_problems |= PCIPCI_NATOMA; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_0, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443LX_1, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_1, quirk_natoma); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2, quirk_natoma); /* * This chip can cause PCI parity errors if config register 0xA0 is read * while DMAs are occurring. */ static void quirk_citrine(struct pci_dev *dev) { dev->cfg_size = 0xA0; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, quirk_citrine); /* * This chip can cause bus lockups if config addresses above 0x600 * are read or written. 
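 * Capping pci_dev->cfg_size at 0x600, as the fixup below does, makes the
 * PCI core refuse config space accesses at or beyond that offset, so the
 * problematic range is never touched.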
*/ static void quirk_nfp6000(struct pci_dev *dev) { dev->cfg_size = 0x600; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP4000, quirk_nfp6000); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000, quirk_nfp6000); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP5000, quirk_nfp6000); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NETRONOME_NFP6000_VF, quirk_nfp6000); /* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */ static void quirk_extend_bar_to_page(struct pci_dev *dev) { int i; for (i = 0; i < PCI_STD_NUM_BARS; i++) { struct resource *r = &dev->resource[i]; if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { r->end = PAGE_SIZE - 1; r->start = 0; r->flags |= IORESOURCE_UNSET; pci_info(dev, "expanded BAR %d to page size: %pR\n", i, r); } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page); /* * S3 868 and 968 chips report region size equal to 32M, but they decode 64M. * If it's needed, re-allocate the region. */ static void quirk_s3_64M(struct pci_dev *dev) { struct resource *r = &dev->resource[0]; if ((r->start & 0x3ffffff) || r->end != r->start + 0x3ffffff) { r->flags |= IORESOURCE_UNSET; r->start = 0; r->end = 0x3ffffff; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_868, quirk_s3_64M); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_S3, PCI_DEVICE_ID_S3_968, quirk_s3_64M); static void quirk_io(struct pci_dev *dev, int pos, unsigned int size, const char *name) { u32 region; struct pci_bus_region bus_region; struct resource *res = dev->resource + pos; pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + (pos << 2), &region); if (!region) return; res->name = pci_name(dev); res->flags = region & ~PCI_BASE_ADDRESS_IO_MASK; res->flags |= (IORESOURCE_IO | IORESOURCE_PCI_FIXED | IORESOURCE_SIZEALIGN); region &= ~(size - 1); /* Convert from PCI bus to resource space */ bus_region.start = region; bus_region.end = region + size - 1; pcibios_bus_to_resource(dev->bus, res, &bus_region); pci_info(dev, FW_BUG "%s quirk: reg 0x%x: %pR\n", name, PCI_BASE_ADDRESS_0 + (pos << 2), res); } /* * Some CS5536 BIOSes (for example, the Soekris NET5501 board w/ comBIOS * ver. 1.33 20070103) don't set the correct ISA PCI region header info. * BAR0 should be 8 bytes; instead, it may be set to something like 8k * (which conflicts w/ BAR1's memory range). * * CS553x's ISA PCI BARs may also be read-only (ref: * https://bugzilla.kernel.org/show_bug.cgi?id=85991 - Comment #4 forward). 
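 * Where the reported size is wrong, quirk_io() below rebuilds the BAR
 * resource from the raw register value with the known-correct size and
 * marks it IORESOURCE_PCI_FIXED so the core will neither move nor
 * re-size it.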
*/ static void quirk_cs5536_vsa(struct pci_dev *dev) { static char *name = "CS5536 ISA bridge"; if (pci_resource_len(dev, 0) != 8) { quirk_io(dev, 0, 8, name); /* SMB */ quirk_io(dev, 1, 256, name); /* GPIO */ quirk_io(dev, 2, 64, name); /* MFGPT */ pci_info(dev, "%s bug detected (incorrect header); workaround applied\n", name); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA, quirk_cs5536_vsa); static void quirk_io_region(struct pci_dev *dev, int port, unsigned int size, int nr, const char *name) { u16 region; struct pci_bus_region bus_region; struct resource *res = dev->resource + nr; pci_read_config_word(dev, port, &region); region &= ~(size - 1); if (!region) return; res->name = pci_name(dev); res->flags = IORESOURCE_IO; /* Convert from PCI bus to resource space */ bus_region.start = region; bus_region.end = region + size - 1; pcibios_bus_to_resource(dev->bus, res, &bus_region); if (!pci_claim_resource(dev, nr)) pci_info(dev, "quirk: %pR claimed by %s\n", res, name); } /* * ATI Northbridge setups MCE the processor if you even read somewhere * between 0x3b0->0x3bb or read 0x3d3 */ static void quirk_ati_exploding_mce(struct pci_dev *dev) { pci_info(dev, "ATI Northbridge, reserving I/O ports 0x3b0 to 0x3bb\n"); /* Mae rhaid i ni beidio ag edrych ar y lleoliadiau I/O hyn */ request_region(0x3b0, 0x0C, "RadeonIGP"); request_region(0x3d3, 0x01, "RadeonIGP"); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS100, quirk_ati_exploding_mce); /* * In the AMD NL platform, this device ([1022:7912]) has a class code of * PCI_CLASS_SERIAL_USB_XHCI (0x0c0330), which means the xhci driver will * claim it. * * But the dwc3 driver is a more specific driver for this device, and we'd * prefer to use it instead of xhci. To prevent xhci from claiming the * device, change the class code to 0x0c03fe, which the PCI r3.0 spec * defines as "USB device (not host controller)". The dwc3 driver can then * claim it based on its Vendor and Device ID. */ static void quirk_amd_nl_class(struct pci_dev *pdev) { u32 class = pdev->class; /* Use "USB Device (not host controller)" class */ pdev->class = PCI_CLASS_SERIAL_USB_DEVICE; pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n", class, pdev->class); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB, quirk_amd_nl_class); /* * Synopsys USB 3.x host HAPS platform has a class code of * PCI_CLASS_SERIAL_USB_XHCI, and xhci driver can claim it. However, these * devices should use dwc3-haps driver. Change these devices' class code to * PCI_CLASS_SERIAL_USB_DEVICE to prevent the xhci-pci driver from claiming * them. */ static void quirk_synopsys_haps(struct pci_dev *pdev) { u32 class = pdev->class; switch (pdev->device) { case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3: case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3_AXI: case PCI_DEVICE_ID_SYNOPSYS_HAPSUSB31: pdev->class = PCI_CLASS_SERIAL_USB_DEVICE; pci_info(pdev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n", class, pdev->class); break; } } DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, PCI_CLASS_SERIAL_USB_XHCI, 0, quirk_synopsys_haps); /* * Let's make the southbridge information explicit instead of having to * worry about people probing the ACPI areas, for example.. (Yes, it * happens, and if you read the wrong ACPI register it will put the machine * to sleep with no way of waking it up again. Bummer). 
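 * The quirk_io_region() helper used by the quirks below reads the base
 * port from the given config word, builds an IORESOURCE_IO resource of
 * the stated size and tries to claim it, so the range shows up in the
 * resource tree and cannot be handed out to anything else.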
* * ALI M7101: Two IO regions pointed to by words at * 0xE0 (64 bytes of ACPI registers) * 0xE2 (32 bytes of SMB registers) */ static void quirk_ali7101_acpi(struct pci_dev *dev) { quirk_io_region(dev, 0xE0, 64, PCI_BRIDGE_RESOURCES, "ali7101 ACPI"); quirk_io_region(dev, 0xE2, 32, PCI_BRIDGE_RESOURCES+1, "ali7101 SMB"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, quirk_ali7101_acpi); static void piix4_io_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) { u32 devres; u32 mask, size, base; pci_read_config_dword(dev, port, &devres); if ((devres & enable) != enable) return; mask = (devres >> 16) & 15; base = devres & 0xffff; size = 16; for (;;) { unsigned int bit = size >> 1; if ((bit & mask) == bit) break; size = bit; } /* * For now we only print it out. Eventually we'll want to * reserve it (at least if it's in the 0x1000+ range), but * let's get enough confirmation reports first. */ base &= -size; pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base + size - 1); } static void piix4_mem_quirk(struct pci_dev *dev, const char *name, unsigned int port, unsigned int enable) { u32 devres; u32 mask, size, base; pci_read_config_dword(dev, port, &devres); if ((devres & enable) != enable) return; base = devres & 0xffff0000; mask = (devres & 0x3f) << 16; size = 128 << 16; for (;;) { unsigned int bit = size >> 1; if ((bit & mask) == bit) break; size = bit; } /* * For now we only print it out. Eventually we'll want to * reserve it, but let's get enough confirmation reports first. */ base &= -size; pci_info(dev, "%s MMIO at %04x-%04x\n", name, base, base + size - 1); } /* * PIIX4 ACPI: Two IO regions pointed to by longwords at * 0x40 (64 bytes of ACPI registers) * 0x90 (16 bytes of SMB registers) * and a few strange programmable PIIX4 device resources. */ static void quirk_piix4_acpi(struct pci_dev *dev) { u32 res_a; quirk_io_region(dev, 0x40, 64, PCI_BRIDGE_RESOURCES, "PIIX4 ACPI"); quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+1, "PIIX4 SMB"); /* Device resource A has enables for some of the other ones */ pci_read_config_dword(dev, 0x5c, &res_a); piix4_io_quirk(dev, "PIIX4 devres B", 0x60, 3 << 21); piix4_io_quirk(dev, "PIIX4 devres C", 0x64, 3 << 21); /* Device resource D is just bitfields for static resources */ /* Device 12 enabled? */ if (res_a & (1 << 29)) { piix4_io_quirk(dev, "PIIX4 devres E", 0x68, 1 << 20); piix4_mem_quirk(dev, "PIIX4 devres F", 0x6c, 1 << 7); } /* Device 13 enabled? */ if (res_a & (1 << 30)) { piix4_io_quirk(dev, "PIIX4 devres G", 0x70, 1 << 20); piix4_mem_quirk(dev, "PIIX4 devres H", 0x74, 1 << 7); } piix4_io_quirk(dev, "PIIX4 devres I", 0x78, 1 << 20); piix4_io_quirk(dev, "PIIX4 devres J", 0x7c, 1 << 20); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, quirk_piix4_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3, quirk_piix4_acpi); #define ICH_PMBASE 0x40 #define ICH_ACPI_CNTL 0x44 #define ICH4_ACPI_EN 0x10 #define ICH6_ACPI_EN 0x80 #define ICH4_GPIOBASE 0x58 #define ICH4_GPIO_CNTL 0x5c #define ICH4_GPIO_EN 0x10 #define ICH6_GPIOBASE 0x48 #define ICH6_GPIO_CNTL 0x4c #define ICH6_GPIO_EN 0x10 /* * ICH4, ICH4-M, ICH5, ICH5-M ACPI: Three IO regions pointed to by longwords at * 0x40 (128 bytes of ACPI, GPIO & TCO registers) * 0x58 (64 bytes of GPIO I/O space) */ static void quirk_ich4_lpc_acpi(struct pci_dev *dev) { u8 enable; /* * The check for PCIBIOS_MIN_IO is to ensure we won't create a conflict * with low legacy (and fixed) ports. 
We don't know the decoding * priority and can't tell whether the legacy device or the one created * here is really at that address. This happens on boards with broken * BIOSes. */ pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); if (enable & ICH4_ACPI_EN) quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES, "ICH4 ACPI/GPIO/TCO"); pci_read_config_byte(dev, ICH4_GPIO_CNTL, &enable); if (enable & ICH4_GPIO_EN) quirk_io_region(dev, ICH4_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1, "ICH4 GPIO"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_10, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, quirk_ich4_lpc_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, quirk_ich4_lpc_acpi); static void ich6_lpc_acpi_gpio(struct pci_dev *dev) { u8 enable; pci_read_config_byte(dev, ICH_ACPI_CNTL, &enable); if (enable & ICH6_ACPI_EN) quirk_io_region(dev, ICH_PMBASE, 128, PCI_BRIDGE_RESOURCES, "ICH6 ACPI/GPIO/TCO"); pci_read_config_byte(dev, ICH6_GPIO_CNTL, &enable); if (enable & ICH6_GPIO_EN) quirk_io_region(dev, ICH6_GPIOBASE, 64, PCI_BRIDGE_RESOURCES+1, "ICH6 GPIO"); } static void ich6_lpc_generic_decode(struct pci_dev *dev, unsigned int reg, const char *name, int dynsize) { u32 val; u32 size, base; pci_read_config_dword(dev, reg, &val); /* Enabled? */ if (!(val & 1)) return; base = val & 0xfffc; if (dynsize) { /* * This is not correct. It is 16, 32 or 64 bytes depending on * register D31:F0:ADh bits 5:4. * * But this gets us at least _part_ of it. */ size = 16; } else { size = 128; } base &= ~(size-1); /* * Just print it out for now. We should reserve it after more * debugging. */ pci_info(dev, "%s PIO at %04x-%04x\n", name, base, base+size-1); } static void quirk_ich6_lpc(struct pci_dev *dev) { /* Shared ACPI/GPIO decode with all ICH6+ */ ich6_lpc_acpi_gpio(dev); /* ICH6-specific generic IO decode */ ich6_lpc_generic_decode(dev, 0x84, "LPC Generic IO decode 1", 0); ich6_lpc_generic_decode(dev, 0x88, "LPC Generic IO decode 2", 1); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, quirk_ich6_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, quirk_ich6_lpc); static void ich7_lpc_generic_decode(struct pci_dev *dev, unsigned int reg, const char *name) { u32 val; u32 mask, base; pci_read_config_dword(dev, reg, &val); /* Enabled? */ if (!(val & 1)) return; /* IO base in bits 15:2, mask in bits 23:18, both are dword-based */ base = val & 0xfffc; mask = (val >> 16) & 0xfc; mask |= 3; /* * Just print it out for now. We should reserve it after more * debugging. 
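 * (Worked example for the decode below: a register value of 0x00400291
 * has the enable bit set and yields base 0x0290 and mask 0x0043.)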
*/ pci_info(dev, "%s PIO at %04x (mask %04x)\n", name, base, mask); } /* ICH7-10 has the same common LPC generic IO decode registers */ static void quirk_ich7_lpc(struct pci_dev *dev) { /* We share the common ACPI/GPIO decode with ICH6 */ ich6_lpc_acpi_gpio(dev); /* And have 4 ICH7+ generic decodes */ ich7_lpc_generic_decode(dev, 0x84, "ICH7 LPC Generic IO decode 1"); ich7_lpc_generic_decode(dev, 0x88, "ICH7 LPC Generic IO decode 2"); ich7_lpc_generic_decode(dev, 0x8c, "ICH7 LPC Generic IO decode 3"); ich7_lpc_generic_decode(dev, 0x90, "ICH7 LPC Generic IO decode 4"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_0, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_2, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_3, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_2, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_4, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_8, quirk_ich7_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_1, quirk_ich7_lpc); /* * VIA ACPI: One IO region pointed to by longword at * 0x48 or 0x20 (256 bytes of ACPI registers) */ static void quirk_vt82c586_acpi(struct pci_dev *dev) { if (dev->revision & 0x10) quirk_io_region(dev, 0x48, 256, PCI_BRIDGE_RESOURCES, "vt82c586 ACPI"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_vt82c586_acpi); /* * VIA VT82C686 ACPI: Three IO region pointed to by (long)words at * 0x48 (256 bytes of ACPI registers) * 0x70 (128 bytes of hardware monitoring register) * 0x90 (16 bytes of SMB registers) */ static void quirk_vt82c686_acpi(struct pci_dev *dev) { quirk_vt82c586_acpi(dev); quirk_io_region(dev, 0x70, 128, PCI_BRIDGE_RESOURCES+1, "vt82c686 HW-mon"); quirk_io_region(dev, 0x90, 16, PCI_BRIDGE_RESOURCES+2, "vt82c686 SMB"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi); /* * VIA VT8235 ISA Bridge: Two IO regions pointed to by words at * 0x88 (128 bytes of power management registers) * 0xd0 (16 bytes of SMB registers) */ static void quirk_vt8235_acpi(struct pci_dev *dev) { quirk_io_region(dev, 0x88, 128, PCI_BRIDGE_RESOURCES, "vt8235 PM"); quirk_io_region(dev, 0xd0, 16, PCI_BRIDGE_RESOURCES+1, "vt8235 SMB"); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_vt8235_acpi); /* * TI XIO2000a PCIe-PCI Bridge erroneously reports it supports fast * back-to-back: Disable fast back-to-back on the secondary bus segment */ static void quirk_xio2000a(struct pci_dev *dev) { struct pci_dev *pdev; u16 command; pci_warn(dev, "TI XIO2000a quirk detected; secondary bus fast back-to-back transfers disabled\n"); list_for_each_entry(pdev, &dev->subordinate->devices, bus_list) { pci_read_config_word(pdev, PCI_COMMAND, &command); if (command & PCI_COMMAND_FAST_BACK) 
pci_write_config_word(pdev, PCI_COMMAND, command & ~PCI_COMMAND_FAST_BACK); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XIO2000A, quirk_xio2000a); #ifdef CONFIG_X86_IO_APIC #include <asm/io_apic.h> /* * VIA 686A/B: If an IO-APIC is active, we need to route all on-chip * devices to the external APIC. * * TODO: When we have device-specific interrupt routers, this code will go * away from quirks. */ static void quirk_via_ioapic(struct pci_dev *dev) { u8 tmp; if (nr_ioapics < 1) tmp = 0; /* nothing routed to external APIC */ else tmp = 0x1f; /* all known bits (4-0) routed to external APIC */ pci_info(dev, "%s VIA external APIC routing\n", tmp ? "Enabling" : "Disabling"); /* Offset 0x58: External APIC IRQ output control */ pci_write_config_byte(dev, 0x58, tmp); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_ioapic); /* * VIA 8237: Some BIOSes don't set the 'Bypass APIC De-Assert Message' Bit. * This leads to doubled level interrupt rates. * Set this bit to get rid of cycle wastage. * Otherwise uncritical. */ static void quirk_via_vt8237_bypass_apic_deassert(struct pci_dev *dev) { u8 misc_control2; #define BYPASS_APIC_DEASSERT 8 pci_read_config_byte(dev, 0x5B, &misc_control2); if (!(misc_control2 & BYPASS_APIC_DEASSERT)) { pci_info(dev, "Bypassing VIA 8237 APIC De-Assert Message\n"); pci_write_config_byte(dev, 0x5B, misc_control2|BYPASS_APIC_DEASSERT); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_vt8237_bypass_apic_deassert); /* * The AMD IO-APIC can hang the box when an APIC IRQ is masked. * We check all revs >= B0 (yet not in the pre production!) as the bug * is currently marked NoFix * * We have multiple reports of hangs with this chipset that went away with * noapic specified. For the moment we assume it's the erratum. We may be wrong * of course. However the advice is demonstrably good even if so. */ static void quirk_amd_ioapic(struct pci_dev *dev) { if (dev->revision >= 0x02) { pci_warn(dev, "I/O APIC: AMD Erratum #22 may be present. In the event of instability try\n"); pci_warn(dev, " : booting with the \"noapic\" option\n"); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic); #endif /* CONFIG_X86_IO_APIC */ #if defined(CONFIG_ARM64) && defined(CONFIG_PCI_ATS) static void quirk_cavium_sriov_rnm_link(struct pci_dev *dev) { /* Fix for improper SR-IOV configuration on Cavium cn88xx RNM device */ if (dev->subsystem_device == 0xa118) dev->sriov->link = dev->devfn; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CAVIUM, 0xa018, quirk_cavium_sriov_rnm_link); #endif /* * Some settings of MMRBC can lead to data corruption so block changes. * See AMD 8131 HyperTransport PCI-X Tunnel Revision Guide */ static void quirk_amd_8131_mmrbc(struct pci_dev *dev) { if (dev->subordinate && dev->revision <= 0x12) { pci_info(dev, "AMD8131 rev %x detected; disabling PCI-X MMRBC\n", dev->revision); dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MMRBC; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_amd_8131_mmrbc); /* * FIXME: it is questionable that quirk_via_acpi() is needed. It shows up * as an ISA bridge, and does not support the PCI_INTERRUPT_LINE register * at all. 
Therefore it seems like setting the pci_dev's IRQ to the value * of the ACPI SCI interrupt is only done for convenience. * -jgarzik */ static void quirk_via_acpi(struct pci_dev *d) { u8 irq; /* VIA ACPI device: SCI IRQ line in PCI config byte 0x42 */ pci_read_config_byte(d, 0x42, &irq); irq &= 0xf; if (irq && (irq != 2)) d->irq = irq; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi); /* VIA bridges which have VLink */ static int via_vlink_dev_lo = -1, via_vlink_dev_hi = 18; static void quirk_via_bridge(struct pci_dev *dev) { /* See what bridge we have and find the device ranges */ switch (dev->device) { case PCI_DEVICE_ID_VIA_82C686: /* * The VT82C686 is special; it attaches to PCI and can have * any device number. All its subdevices are functions of * that single device. */ via_vlink_dev_lo = PCI_SLOT(dev->devfn); via_vlink_dev_hi = PCI_SLOT(dev->devfn); break; case PCI_DEVICE_ID_VIA_8237: case PCI_DEVICE_ID_VIA_8237A: via_vlink_dev_lo = 15; break; case PCI_DEVICE_ID_VIA_8235: via_vlink_dev_lo = 16; break; case PCI_DEVICE_ID_VIA_8231: case PCI_DEVICE_ID_VIA_8233_0: case PCI_DEVICE_ID_VIA_8233A: case PCI_DEVICE_ID_VIA_8233C_0: via_vlink_dev_lo = 17; break; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233_0, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233A, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8233C_0, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, quirk_via_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237A, quirk_via_bridge); /* * quirk_via_vlink - VIA VLink IRQ number update * @dev: PCI device * * If the device we are dealing with is on a PIC IRQ we need to ensure that * the IRQ line register which usually is not relevant for PCI cards, is * actually written so that interrupts get sent to the right place. * * We only do this on systems where a VIA south bridge was detected, and * only for VIA devices on the motherboard (see quirk_via_bridge above). */ static void quirk_via_vlink(struct pci_dev *dev) { u8 irq, new_irq; /* Check if we have VLink at all */ if (via_vlink_dev_lo == -1) return; new_irq = dev->irq; /* Don't quirk interrupts outside the legacy IRQ range */ if (!new_irq || new_irq > 15) return; /* Internal device ? */ if (dev->bus->number != 0 || PCI_SLOT(dev->devfn) > via_vlink_dev_hi || PCI_SLOT(dev->devfn) < via_vlink_dev_lo) return; /* * This is an internal VLink device on a PIC interrupt. The BIOS * ought to have set this but may not have, so we redo it. */ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); if (new_irq != irq) { pci_info(dev, "VIA VLink IRQ fixup, from %d to %d\n", irq, new_irq); udelay(15); /* unknown if delay really needed */ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); } } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_ANY_ID, quirk_via_vlink); /* * VIA VT82C598 has its device ID settable and many BIOSes set it to the ID * of VT82C597 for backward compatibility. We need to switch it off to be * able to recognize the real type of the chip. 
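 * Writing 0 to config offset 0xfc, as done below, switches the
 * compatibility ID off; the real device ID is then re-read into
 * pci_dev->device.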
*/ static void quirk_vt82c598_id(struct pci_dev *dev) { pci_write_config_byte(dev, 0xfc, 0); pci_read_config_word(dev, PCI_DEVICE_ID, &dev->device); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C597_0, quirk_vt82c598_id); /* * CardBus controllers have a legacy base address that enables them to * respond as i82365 pcmcia controllers. We don't want them to do this * even if the Linux CardBus driver is not loaded, because the Linux i82365 * driver does not (and should not) handle CardBus. */ static void quirk_cardbus_legacy(struct pci_dev *dev) { pci_write_config_dword(dev, PCI_CB_LEGACY_MODE_BASE, 0); } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy); DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_BRIDGE_CARDBUS, 8, quirk_cardbus_legacy); /* * Following the PCI ordering rules is optional on the AMD762. I'm not sure * what the designers were smoking but let's not inhale... * * To be fair to AMD, it follows the spec by default, it's BIOS people who * turn it off! */ static void quirk_amd_ordering(struct pci_dev *dev) { u32 pcic; pci_read_config_dword(dev, 0x4C, &pcic); if ((pcic & 6) != 6) { pcic |= 6; pci_warn(dev, "BIOS failed to enable PCI standards compliance; fixing this error\n"); pci_write_config_dword(dev, 0x4C, pcic); pci_read_config_dword(dev, 0x84, &pcic); pcic |= (1 << 23); /* Required in this mode */ pci_write_config_dword(dev, 0x84, pcic); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering); /* * DreamWorks-provided workaround for Dunord I-3000 problem * * This card decodes and responds to addresses not apparently assigned to * it. We force a larger allocation to ensure that nothing gets put too * close to it. */ static void quirk_dunord(struct pci_dev *dev) { struct resource *r = &dev->resource[1]; r->flags |= IORESOURCE_UNSET; r->start = 0; r->end = 0xffffff; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DUNORD, PCI_DEVICE_ID_DUNORD_I3000, quirk_dunord); /* * i82380FB mobile docking controller: its PCI-to-PCI bridge is subtractive * decoding (transparent), and does indicate this in the ProgIf. * Unfortunately, the ProgIf value is wrong - 0x80 instead of 0x01. */ static void quirk_transparent_bridge(struct pci_dev *dev) { dev->transparent = 1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82380FB, quirk_transparent_bridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA, 0x605, quirk_transparent_bridge); /* * Common misconfiguration of the MediaGX/Geode PCI master that will reduce * PCI bandwidth from 70MB/s to 25MB/s. See the GXM/GXLV/GX1 datasheets * found at http://www.national.com/analog for info on what these bits do. * <[email protected]> */ static void quirk_mediagx_master(struct pci_dev *dev) { u8 reg; pci_read_config_byte(dev, 0x41, &reg); if (reg & 2) { reg &= ~2; pci_info(dev, "Fixup for MediaGX/Geode Slave Disconnect Boundary (0x41=0x%02x)\n", reg); pci_write_config_byte(dev, 0x41, reg); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_PCI_MASTER, quirk_mediagx_master); /* * Ensure C0 rev restreaming is off. This is normally done by the BIOS but * in the odd case it is not the results are corruption hence the presence * of a Linux check. 
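 * Only revision 0x04 (the C0 stepping) needs this; the fixup below
 * clears bit 6 of config word 0x40 when it is found set.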
*/ static void quirk_disable_pxb(struct pci_dev *pdev) { u16 config; if (pdev->revision != 0x04) /* Only C0 requires this */ return; pci_read_config_word(pdev, 0x40, &config); if (config & (1<<6)) { config &= ~(1<<6); pci_write_config_word(pdev, 0x40, config); pci_info(pdev, "C0 revision 450NX. Disabling PCI restreaming\n"); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, quirk_disable_pxb); static void quirk_amd_ide_mode(struct pci_dev *pdev) { /* set SBX00/Hudson-2 SATA in IDE mode to AHCI mode */ u8 tmp; pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &tmp); if (tmp == 0x01) { pci_read_config_byte(pdev, 0x40, &tmp); pci_write_config_byte(pdev, 0x40, tmp|1); pci_write_config_byte(pdev, 0x9, 1); pci_write_config_byte(pdev, 0xa, 6); pci_write_config_byte(pdev, 0x40, tmp); pdev->class = PCI_CLASS_STORAGE_SATA_AHCI; pci_info(pdev, "set SATA to AHCI mode\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode); /* Serverworks CSB5 IDE does not fully support native mode */ static void quirk_svwks_csb5ide(struct pci_dev *pdev) { u8 prog; pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); if (prog & 5) { prog &= ~5; pdev->class &= ~5; pci_write_config_byte(pdev, PCI_CLASS_PROG, prog); /* PCI layer will sort out resources */ } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, quirk_svwks_csb5ide); /* Intel 82801CAM ICH3-M datasheet says IDE modes must be the same */ static void quirk_ide_samemode(struct pci_dev *pdev) { u8 prog; pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); if (((prog & 1) && !(prog & 4)) || ((prog & 4) && !(prog & 1))) { pci_info(pdev, "IDE mode mismatch; forcing legacy mode\n"); prog &= ~5; pdev->class &= ~5; pci_write_config_byte(pdev, PCI_CLASS_PROG, prog); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode); /* Some ATA devices break if put into D3 */ static void quirk_no_ata_d3(struct pci_dev *pdev) { pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; } /* Quirk the legacy ATA devices only. The AHCI ones are ok */ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3); /* ALi loses some register settings that we cannot then restore */ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3); /* VIA comes back fine but we need to keep it alive or ACPI GTM failures occur when mode detecting */ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_no_ata_d3); /* * This was originally an Alpha-specific thing, but it really fits here. 
* The i82375 PCI/EISA bridge appears as non-classified. Fix that. */ static void quirk_eisa_bridge(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_EISA << 8; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82375, quirk_eisa_bridge); /* * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge * is not activated. The myth is that Asus said that they do not want the * users to be irritated by just another PCI Device in the Win98 device * manager. (see the file prog/hotplug/README.p4b in the lm_sensors * package 2.7.0 for details) * * The SMBus PCI Device can be activated by setting a bit in the ICH LPC * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it * becomes necessary to do this tweak in two steps -- the chosen trigger * is either the Host bridge (preferred) or on-board VGA controller. * * Note that we used to unhide the SMBus that way on Toshiba laptops * (Satellite A40 and Tecra M2) but then found that the thermal management * was done by SMM code, which could cause unsynchronized concurrent * accesses to the SMBus registers, with potentially bad effects. Thus you * should be very careful when adding new entries: if SMM is accessing the * Intel SMBus, this is a very good reason to leave it hidden. * * Likewise, many recent laptops use ACPI for thermal management. If the * ACPI DSDT code accesses the SMBus, then Linux should not access it * natively, and keeping the SMBus hidden is the right thing to do. If you * are about to add an entry in the table below, please first disassemble * the DSDT and double-check that there is no code accessing the SMBus. */ static int asus_hides_smbus; static void asus_hides_smbus_hostbridge(struct pci_dev *dev) { if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB) switch (dev->subsystem_device) { case 0x8025: /* P4B-LX */ case 0x8070: /* P4B */ case 0x8088: /* P4B533 */ case 0x1626: /* L3C notebook */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) switch (dev->subsystem_device) { case 0x80b1: /* P4GE-V */ case 0x80b2: /* P4PE */ case 0x8093: /* P4B533-V */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82850_HB) switch (dev->subsystem_device) { case 0x8030: /* P4T533 */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_7205_0) switch (dev->subsystem_device) { case 0x8070: /* P4G8X Deluxe */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_E7501_MCH) switch (dev->subsystem_device) { case 0x80c9: /* PU-DLS */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82855GM_HB) switch (dev->subsystem_device) { case 0x1751: /* M2N notebook */ case 0x1821: /* M5N notebook */ case 0x1897: /* A6L notebook */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch (dev->subsystem_device) { case 0x184b: /* W1N notebook */ case 0x186a: /* M6Ne notebook */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) switch (dev->subsystem_device) { case 0x80f2: /* P4P800-X */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB) switch (dev->subsystem_device) { case 0x1882: /* M6V notebook */ case 0x1977: /* A6VA notebook */ asus_hides_smbus = 1; } } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_HP)) { if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch (dev->subsystem_device) { case 0x088C: /* HP Compaq nc8000 */ case 0x0890: /* HP Compaq nc6000 */ 
asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82865_HB) switch (dev->subsystem_device) { case 0x12bc: /* HP D330L */ case 0x12bd: /* HP D530 */ case 0x006a: /* HP Compaq nx9500 */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82875_HB) switch (dev->subsystem_device) { case 0x12bf: /* HP xw4100 */ asus_hides_smbus = 1; } } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG)) { if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch (dev->subsystem_device) { case 0xC00C: /* Samsung P35 notebook */ asus_hides_smbus = 1; } } else if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ)) { if (dev->device == PCI_DEVICE_ID_INTEL_82855PM_HB) switch (dev->subsystem_device) { case 0x0058: /* Compaq Evo N620c */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82810_IG3) switch (dev->subsystem_device) { case 0xB16C: /* Compaq Deskpro EP 401963-001 (PCA# 010174) */ /* Motherboard doesn't have Host bridge * subvendor/subdevice IDs, therefore checking * its on-board VGA controller */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82801DB_2) switch (dev->subsystem_device) { case 0x00b8: /* Compaq Evo D510 CMT */ case 0x00b9: /* Compaq Evo D510 SFF */ case 0x00ba: /* Compaq Evo D510 USDT */ /* Motherboard doesn't have Host bridge * subvendor/subdevice IDs and on-board VGA * controller is disabled if an AGP card is * inserted, therefore checking USB UHCI * Controller #1 */ asus_hides_smbus = 1; } else if (dev->device == PCI_DEVICE_ID_INTEL_82815_CGC) switch (dev->subsystem_device) { case 0x001A: /* Compaq Deskpro EN SSF P667 815E */ /* Motherboard doesn't have host bridge * subvendor/subdevice IDs, therefore checking * its on-board VGA controller */ asus_hides_smbus = 1; } } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82845G_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82850_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7501_MCH, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855PM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82855GM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82915GM_HB, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82810_IG3, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_2, asus_hides_smbus_hostbridge); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, asus_hides_smbus_hostbridge); static void asus_hides_smbus_lpc(struct pci_dev *dev) { u16 val; if (likely(!asus_hides_smbus)) return; pci_read_config_word(dev, 0xF2, &val); if (val & 0x8) { pci_write_config_word(dev, 0xF2, val & (~0x8)); pci_read_config_word(dev, 0xF2, &val); if (val & 0x8) pci_info(dev, "i801 SMBus device continues to play 'hide and seek'! 
0x%x\n", val); else pci_info(dev, "Enabled i801 SMBus device\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, asus_hides_smbus_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, asus_hides_smbus_lpc); /* It appears we just have one such device. If not, we have a warning */ static void __iomem *asus_rcba_base; static void asus_hides_smbus_lpc_ich6_suspend(struct pci_dev *dev) { u32 rcba; if (likely(!asus_hides_smbus)) return; WARN_ON(asus_rcba_base); pci_read_config_dword(dev, 0xF0, &rcba); /* use bits 31:14, 16 kB aligned */ asus_rcba_base = ioremap(rcba & 0xFFFFC000, 0x4000); if (asus_rcba_base == NULL) return; } static void asus_hides_smbus_lpc_ich6_resume_early(struct pci_dev *dev) { u32 val; if (likely(!asus_hides_smbus || !asus_rcba_base)) return; /* read the Function Disable register, dword mode only */ val = readl(asus_rcba_base + 0x3418); /* enable the SMBus device */ writel(val & 0xFFFFFFF7, asus_rcba_base + 0x3418); } static void asus_hides_smbus_lpc_ich6_resume(struct pci_dev *dev) { if (likely(!asus_hides_smbus || !asus_rcba_base)) return; iounmap(asus_rcba_base); asus_rcba_base = NULL; pci_info(dev, "Enabled ICH6/i801 SMBus device\n"); } static void asus_hides_smbus_lpc_ich6(struct pci_dev *dev) { asus_hides_smbus_lpc_ich6_suspend(dev); asus_hides_smbus_lpc_ich6_resume_early(dev); asus_hides_smbus_lpc_ich6_resume(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6); DECLARE_PCI_FIXUP_SUSPEND(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_suspend); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6_resume_early); /* SiS 96x south bridge: BIOS typically hides SMBus device... 
*/ static void quirk_sis_96x_smbus(struct pci_dev *dev) { u8 val = 0; pci_read_config_byte(dev, 0x77, &val); if (val & 0x10) { pci_info(dev, "Enabling SiS 96x SMBus\n"); pci_write_config_byte(dev, 0x77, val & ~0x10); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_961, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus); /* * ... This is further complicated by the fact that some SiS96x south * bridges pretend to be 85C503/5513 instead. In that case see if we * spotted a compatible north bridge to make sure. * (pci_find_device() doesn't work yet) * * We can also enable the sis96x bit in the discovery register.. */ #define SIS_DETECT_REGISTER 0x40 static void quirk_sis_503(struct pci_dev *dev) { u8 reg; u16 devid; pci_read_config_byte(dev, SIS_DETECT_REGISTER, &reg); pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg | (1 << 6)); pci_read_config_word(dev, PCI_DEVICE_ID, &devid); if (((devid & 0xfff0) != 0x0960) && (devid != 0x0018)) { pci_write_config_byte(dev, SIS_DETECT_REGISTER, reg); return; } /* * Ok, it now shows up as a 96x. Run the 96x quirk by hand in case * it has already been processed. (Depends on link order, which is * apparently not guaranteed) */ dev->device = devid; quirk_sis_96x_smbus(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503, quirk_sis_503); /* * On ASUS A8V and A8V Deluxe boards, the onboard AC97 audio controller * and MC97 modem controller are disabled when a second PCI soundcard is * present. This patch, tweaking the VT8237 ISA bridge, enables them. * -- bjd */ static void asus_hides_ac97_lpc(struct pci_dev *dev) { u8 val; int asus_hides_ac97 = 0; if (likely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { if (dev->device == PCI_DEVICE_ID_VIA_8237) asus_hides_ac97 = 1; } if (!asus_hides_ac97) return; pci_read_config_byte(dev, 0x50, &val); if (val & 0xc0) { pci_write_config_byte(dev, 0x50, val & (~0xc0)); pci_read_config_byte(dev, 0x50, &val); if (val & 0xc0) pci_info(dev, "Onboard AC97/MC97 devices continue to play 'hide and seek'! 0x%x\n", val); else pci_info(dev, "Enabled onboard AC97/MC97 devices\n"); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, asus_hides_ac97_lpc); #if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE) /* * If we are using libata we can drive this chip properly but must do this * early on to make the additional device appear during the PCI scanning. 
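 * The fixup rewrites the JMicron function control words at config
 * offsets 0x40 and 0x80 to select the wanted AHCI/IDE mode, then
 * refreshes the cached header type, multifunction flag and class code
 * so the additional function is enumerated.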
*/ static void quirk_jmicron_ata(struct pci_dev *pdev) { u32 conf1, conf5, class; u8 hdr; /* Only poke fn 0 */ if (PCI_FUNC(pdev->devfn)) return; pci_read_config_dword(pdev, 0x40, &conf1); pci_read_config_dword(pdev, 0x80, &conf5); conf1 &= ~0x00CFF302; /* Clear bit 1, 8, 9, 12-19, 22, 23 */ conf5 &= ~(1 << 24); /* Clear bit 24 */ switch (pdev->device) { case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */ case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */ case PCI_DEVICE_ID_JMICRON_JMB364: /* SATA dual ports */ /* The controller should be in single function ahci mode */ conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */ break; case PCI_DEVICE_ID_JMICRON_JMB365: case PCI_DEVICE_ID_JMICRON_JMB366: /* Redirect IDE second PATA port to the right spot */ conf5 |= (1 << 24); fallthrough; case PCI_DEVICE_ID_JMICRON_JMB361: case PCI_DEVICE_ID_JMICRON_JMB363: case PCI_DEVICE_ID_JMICRON_JMB369: /* Enable dual function mode, AHCI on fn 0, IDE fn1 */ /* Set the class codes correctly and then direct IDE 0 */ conf1 |= 0x00C2A1B3; /* Set 0, 1, 4, 5, 7, 8, 13, 15, 17, 22, 23 */ break; case PCI_DEVICE_ID_JMICRON_JMB368: /* The controller should be in single function IDE mode */ conf1 |= 0x00C00000; /* Set 22, 23 */ break; } pci_write_config_dword(pdev, 0x40, conf1); pci_write_config_dword(pdev, 0x80, conf5); /* Update pdev accordingly */ pci_read_config_byte(pdev, PCI_HEADER_TYPE, &hdr); pdev->hdr_type = hdr & 0x7f; pdev->multifunction = !!(hdr & 0x80); pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class); pdev->class = class >> 8; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB364, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB369, quirk_jmicron_ata); #endif static void quirk_jmicron_async_suspend(struct pci_dev *dev) { if (dev->multifunction) { device_disable_async_suspend(&dev->dev); pci_info(dev, "async suspend disabled to avoid 
multi-function power-on ordering issue\n"); } } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE, 8, quirk_jmicron_async_suspend); DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0, quirk_jmicron_async_suspend); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x2362, quirk_jmicron_async_suspend); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_JMICRON, 0x236f, quirk_jmicron_async_suspend); #ifdef CONFIG_X86_IO_APIC static void quirk_alder_ioapic(struct pci_dev *pdev) { int i; if ((pdev->class >> 8) != 0xff00) return; /* * The first BAR is the location of the IO-APIC... we must * not touch this (and it's already covered by the fixmap), so * forcibly insert it into the resource tree. */ if (pci_resource_start(pdev, 0) && pci_resource_len(pdev, 0)) insert_resource(&iomem_resource, &pdev->resource[0]); /* * The next five BARs all seem to be rubbish, so just clean * them out. */ for (i = 1; i < PCI_STD_NUM_BARS; i++) memset(&pdev->resource[i], 0, sizeof(pdev->resource[i])); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); #endif static void quirk_no_msi(struct pci_dev *dev) { pci_info(dev, "avoiding MSI to work around a hardware defect\n"); dev->no_msi = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4386, quirk_no_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4387, quirk_no_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4388, quirk_no_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4389, quirk_no_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438a, quirk_no_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x438b, quirk_no_msi); static void quirk_pcie_mch(struct pci_dev *pdev) { pdev->no_msi = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_pcie_mch); DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_HUAWEI, 0x1610, PCI_CLASS_BRIDGE_PCI, 8, quirk_pcie_mch); /* * HiSilicon KunPeng920 and KunPeng930 have devices appear as PCI but are * actually on the AMBA bus. These fake PCI devices can support SVA via * SMMU stall feature, by setting dma-can-stall for ACPI platforms. * * Normally stalling must not be enabled for PCI devices, since it would * break the PCI requirement for free-flowing writes and may lead to * deadlock. We expect PCI devices to support ATS and PRI if they want to * be fault-tolerant, so there's no ACPI binding to describe anything else, * even when a "PCI" device turns out to be a regular old SoC device * dressed up as a RCiEP and normal rules don't apply. */ static void quirk_huawei_pcie_sva(struct pci_dev *pdev) { struct property_entry properties[] = { PROPERTY_ENTRY_BOOL("dma-can-stall"), {}, }; if (pdev->revision != 0x21 && pdev->revision != 0x30) return; pdev->pasid_no_tlp = 1; /* * Set the dma-can-stall property on ACPI platforms. Device tree * can set it directly. 
*/ if (!pdev->dev.of_node && device_create_managed_software_node(&pdev->dev, properties, NULL)) pci_warn(pdev, "could not add stall property"); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa250, quirk_huawei_pcie_sva); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa251, quirk_huawei_pcie_sva); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa255, quirk_huawei_pcie_sva); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa256, quirk_huawei_pcie_sva); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa258, quirk_huawei_pcie_sva); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_HUAWEI, 0xa259, quirk_huawei_pcie_sva); /* * It's possible for the MSI to get corrupted if SHPC and ACPI are used * together on certain PXH-based systems. */ static void quirk_pcie_pxh(struct pci_dev *dev) { dev->no_msi = 1; pci_warn(dev, "PXH quirk detected; SHPC device MSI disabled\n"); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_0, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHD_1, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_pcie_pxh); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pcie_pxh); /* * Some Intel PCI Express chipsets have trouble with downstream device * power management. */ static void quirk_intel_pcie_pm(struct pci_dev *dev) { pci_pm_d3hot_delay = 120; dev->no_d1d2 = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2601, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2602, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2603, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2604, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2605, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2606, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2607, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2608, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay) { if (dev->d3hot_delay >= delay) return; dev->d3hot_delay = delay; pci_info(dev, "extending delay after power-on from D3hot to %d msec\n", dev->d3hot_delay); } static void quirk_radeon_pm(struct pci_dev *dev) { if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE && dev->subsystem_device == 0x00e2) quirk_d3hot_delay(dev, 20); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm); /* * 
NVIDIA Ampere-based HDA controllers can wedge the whole device if a bus * reset is performed too soon after transition to D0, extend d3hot_delay * to previous effective default for all NVIDIA HDA controllers. */ static void quirk_nvidia_hda_pm(struct pci_dev *dev) { quirk_d3hot_delay(dev, 20); } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_nvidia_hda_pm); /* * Ryzen5/7 XHCI controllers fail upon resume from runtime suspend or s2idle. * https://bugzilla.kernel.org/show_bug.cgi?id=205587 * * The kernel attempts to transition these devices to D3cold, but that seems * to be ineffective on the platforms in question; the PCI device appears to * remain on in D3hot state. The D3hot-to-D0 transition then requires an * extended delay in order to succeed. */ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev) { quirk_d3hot_delay(dev, 20); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot); #ifdef CONFIG_X86_IO_APIC static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) { noioapicreroute = 1; pr_info("%s detected: disable boot interrupt reroute\n", d->ident); return 0; } static const struct dmi_system_id boot_interrupt_dmi_table[] = { /* * Systems to exclude from boot interrupt reroute quirks */ { .callback = dmi_disable_ioapicreroute, .ident = "ASUSTek Computer INC. M2N-LR", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer INC."), DMI_MATCH(DMI_PRODUCT_NAME, "M2N-LR"), }, }, {} }; /* * Boot interrupts on some chipsets cannot be turned off. For these chipsets, * remap the original interrupt in the Linux kernel to the boot interrupt, so * that a PCI device's interrupt handler is installed on the boot interrupt * line instead. 
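 *
 * The fixup below only takes effect when neither the "noioapicquirk" nor the
 * "noioapicreroute" switches are set and the system has not been excluded via
 * the DMI table above; it then records the routing variant in
 * dev->irq_reroute_variant for the platform interrupt code to act on.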
*/ static void quirk_reroute_to_boot_interrupts_intel(struct pci_dev *dev) { dmi_check_system(boot_interrupt_dmi_table); if (noioapicquirk || noioapicreroute) return; dev->irq_reroute_variant = INTEL_IRQ_REROUTE_VARIANT; pci_info(dev, "rerouting interrupts for [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80333_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_0, quirk_reroute_to_boot_interrupts_intel); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_80332_1, quirk_reroute_to_boot_interrupts_intel); /* * On some chipsets we can disable the generation of legacy INTx boot * interrupts. */ /* * IO-APIC1 on 6300ESB generates boot interrupts, see Intel order no * 300641-004US, section 5.7.3. * * Core IO on Xeon E5 1600/2600/4600, see Intel order no 326509-003. * Core IO on Xeon E5 v2, see Intel order no 329188-003. * Core IO on Xeon E7 v2, see Intel order no 329595-002. * Core IO on Xeon E5 v3, see Intel order no 330784-003. * Core IO on Xeon E7 v3, see Intel order no 332315-001US. * Core IO on Xeon E5 v4, see Intel order no 333810-002US. * Core IO on Xeon E7 v4, see Intel order no 332315-001US. * Core IO on Xeon D-1500, see Intel order no 332051-001. * Core IO on Xeon Scalable, see Intel order no 610950. 
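 *
 * Two register layouts are handled below: the 6300ESB IO-APIC exposes a
 * 16-bit word at the ABAR offset where bit 14 disables the boot interrupt,
 * while the Xeon Core IO hubs use the 32-bit CIPINTRC register where bit 25
 * disables legacy INTx towards the ICH.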
*/ #define INTEL_6300_IOAPIC_ABAR 0x40 /* Bus 0, Dev 29, Func 5 */ #define INTEL_6300_DISABLE_BOOT_IRQ (1<<14) #define INTEL_CIPINTRC_CFG_OFFSET 0x14C /* Bus 0, Dev 5, Func 0 */ #define INTEL_CIPINTRC_DIS_INTX_ICH (1<<25) static void quirk_disable_intel_boot_interrupt(struct pci_dev *dev) { u16 pci_config_word; u32 pci_config_dword; if (noioapicquirk) return; switch (dev->device) { case PCI_DEVICE_ID_INTEL_ESB_10: pci_read_config_word(dev, INTEL_6300_IOAPIC_ABAR, &pci_config_word); pci_config_word |= INTEL_6300_DISABLE_BOOT_IRQ; pci_write_config_word(dev, INTEL_6300_IOAPIC_ABAR, pci_config_word); break; case 0x3c28: /* Xeon E5 1600/2600/4600 */ case 0x0e28: /* Xeon E5/E7 V2 */ case 0x2f28: /* Xeon E5/E7 V3,V4 */ case 0x6f28: /* Xeon D-1500 */ case 0x2034: /* Xeon Scalable Family */ pci_read_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET, &pci_config_dword); pci_config_dword |= INTEL_CIPINTRC_DIS_INTX_ICH; pci_write_config_dword(dev, INTEL_CIPINTRC_CFG_OFFSET, pci_config_dword); break; default: return; } pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } /* * Device 29 Func 5 Device IDs of IO-APIC * containing ABAR—APIC1 Alternate Base Address Register */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_10, quirk_disable_intel_boot_interrupt); /* * Device 5 Func 0 Device IDs of Core IO modules/hubs * containing Coherent Interface Protocol Interrupt Control * * Device IDs obtained from volume 2 datasheets of commented * families above. */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x3c28, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0e28, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2f28, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x6f28, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2034, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x3c28, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x0e28, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2f28, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x6f28, quirk_disable_intel_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, 0x2034, quirk_disable_intel_boot_interrupt); /* Disable boot interrupts on HT-1000 */ #define BC_HT1000_FEATURE_REG 0x64 #define BC_HT1000_PIC_REGS_ENABLE (1<<0) #define BC_HT1000_MAP_IDX 0xC00 #define BC_HT1000_MAP_DATA 0xC01 static void quirk_disable_broadcom_boot_interrupt(struct pci_dev *dev) { u32 pci_config_dword; u8 irq; if (noioapicquirk) return; pci_read_config_dword(dev, BC_HT1000_FEATURE_REG, &pci_config_dword); pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword | BC_HT1000_PIC_REGS_ENABLE); for (irq = 0x10; irq < 0x10 + 32; irq++) { outb(irq, BC_HT1000_MAP_IDX); outb(0x00, BC_HT1000_MAP_DATA); } pci_write_config_dword(dev, BC_HT1000_FEATURE_REG, pci_config_dword); pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000SB, quirk_disable_broadcom_boot_interrupt); /* 
Disable boot interrupts on AMD and ATI chipsets */ /* * NOIOAMODE needs to be disabled to disable "boot interrupts". For AMD 8131 * rev. A0 and B0, NOIOAMODE needs to be disabled anyway to fix IO-APIC mode * (due to an erratum). */ #define AMD_813X_MISC 0x40 #define AMD_813X_NOIOAMODE (1<<0) #define AMD_813X_REV_B1 0x12 #define AMD_813X_REV_B2 0x13 static void quirk_disable_amd_813x_boot_interrupt(struct pci_dev *dev) { u32 pci_config_dword; if (noioapicquirk) return; if ((dev->revision == AMD_813X_REV_B1) || (dev->revision == AMD_813X_REV_B2)) return; pci_read_config_dword(dev, AMD_813X_MISC, &pci_config_dword); pci_config_dword &= ~AMD_813X_NOIOAMODE; pci_write_config_dword(dev, AMD_813X_MISC, pci_config_dword); pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_amd_813x_boot_interrupt); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, quirk_disable_amd_813x_boot_interrupt); #define AMD_8111_PCI_IRQ_ROUTING 0x56 static void quirk_disable_amd_8111_boot_interrupt(struct pci_dev *dev) { u16 pci_config_word; if (noioapicquirk) return; pci_read_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, &pci_config_word); if (!pci_config_word) { pci_info(dev, "boot interrupts on device [%04x:%04x] already disabled\n", dev->vendor, dev->device); return; } pci_write_config_word(dev, AMD_8111_PCI_IRQ_ROUTING, 0); pci_info(dev, "disabled boot interrupts on device [%04x:%04x]\n", dev->vendor, dev->device); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS, quirk_disable_amd_8111_boot_interrupt); #endif /* CONFIG_X86_IO_APIC */ /* * Toshiba TC86C001 IDE controller reports the standard 8-byte BAR0 size * but the PIO transfers won't work if BAR0 falls at the odd 8 bytes. * Re-allocate the region if needed... */ static void quirk_tc86c001_ide(struct pci_dev *dev) { struct resource *r = &dev->resource[0]; if (r->start & 0x8) { r->flags |= IORESOURCE_UNSET; r->start = 0; r->end = 0xf; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC86C001_IDE, quirk_tc86c001_ide); /* * PLX PCI 9050 PCI Target bridge controller has an erratum that prevents the * local configuration registers accessible via BAR0 (memory) or BAR1 (i/o) * being read correctly if bit 7 of the base address is set. * The BAR0 or BAR1 region may be disabled (size 0) or enabled (size 128). * Re-allocate the regions to a 256-byte boundary if necessary. */ static void quirk_plx_pci9050(struct pci_dev *dev) { unsigned int bar; /* Fixed in revision 2 (PCI 9052). 
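 * Marking the resource IORESOURCE_UNSET and growing it to 256 bytes below
 * makes the PCI core reassign the BAR, and the 256-byte size guarantees
 * 256-byte alignment so bit 7 of the base address ends up clear.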
*/ if (dev->revision >= 2) return; for (bar = 0; bar <= 1; bar++) if (pci_resource_len(dev, bar) == 0x80 && (pci_resource_start(dev, bar) & 0x80)) { struct resource *r = &dev->resource[bar]; pci_info(dev, "Re-allocating PLX PCI 9050 BAR %u to length 256 to avoid bit 7 bug\n", bar); r->flags |= IORESOURCE_UNSET; r->start = 0; r->end = 0xff; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, quirk_plx_pci9050); /* * The following Meilhaus (vendor ID 0x1402) device IDs (amongst others) * may be using the PLX PCI 9050: 0x0630, 0x0940, 0x0950, 0x0960, 0x100b, * 0x1400, 0x140a, 0x140b, 0x14e0, 0x14ea, 0x14eb, 0x1604, 0x1608, 0x160c, * 0x168f, 0x2000, 0x2600, 0x3000, 0x810a, 0x810b. * * Currently, device IDs 0x2000 and 0x2600 are used by the Comedi "me_daq" * driver. */ DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2000, quirk_plx_pci9050); DECLARE_PCI_FIXUP_HEADER(0x1402, 0x2600, quirk_plx_pci9050); static void quirk_netmos(struct pci_dev *dev) { unsigned int num_parallel = (dev->subsystem_device & 0xf0) >> 4; unsigned int num_serial = dev->subsystem_device & 0xf; /* * These Netmos parts are multiport serial devices with optional * parallel ports. Even when parallel ports are present, they * are identified as class SERIAL, which means the serial driver * will claim them. To prevent this, mark them as class OTHER. * These combo devices should be claimed by parport_serial. * * The subdevice ID is of the form 0x00PS, where <P> is the number * of parallel ports and <S> is the number of serial ports. */ switch (dev->device) { case PCI_DEVICE_ID_NETMOS_9835: /* Well, this rule doesn't hold for the following 9835 device */ if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM && dev->subsystem_device == 0x0299) return; fallthrough; case PCI_DEVICE_ID_NETMOS_9735: case PCI_DEVICE_ID_NETMOS_9745: case PCI_DEVICE_ID_NETMOS_9845: case PCI_DEVICE_ID_NETMOS_9855: if (num_parallel) { pci_info(dev, "Netmos %04x (%u parallel, %u serial); changing class SERIAL to OTHER (use parport_serial)\n", dev->device, num_parallel, num_serial); dev->class = (PCI_CLASS_COMMUNICATION_OTHER << 8) | (dev->class & 0xff); } } } DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos); static void quirk_e100_interrupt(struct pci_dev *dev) { u16 command, pmcsr; u8 __iomem *csr; u8 cmd_hi; switch (dev->device) { /* PCI IDs taken from drivers/net/e100.c */ case 0x1029: case 0x1030 ... 0x1034: case 0x1038 ... 0x103E: case 0x1050 ... 0x1057: case 0x1059: case 0x1064 ... 0x106B: case 0x1091 ... 0x1095: case 0x1209: case 0x1229: case 0x2449: case 0x2459: case 0x245D: case 0x27DC: break; default: return; } /* * Some firmware hands off the e100 with interrupts enabled, * which can cause a flood of interrupts if packets are * received before the driver attaches to the device. So * disable all e100 interrupts here. The driver will * re-enable them when it's ready. */ pci_read_config_word(dev, PCI_COMMAND, &command); if (!(command & PCI_COMMAND_MEMORY) || !pci_resource_start(dev, 0)) return; /* * Check that the device is in the D0 power state. If it's not, * there is no point to look any further. */ if (dev->pm_cap) { pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); if ((pmcsr & PCI_PM_CTRL_STATE_MASK) != PCI_D0) return; } /* Convert from PCI bus to resource space. 
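 * Only the byte at offset 3 of the mapped CSR is inspected: zero there
 * means the interrupt-mask bit is clear (interrupts enabled), and writing
 * 1 sets the mask bit so the device stays quiet until the driver attaches.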
*/ csr = ioremap(pci_resource_start(dev, 0), 8); if (!csr) { pci_warn(dev, "Can't map e100 registers\n"); return; } cmd_hi = readb(csr + 3); if (cmd_hi == 0) { pci_warn(dev, "Firmware left e100 interrupts enabled; disabling\n"); writeb(1, csr + 3); } iounmap(csr); } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET, 8, quirk_e100_interrupt); /* * The 82575 and 82598 may experience data corruption issues when transitioning * out of L0S. To prevent this we need to disable L0S on the PCIe link. */ static void quirk_disable_aspm_l0s(struct pci_dev *dev) { pci_info(dev, "Disabling L0s\n"); pci_disable_link_state(dev, PCIE_LINK_STATE_L0S); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a7, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10a9, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10b6, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c6, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c7, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10c8, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10d6, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10db, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10dd, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10e1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10ec, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); static void quirk_disable_aspm_l0s_l1(struct pci_dev *dev) { pci_info(dev, "Disabling ASPM L0s/L1\n"); pci_disable_link_state(dev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); } /* * ASM1083/1085 PCIe-PCI bridge devices cause AER timeout errors on the * upstream PCIe root port when ASPM is enabled. At least L0s mode is affected; * disable both L0s and L1 for now to be safe. */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_disable_aspm_l0s_l1); /* * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain * Link bit cleared after starting the link retrain process to allow this * process to finish. * * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf. */ static void quirk_enable_clear_retrain_link(struct pci_dev *dev) { dev->clear_retrain_link = 1; pci_info(dev, "Enable PCIe Retrain Link quirk\n"); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe110, quirk_enable_clear_retrain_link); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe111, quirk_enable_clear_retrain_link); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PERICOM, 0xe130, quirk_enable_clear_retrain_link); static void fixup_rev1_53c810(struct pci_dev *dev) { u32 class = dev->class; /* * rev 1 ncr53c810 chips don't set the class at all which means * they don't get their resources remapped. Fix that here. 
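 * Only devices that report a class of zero are touched; the class is forced
 * to PCI_CLASS_STORAGE_SCSI so the resource code and the SCSI driver treat
 * the chip normally.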
*/ if (class) return; dev->class = PCI_CLASS_STORAGE_SCSI << 8; pci_info(dev, "NCR 53c810 rev 1 PCI class overridden (%#08x -> %#08x)\n", class, dev->class); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); /* Enable 1k I/O space granularity on the Intel P64H2 */ static void quirk_p64h2_1k_io(struct pci_dev *dev) { u16 en1k; pci_read_config_word(dev, 0x40, &en1k); if (en1k & 0x200) { pci_info(dev, "Enable I/O Space to 1KB granularity\n"); dev->io_window_1k = 1; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1460, quirk_p64h2_1k_io); /* * Under some circumstances, AER is not linked with extended capabilities. * Force it to be linked by setting the corresponding control bit in the * config space. */ static void quirk_nvidia_ck804_pcie_aer_ext_cap(struct pci_dev *dev) { uint8_t b; if (pci_read_config_byte(dev, 0xf41, &b) == 0) { if (!(b & 0x20)) { pci_write_config_byte(dev, 0xf41, b | 0x20); pci_info(dev, "Linking AER extended capability\n"); } } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_pcie_aer_ext_cap); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_pcie_aer_ext_cap); static void quirk_via_cx700_pci_parking_caching(struct pci_dev *dev) { /* * Disable PCI Bus Parking and PCI Master read caching on CX700 * which causes unspecified timing errors with a VT6212L on the PCI * bus leading to USB2.0 packet loss. * * This quirk is only enabled if a second (on the external PCI bus) * VT6212L is found -- the CX700 core itself also contains a USB * host controller with the same PCI ID as the VT6212L. */ /* Count VT6212L instances */ struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, NULL); uint8_t b; /* * p should contain the first (internal) VT6212L -- see if we have * an external one by searching again. */ p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p); if (!p) return; pci_dev_put(p); if (pci_read_config_byte(dev, 0x76, &b) == 0) { if (b & 0x40) { /* Turn off PCI Bus Parking */ pci_write_config_byte(dev, 0x76, b ^ 0x40); pci_info(dev, "Disabling VIA CX700 PCI parking\n"); } } if (pci_read_config_byte(dev, 0x72, &b) == 0) { if (b != 0) { /* Turn off PCI Master read caching */ pci_write_config_byte(dev, 0x72, 0x0); /* Set PCI Master Bus time-out to "1x16 PCLK" */ pci_write_config_byte(dev, 0x75, 0x1); /* Disable "Read FIFO Timer" */ pci_write_config_byte(dev, 0x77, 0x0); pci_info(dev, "Disabling VIA CX700 PCI caching\n"); } } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching); static void quirk_brcm_5719_limit_mrrs(struct pci_dev *dev) { u32 rev; pci_read_config_dword(dev, 0xf4, &rev); /* Only CAP the MRRS if the device is a 5719 A0 */ if (rev == 0x05719000) { int readrq = pcie_get_readrq(dev); if (readrq > 2048) pcie_set_readrq(dev, 2048); } } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5719, quirk_brcm_5719_limit_mrrs); /* * Originally in EDAC sources for i82875P: Intel tells BIOS developers to * hide device 6 which configures the overflow device access containing the * DRBs - this is where we expose device 6. 
* http://www.x86-secret.com/articles/tweak/pat/patsecrets-2.htm */ static void quirk_unhide_mch_dev6(struct pci_dev *dev) { u8 reg; if (pci_read_config_byte(dev, 0xF4, &reg) == 0 && !(reg & 0x02)) { pci_info(dev, "Enabling MCH 'Overflow' Device\n"); pci_write_config_byte(dev, 0xF4, reg | 0x02); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB, quirk_unhide_mch_dev6); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB, quirk_unhide_mch_dev6); #ifdef CONFIG_PCI_MSI /* * Some chipsets do not support MSI. We cannot easily rely on setting * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually some * other buses controlled by the chipset even if Linux is not aware of it. * Instead of setting the flag on all buses in the machine, simply disable * MSI globally. */ static void quirk_disable_all_msi(struct pci_dev *dev) { pci_no_msi(); pci_warn(dev, "MSI quirk detected; MSI disabled\n"); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3336, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3351, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT3364, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8380_0, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, 0x0761, quirk_disable_all_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SAMSUNG, 0xa5e3, quirk_disable_all_msi); /* Disable MSI on chipsets that are known to not support it */ static void quirk_disable_msi(struct pci_dev *dev) { if (dev->subordinate) { pci_warn(dev, "MSI quirk detected; subordinate MSI disabled\n"); dev->subordinate->bus_flags |= PCI_BUS_FLAGS_NO_MSI; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi); /* * The APC bridge device in AMD 780 family northbridges has some random * OEM subsystem ID in its vendor ID register (erratum 18), so instead * we use the possible vendor/device IDs of the host bridge for the * declared quirk, and search for the APC bridge by slot number. */ static void quirk_amd_780_apc_msi(struct pci_dev *host_bridge) { struct pci_dev *apc_bridge; apc_bridge = pci_get_slot(host_bridge->bus, PCI_DEVFN(1, 0)); if (apc_bridge) { if (apc_bridge->device == 0x9602) quirk_disable_msi(apc_bridge); pci_dev_put(apc_bridge); } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9600, quirk_amd_780_apc_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9601, quirk_amd_780_apc_msi); /* * Go through the list of HyperTransport capabilities and return 1 if a HT * MSI capability is found and enabled. */ static int msi_ht_cap_enabled(struct pci_dev *dev) { int pos, ttl = PCI_FIND_CAP_TTL; pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { pci_info(dev, "Found %s HT MSI Mapping\n", flags & HT_MSI_FLAGS_ENABLE ? 
"enabled" : "disabled"); return (flags & HT_MSI_FLAGS_ENABLE) != 0; } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } return 0; } /* Check the HyperTransport MSI mapping to know whether MSI is enabled or not */ static void quirk_msi_ht_cap(struct pci_dev *dev) { if (!msi_ht_cap_enabled(dev)) quirk_disable_msi(dev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE, quirk_msi_ht_cap); /* * The nVidia CK804 chipset may have 2 HT MSI mappings. MSI is supported * if the MSI capability is set in any of these mappings. */ static void quirk_nvidia_ck804_msi_ht_cap(struct pci_dev *dev) { struct pci_dev *pdev; /* * Check HT MSI cap on this chipset and the root one. A single one * having MSI is enough to be sure that MSI is supported. */ pdev = pci_get_slot(dev->bus, 0); if (!pdev) return; if (!msi_ht_cap_enabled(pdev)) quirk_msi_ht_cap(dev); pci_dev_put(pdev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, quirk_nvidia_ck804_msi_ht_cap); /* Force enable MSI mapping capability on HT bridges */ static void ht_enable_msi_mapping(struct pci_dev *dev) { int pos, ttl = PCI_FIND_CAP_TTL; pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { pci_info(dev, "Enabling HT MSI Mapping\n"); pci_write_config_byte(dev, pos + HT_MSI_FLAGS, flags | HT_MSI_FLAGS_ENABLE); } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000_PXB, ht_enable_msi_mapping); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, ht_enable_msi_mapping); /* * The P5N32-SLI motherboards from Asus have a problem with MSI * for the MCP55 NIC. It is not yet determined whether the MSI problem * also affects other devices. As for now, turn off MSI for this device. */ static void nvenet_msi_disable(struct pci_dev *dev) { const char *board_name = dmi_get_system_info(DMI_BOARD_NAME); if (board_name && (strstr(board_name, "P5N32-SLI PREMIUM") || strstr(board_name, "P5N32-E SLI"))) { pci_info(dev, "Disabling MSI for MCP55 NIC on P5N32-SLI\n"); dev->no_msi = 1; } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15, nvenet_msi_disable); /* * PCIe spec r6.0 sec 6.1.4.3 says that if MSI/MSI-X is enabled, the device * can't use INTx interrupts. Tegra's PCIe Root Ports don't generate MSI * interrupts for PME and AER events; instead only INTx interrupts are * generated. Though Tegra's PCIe Root Ports can generate MSI interrupts * for other events, since PCIe specification doesn't support using a mix of * INTx and MSI/MSI-X, it is required to disable MSI interrupts to avoid port * service drivers registering their respective ISRs for MSIs. 
*/ static void pci_quirk_nvidia_tegra_disable_rp_msi(struct pci_dev *dev) { dev->no_msi = 1; } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad0, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad1, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x1ad2, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e12, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e13, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0fae, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0faf, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e5, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x10e6, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229a, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229c, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_NVIDIA, 0x229e, PCI_CLASS_BRIDGE_PCI, 8, pci_quirk_nvidia_tegra_disable_rp_msi); /* * Some versions of the MCP55 bridge from Nvidia have a legacy IRQ routing * config register. This register controls the routing of legacy * interrupts from devices that route through the MCP55. If this register * is misprogrammed, interrupts are only sent to the BSP, unlike * conventional systems where the IRQ is broadcast to all online CPUs. Not * having this register set properly prevents kdump from booting up * properly, so let's make sure that we have it set correctly. * Note that this is an undocumented register. 
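 *
 * The fixup below only acts on bridges that expose a HyperTransport
 * capability; it clears bits 2 and 15 of config register 0x74, which
 * restores broadcasting of legacy interrupts to all online CPUs.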
*/ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev) { u32 cfg; if (!pci_find_capability(dev, PCI_CAP_ID_HT)) return; pci_read_config_dword(dev, 0x74, &cfg); if (cfg & ((1 << 2) | (1 << 15))) { pr_info("Rewriting IRQ routing register on MCP55\n"); cfg &= ~((1 << 2) | (1 << 15)); pci_write_config_dword(dev, 0x74, cfg); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V0, nvbridge_check_legacy_irq_routing); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_MCP55_BRIDGE_V4, nvbridge_check_legacy_irq_routing); static int ht_check_msi_mapping(struct pci_dev *dev) { int pos, ttl = PCI_FIND_CAP_TTL; int found = 0; /* Check if there is HT MSI cap or enabled on this device */ pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (found < 1) found = 1; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { if (flags & HT_MSI_FLAGS_ENABLE) { if (found < 2) { found = 2; break; } } } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } return found; } static int host_bridge_with_leaf(struct pci_dev *host_bridge) { struct pci_dev *dev; int pos; int i, dev_no; int found = 0; dev_no = host_bridge->devfn >> 3; for (i = dev_no + 1; i < 0x20; i++) { dev = pci_get_slot(host_bridge->bus, PCI_DEVFN(i, 0)); if (!dev) continue; /* found next host bridge? */ pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE); if (pos != 0) { pci_dev_put(dev); break; } if (ht_check_msi_mapping(dev)) { found = 1; pci_dev_put(dev); break; } pci_dev_put(dev); } return found; } #define PCI_HT_CAP_SLAVE_CTRL0 4 /* link control */ #define PCI_HT_CAP_SLAVE_CTRL1 8 /* link control to */ static int is_end_of_ht_chain(struct pci_dev *dev) { int pos, ctrl_off; int end = 0; u16 flags, ctrl; pos = pci_find_ht_capability(dev, HT_CAPTYPE_SLAVE); if (!pos) goto out; pci_read_config_word(dev, pos + PCI_CAP_FLAGS, &flags); ctrl_off = ((flags >> 10) & 1) ? PCI_HT_CAP_SLAVE_CTRL0 : PCI_HT_CAP_SLAVE_CTRL1; pci_read_config_word(dev, pos + ctrl_off, &ctrl); if (ctrl & (1 << 6)) end = 1; out: return end; } static void nv_ht_enable_msi_mapping(struct pci_dev *dev) { struct pci_dev *host_bridge; int pos; int i, dev_no; int found = 0; dev_no = dev->devfn >> 3; for (i = dev_no; i >= 0; i--) { host_bridge = pci_get_slot(dev->bus, PCI_DEVFN(i, 0)); if (!host_bridge) continue; pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE); if (pos != 0) { found = 1; break; } pci_dev_put(host_bridge); } if (!found) return; /* don't enable end_device/host_bridge with leaf directly here */ if (host_bridge == dev && is_end_of_ht_chain(host_bridge) && host_bridge_with_leaf(host_bridge)) goto out; /* root did that ! 
*/ if (msi_ht_cap_enabled(host_bridge)) goto out; ht_enable_msi_mapping(dev); out: pci_dev_put(host_bridge); } static void ht_disable_msi_mapping(struct pci_dev *dev) { int pos, ttl = PCI_FIND_CAP_TTL; pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING); while (pos && ttl--) { u8 flags; if (pci_read_config_byte(dev, pos + HT_MSI_FLAGS, &flags) == 0) { pci_info(dev, "Disabling HT MSI Mapping\n"); pci_write_config_byte(dev, pos + HT_MSI_FLAGS, flags & ~HT_MSI_FLAGS_ENABLE); } pos = pci_find_next_ht_capability(dev, pos, HT_CAPTYPE_MSI_MAPPING); } } static void __nv_msi_ht_cap_quirk(struct pci_dev *dev, int all) { struct pci_dev *host_bridge; int pos; int found; if (!pci_msi_enabled()) return; /* check if there is HT MSI cap or enabled on this device */ found = ht_check_msi_mapping(dev); /* no HT MSI CAP */ if (found == 0) return; /* * HT MSI mapping should be disabled on devices that are below * a non-HyperTransport host bridge. Locate the host bridge. */ host_bridge = pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), 0, PCI_DEVFN(0, 0)); if (host_bridge == NULL) { pci_warn(dev, "nv_msi_ht_cap_quirk didn't locate host bridge\n"); return; } pos = pci_find_ht_capability(host_bridge, HT_CAPTYPE_SLAVE); if (pos != 0) { /* Host bridge is to HT */ if (found == 1) { /* it is not enabled, try to enable it */ if (all) ht_enable_msi_mapping(dev); else nv_ht_enable_msi_mapping(dev); } goto out; } /* HT MSI is not enabled */ if (found == 1) goto out; /* Host bridge is not to HT, disable HT MSI mapping on this device */ ht_disable_msi_mapping(dev); out: pci_dev_put(host_bridge); } static void nv_msi_ht_cap_quirk_all(struct pci_dev *dev) { return __nv_msi_ht_cap_quirk(dev, 1); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all); static void nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev) { return __nv_msi_ht_cap_quirk(dev, 0); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf); static void quirk_msi_intx_disable_bug(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; } static void quirk_msi_intx_disable_ati_bug(struct pci_dev *dev) { struct pci_dev *p; /* * SB700 MSI issue will be fixed at HW level from revision A21; * we need check PCI REVISION ID of SMBus controller to get SB700 * revision. 
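 * SMBus controller revisions 0x30-0x3A correspond to SB700 parts older
 * than A21 and get the MSI_INTX_DISABLE_BUG flag; revision 0x3B and newer
 * are fixed in hardware and are left alone.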
*/ p = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); if (!p) return; if ((p->revision < 0x3B) && (p->revision >= 0x30)) dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; pci_dev_put(p); } static void quirk_msi_intx_disable_qca_bug(struct pci_dev *dev) { /* AR816X/AR817X/E210X MSI is fixed at HW level from revision 0x18 */ if (dev->revision < 0x18) { pci_info(dev, "set MSI_INTX_DISABLE_BUG flag\n"); dev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4390, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4391, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4392, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4393, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4394, quirk_msi_intx_disable_ati_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4373, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4374, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1062, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1063, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2060, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x2062, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1073, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1083, quirk_msi_intx_disable_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1090, quirk_msi_intx_disable_qca_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x1091, quirk_msi_intx_disable_qca_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a0, quirk_msi_intx_disable_qca_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0x10a1, quirk_msi_intx_disable_qca_bug); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, 0xe091, quirk_msi_intx_disable_qca_bug); /* * Amazon's Annapurna Labs 1c36:0031 Root Ports don't support MSI-X, so it * should be disabled on platforms where the device (mistakenly) advertises it. * * Notice that this quirk also disables MSI (which may work, but hasn't been * tested), since currently there is no standard way to disable only MSI-X. * * The 0031 device id is reused for other non Root Port device types, * therefore the quirk is registered for the PCI_CLASS_BRIDGE_PCI class. 
*/ static void quirk_al_msi_disable(struct pci_dev *dev) { dev->no_msi = 1; pci_warn(dev, "Disabling MSI/MSI-X\n"); } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, PCI_CLASS_BRIDGE_PCI, 8, quirk_al_msi_disable); #endif /* CONFIG_PCI_MSI */ /* * Allow manual resource allocation for PCI hotplug bridges via * pci=hpmemsize=nnM and pci=hpiosize=nnM parameters. For some PCI-PCI * hotplug bridges, like PLX 6254 (former HINT HB6), kernel fails to * allocate resources when hotplug device is inserted and PCI bus is * rescanned. */ static void quirk_hotplug_bridge(struct pci_dev *dev) { dev->is_hotplug_bridge = 1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge); /* * This is a quirk for the Ricoh MMC controller found as a part of some * multifunction chips. * * This is very similar and based on the ricoh_mmc driver written by * Philip Langdale. Thank you for these magic sequences. * * These chips implement the four main memory card controllers (SD, MMC, * MS, xD) and one or both of CardBus or FireWire. * * It happens that they implement SD and MMC support as separate * controllers (and PCI functions). The Linux SDHCI driver supports MMC * cards but the chip detects MMC cards in hardware and directs them to the * MMC controller - so the SDHCI driver never sees them. * * To get around this, we must disable the useless MMC controller. At that * point, the SDHCI controller will start seeing them. It seems to be the * case that the relevant PCI registers to deactivate the MMC controller * live on PCI function 0, which might be the CardBus controller or the * FireWire controller, depending on the particular chip in question * * This has to be done early, because as soon as we disable the MMC controller * other PCI functions shift up one level, e.g. function #2 becomes function * #1, and this will confuse the PCI core. */ #ifdef CONFIG_MMC_RICOH_MMC static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev) { u8 write_enable; u8 write_target; u8 disable; /* * Disable via CardBus interface * * This must be done via function #0 */ if (PCI_FUNC(dev->devfn)) return; pci_read_config_byte(dev, 0xB7, &disable); if (disable & 0x02) return; pci_read_config_byte(dev, 0x8E, &write_enable); pci_write_config_byte(dev, 0x8E, 0xAA); pci_read_config_byte(dev, 0x8D, &write_target); pci_write_config_byte(dev, 0x8D, 0xB7); pci_write_config_byte(dev, 0xB7, disable | 0x02); pci_write_config_byte(dev, 0x8E, write_enable); pci_write_config_byte(dev, 0x8D, write_target); pci_notice(dev, "proprietary Ricoh MMC controller disabled (via CardBus function)\n"); pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n"); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476); static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev) { u8 write_enable; u8 disable; /* * Disable via FireWire interface * * This must be done via function #0 */ if (PCI_FUNC(dev->devfn)) return; /* * RICOH 0xe822 and 0xe823 SD/MMC card readers fail to recognize * certain types of SD/MMC cards. Lowering the SD base clock * frequency from 200Mhz to 50Mhz fixes this issue. 
* * 0x150 - SD2.0 mode enable for changing base clock * frequency to 50Mhz * 0xe1 - Base clock frequency * 0x32 - 50Mhz new clock frequency * 0xf9 - Key register for 0x150 * 0xfc - key register for 0xe1 */ if (dev->device == PCI_DEVICE_ID_RICOH_R5CE822 || dev->device == PCI_DEVICE_ID_RICOH_R5CE823) { pci_write_config_byte(dev, 0xf9, 0xfc); pci_write_config_byte(dev, 0x150, 0x10); pci_write_config_byte(dev, 0xf9, 0x00); pci_write_config_byte(dev, 0xfc, 0x01); pci_write_config_byte(dev, 0xe1, 0x32); pci_write_config_byte(dev, 0xfc, 0x00); pci_notice(dev, "MMC controller base frequency changed to 50Mhz.\n"); } pci_read_config_byte(dev, 0xCB, &disable); if (disable & 0x02) return; pci_read_config_byte(dev, 0xCA, &write_enable); pci_write_config_byte(dev, 0xCA, 0x57); pci_write_config_byte(dev, 0xCB, disable | 0x02); pci_write_config_byte(dev, 0xCA, write_enable); pci_notice(dev, "proprietary Ricoh MMC controller disabled (via FireWire function)\n"); pci_notice(dev, "MMC cards are now supported by standard SDHCI controller\n"); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE822, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5CE823, ricoh_mmc_fixup_r5c832); #endif /*CONFIG_MMC_RICOH_MMC*/ #ifdef CONFIG_DMAR_TABLE #define VTUNCERRMSK_REG 0x1ac #define VTD_MSK_SPEC_ERRORS (1 << 31) /* * This is a quirk for masking VT-d spec-defined errors to platform error * handling logic. Without this, platforms using Intel 7500, 5500 chipsets * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based * on the RAS config settings of the platform) when a VT-d fault happens. * The resulting SMI caused the system to hang. * * VT-d spec-related errors are already handled by the VT-d OS code, so no * need to report the same error through other channels. */ static void vtd_mask_spec_errors(struct pci_dev *dev) { u32 word; pci_read_config_dword(dev, VTUNCERRMSK_REG, &word); pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors); #endif static void fixup_ti816x_class(struct pci_dev *dev) { u32 class = dev->class; /* TI 816x devices do not have class code set when in PCIe boot mode */ dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8; pci_info(dev, "PCI class overridden (%#08x -> %#08x)\n", class, dev->class); } DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_TI, 0xb800, PCI_CLASS_NOT_DEFINED, 8, fixup_ti816x_class); /* * Some PCIe devices do not work reliably with the claimed maximum * payload size supported. 
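 *
 * dev->pcie_mpss uses the Device Capabilities MPS encoding (payload size is
 * 128 << value), so the value 1 set below caps the MPS at 256 bytes.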
*/ static void fixup_mpss_256(struct pci_dev *dev) { dev->pcie_mpss = 1; /* 256 bytes */ } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE, PCI_DEVICE_ID_SOLARFLARE_SFC4000A_0, fixup_mpss_256); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE, PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, fixup_mpss_256); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLARFLARE, PCI_DEVICE_ID_SOLARFLARE_SFC4000B, fixup_mpss_256); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ASMEDIA, 0x0612, fixup_mpss_256); /* * Intel 5000 and 5100 Memory controllers have an erratum with read completion * coalescing (which is enabled by default on some BIOSes) and MPS of 256B. * Since there is no way of knowing what the PCIe MPS on each fabric will be * until all of the devices are discovered and buses walked, read completion * coalescing must be disabled. Unfortunately, it cannot be re-enabled because * it is possible to hotplug a device with MPS of 256B. */ static void quirk_intel_mc_errata(struct pci_dev *dev) { int err; u16 rcc; if (pcie_bus_config == PCIE_BUS_TUNE_OFF || pcie_bus_config == PCIE_BUS_DEFAULT) return; /* * Intel erratum specifies bits to change but does not say what * they are. Keeping them magical until such time as the registers * and values can be explained. */ err = pci_read_config_word(dev, 0x48, &rcc); if (err) { pci_err(dev, "Error attempting to read the read completion coalescing register\n"); return; } if (!(rcc & (1 << 10))) return; rcc &= ~(1 << 10); err = pci_write_config_word(dev, 0x48, rcc); if (err) { pci_err(dev, "Error attempting to write the read completion coalescing register\n"); return; } pr_info_once("Read completion coalescing disabled due to hardware erratum relating to 256B MPS\n"); } /* Intel 5000 series memory controllers and ports 2-7 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25c0, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d0, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d4, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25d8, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e3, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e4, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e5, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e6, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25e7, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f7, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f8, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25f9, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x25fa, quirk_intel_mc_errata); /* Intel 5100 series memory controllers and ports 2-7 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65c0, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e2, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e3, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e4, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e5, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e6, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65e7, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f7, quirk_intel_mc_errata); 
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f8, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65f9, quirk_intel_mc_errata); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x65fa, quirk_intel_mc_errata); /* * Ivytown NTB BAR sizes are misreported by the hardware due to an erratum. * To work around this, query the size it should be configured to by the * device and modify the resource end to correspond to this new size. */ static void quirk_intel_ntb(struct pci_dev *dev) { int rc; u8 val; rc = pci_read_config_byte(dev, 0x00D0, &val); if (rc) return; dev->resource[2].end = dev->resource[2].start + ((u64) 1 << val) - 1; rc = pci_read_config_byte(dev, 0x00D1, &val); if (rc) return; dev->resource[4].end = dev->resource[4].start + ((u64) 1 << val) - 1; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e08, quirk_intel_ntb); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0e0d, quirk_intel_ntb); /* * Some BIOS implementations leave the Intel GPU interrupts enabled, even * though no one is handling them (e.g., if the i915 driver is never * loaded). Additionally the interrupt destination is not set up properly * and the interrupt ends up -somewhere-. * * These spurious interrupts are "sticky" and the kernel disables the * (shared) interrupt line after 100,000+ generated interrupts. * * Fix it by disabling the still enabled interrupts. This resolves crashes * often seen on monitor unplug. */ #define I915_DEIER_REG 0x4400c static void disable_igfx_irq(struct pci_dev *dev) { void __iomem *regs = pci_iomap(dev, 0, 0); if (regs == NULL) { pci_warn(dev, "igfx quirk: Can't iomap PCI device\n"); return; } /* Check if any interrupt line is still enabled */ if (readl(regs + I915_DEIER_REG) != 0) { pci_warn(dev, "BIOS left Intel GPU interrupts enabled; disabling\n"); writel(0, regs + I915_DEIER_REG); } pci_iounmap(dev, regs); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0042, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0046, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x004a, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0106, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq); /* * PCI devices which are on Intel chips can skip the 10ms delay * before entering D3 mode. 
*/ static void quirk_remove_d3hot_delay(struct pci_dev *dev) { dev->d3hot_delay = 0; } /* C600 Series devices do not need 10ms d3hot_delay */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3hot_delay); /* Lynxpoint-H PCH devices do not need 10ms d3hot_delay */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3hot_delay); /* Intel Cherrytrail devices do not need 10ms d3hot_delay */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3hot_delay); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3hot_delay); /* * Some devices may pass our check in pci_intx_mask_supported() if * PCI_COMMAND_INTX_DISABLE works though they actually do not properly * support this feature. */ static void quirk_broken_intx_masking(struct pci_dev *dev) { dev->broken_intx_masking = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x0030, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(0x1814, 0x0601, /* Ralink RT2800 802.11n PCI */ quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(0x1b7c, 0x0004, /* Ceton InfiniTV4 */ quirk_broken_intx_masking); /* * Realtek RTL8169 PCI Gigabit Ethernet Controller (rev 10) * Subsystem: Realtek RTL8169/8110 Family PCI Gigabit Ethernet NIC * * RTL8110SC - Fails under PCI device assignment using DisINTx masking. */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REALTEK, 0x8169, quirk_broken_intx_masking); /* * Intel i40e (XL710/X710) 10/20/40GbE NICs all have broken INTx masking, * DisINTx can be set but the interrupt status bit is non-functional. 
*/ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1572, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1574, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1580, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1581, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1583, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1584, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1585, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1586, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1587, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, quirk_broken_intx_masking); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d2, quirk_broken_intx_masking); static u16 mellanox_broken_intx_devs[] = { PCI_DEVICE_ID_MELLANOX_HERMON_SDR, PCI_DEVICE_ID_MELLANOX_HERMON_DDR, PCI_DEVICE_ID_MELLANOX_HERMON_QDR, PCI_DEVICE_ID_MELLANOX_HERMON_DDR_GEN2, PCI_DEVICE_ID_MELLANOX_HERMON_QDR_GEN2, PCI_DEVICE_ID_MELLANOX_HERMON_EN, PCI_DEVICE_ID_MELLANOX_HERMON_EN_GEN2, PCI_DEVICE_ID_MELLANOX_CONNECTX_EN, PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_T_GEN2, PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_GEN2, PCI_DEVICE_ID_MELLANOX_CONNECTX_EN_5_GEN2, PCI_DEVICE_ID_MELLANOX_CONNECTX2, PCI_DEVICE_ID_MELLANOX_CONNECTX3, PCI_DEVICE_ID_MELLANOX_CONNECTX3_PRO, }; #define CONNECTX_4_CURR_MAX_MINOR 99 #define CONNECTX_4_INTX_SUPPORT_MINOR 14 /* * Check ConnectX-4/LX FW version to see if it supports legacy interrupts. * If so, don't mark it as broken. * FW minor > 99 means older FW version format and no INTx masking support. * FW minor < 14 means new FW version format and no INTx masking support. */ static void mellanox_check_broken_intx_masking(struct pci_dev *pdev) { __be32 __iomem *fw_ver; u16 fw_major; u16 fw_minor; u16 fw_subminor; u32 fw_maj_min; u32 fw_sub_min; int i; for (i = 0; i < ARRAY_SIZE(mellanox_broken_intx_devs); i++) { if (pdev->device == mellanox_broken_intx_devs[i]) { pdev->broken_intx_masking = 1; return; } } /* * Getting here means Connect-IB cards and up. Connect-IB has no INTx * support so shouldn't be checked further */ if (pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTIB) return; if (pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4 && pdev->device != PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) return; /* For ConnectX-4 and ConnectX-4LX, need to check FW support */ if (pci_enable_device_mem(pdev)) { pci_warn(pdev, "Can't enable device memory\n"); return; } fw_ver = ioremap(pci_resource_start(pdev, 0), 4); if (!fw_ver) { pci_warn(pdev, "Can't map ConnectX-4 initialization segment\n"); goto out; } /* Reading from resource space should be 32b aligned */ fw_maj_min = ioread32be(fw_ver); fw_sub_min = ioread32be(fw_ver + 1); fw_major = fw_maj_min & 0xffff; fw_minor = fw_maj_min >> 16; fw_subminor = fw_sub_min & 0xffff; if (fw_minor > CONNECTX_4_CURR_MAX_MINOR || fw_minor < CONNECTX_4_INTX_SUPPORT_MINOR) { pci_warn(pdev, "ConnectX-4: FW %u.%u.%u doesn't support INTx masking, disabling. 
Please upgrade FW to %d.14.1100 and up for INTx support\n", fw_major, fw_minor, fw_subminor, pdev->device == PCI_DEVICE_ID_MELLANOX_CONNECTX4 ? 12 : 14); pdev->broken_intx_masking = 1; } iounmap(fw_ver); out: pci_disable_device(pdev); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID, mellanox_check_broken_intx_masking); static void quirk_no_bus_reset(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; } /* * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be * prevented for those affected devices. */ static void quirk_nvidia_no_bus_reset(struct pci_dev *dev) { if ((dev->device & 0xffc0) == 0x2340) quirk_no_bus_reset(dev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, quirk_nvidia_no_bus_reset); /* * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset. * The device will throw a Link Down error on AER-capable systems and * regardless of AER, config space of the device is never accessible again * and typically causes the system to hang or reset when access is attempted. * https://lore.kernel.org/r/[email protected]/ */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003e, quirk_no_bus_reset); /* * Root port on some Cavium CN8xxx chips do not successfully complete a bus * reset when used with certain child devices. After the reset, config * accesses to the child may fail. */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CAVIUM, 0xa100, quirk_no_bus_reset); /* * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS * automatically disables LTSSM when Secondary Bus Reset is received and * the device stops working. Prevent bus reset for these devices. With * this change, the device can be assigned to VMs with VFIO, but it will * leak state between VMs. Reference * https://e2e.ti.com/support/processors/f/791/t/954382 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset); static void quirk_no_pm_reset(struct pci_dev *dev) { /* * We can't do a bus reset on root bus devices, but an ineffective * PM reset may be better than nothing. */ if (!pci_is_root_bus(dev->bus)) dev->dev_flags |= PCI_DEV_FLAGS_NO_PM_RESET; } /* * Some AMD/ATI GPUS (HD8570 - Oland) report that a D3hot->D0 transition * causes a reset (i.e., they advertise NoSoftRst-). This transition seems * to have no effect on the device: it retains the framebuffer contents and * monitor sync. Advertising this support makes other layers, like VFIO, * assume pci_reset_function() is viable for this device. Mark it as * unavailable to skip it when testing reset methods. */ DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, 8, quirk_no_pm_reset); /* * Thunderbolt controllers with broken MSI hotplug signaling: * Entire 1st generation (Light Ridge, Eagle Ridge, Light Peak) and part * of the 2nd generation (Cactus Ridge 4C up to revision 1, Port Ridge). 
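 *
 * The fixup below only applies when pdev->is_hotplug_bridge is set, so the
 * NHI itself keeps MSI; for Cactus Ridge 4C it is further restricted to
 * revision <= 1, matching the description above.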
*/ static void quirk_thunderbolt_hotplug_msi(struct pci_dev *pdev) { if (pdev->is_hotplug_bridge && (pdev->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C || pdev->revision <= 1)) pdev->no_msi = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, quirk_thunderbolt_hotplug_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EAGLE_RIDGE, quirk_thunderbolt_hotplug_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LIGHT_PEAK, quirk_thunderbolt_hotplug_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, quirk_thunderbolt_hotplug_msi); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE, quirk_thunderbolt_hotplug_msi); #ifdef CONFIG_ACPI /* * Apple: Shutdown Cactus Ridge Thunderbolt controller. * * On Apple hardware the Cactus Ridge Thunderbolt controller needs to be * shutdown before suspend. Otherwise the native host interface (NHI) will not * be present after resume if a device was plugged in before suspend. * * The Thunderbolt controller consists of a PCIe switch with downstream * bridges leading to the NHI and to the tunnel PCI bridges. * * This quirk cuts power to the whole chip. Therefore we have to apply it * during suspend_noirq of the upstream bridge. * * Power is automagically restored before resume. No action is needed. */ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev) { acpi_handle bridge, SXIO, SXFP, SXLV; if (!x86_apple_machine) return; if (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM) return; /* * SXIO/SXFP/SXLF turns off power to the Thunderbolt controller. * We don't know how to turn it back on again, but firmware does, * so we can only use SXIO/SXFP/SXLF if we're suspending via * firmware. */ if (!pm_suspend_via_firmware()) return; bridge = ACPI_HANDLE(&dev->dev); if (!bridge) return; /* * SXIO and SXLV are present only on machines requiring this quirk. * Thunderbolt bridges in external devices might have the same * device ID as those on the host, but they will not have the * associated ACPI methods. This implicitly checks that we are at * the right bridge. */ if (ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXIO", &SXIO)) || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXFP", &SXFP)) || ACPI_FAILURE(acpi_get_handle(bridge, "DSB0.NHI0.SXLV", &SXLV))) return; pci_info(dev, "quirk: cutting power to Thunderbolt controller...\n"); /* magic sequence */ acpi_execute_simple_method(SXIO, NULL, 1); acpi_execute_simple_method(SXFP, NULL, 0); msleep(300); acpi_execute_simple_method(SXLV, NULL, 0); acpi_execute_simple_method(SXIO, NULL, 0); acpi_execute_simple_method(SXLV, NULL, 0); } DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, quirk_apple_poweroff_thunderbolt); #endif /* * Following are device-specific reset methods which can be used to * reset a single function if other methods (e.g. FLR, PM D0->D3) are * not available. */ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, bool probe) { /* * http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf * * The 82599 supports FLR on VFs, but FLR support is reported only * in the PF DEVCAP (sec 9.3.10.4), not in the VF DEVCAP (sec 9.5). * Thus we must call pcie_flr() directly without first checking if it is * supported. 
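 *
 * (pcie_flr() issues the reset by setting PCI_EXP_DEVCTL_BCR_FLR in the
 * Device Control register and then waiting for the function to become
 * ready again.)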
*/ if (!probe) pcie_flr(dev); return 0; } #define SOUTH_CHICKEN2 0xc2004 #define PCH_PP_STATUS 0xc7200 #define PCH_PP_CONTROL 0xc7204 #define MSG_CTL 0x45010 #define NSDE_PWR_STATE 0xd0100 #define IGD_OPERATION_TIMEOUT 10000 /* set timeout 10 seconds */ static int reset_ivb_igd(struct pci_dev *dev, bool probe) { void __iomem *mmio_base; unsigned long timeout; u32 val; if (probe) return 0; mmio_base = pci_iomap(dev, 0, 0); if (!mmio_base) return -ENOMEM; iowrite32(0x00000002, mmio_base + MSG_CTL); /* * Clobbering SOUTH_CHICKEN2 register is fine only if the next * driver loaded sets the right bits. However, this's a reset and * the bits have been set by i915 previously, so we clobber * SOUTH_CHICKEN2 register directly here. */ iowrite32(0x00000005, mmio_base + SOUTH_CHICKEN2); val = ioread32(mmio_base + PCH_PP_CONTROL) & 0xfffffffe; iowrite32(val, mmio_base + PCH_PP_CONTROL); timeout = jiffies + msecs_to_jiffies(IGD_OPERATION_TIMEOUT); do { val = ioread32(mmio_base + PCH_PP_STATUS); if ((val & 0xb0000000) == 0) goto reset_complete; msleep(10); } while (time_before(jiffies, timeout)); pci_warn(dev, "timeout during reset\n"); reset_complete: iowrite32(0x00000002, mmio_base + NSDE_PWR_STATE); pci_iounmap(dev, mmio_base); return 0; } /* Device-specific reset method for Chelsio T4-based adapters */ static int reset_chelsio_generic_dev(struct pci_dev *dev, bool probe) { u16 old_command; u16 msix_flags; /* * If this isn't a Chelsio T4-based device, return -ENOTTY indicating * that we have no device-specific reset method. */ if ((dev->device & 0xf000) != 0x4000) return -ENOTTY; /* * If this is the "probe" phase, return 0 indicating that we can * reset this device. */ if (probe) return 0; /* * T4 can wedge if there are DMAs in flight within the chip and Bus * Master has been disabled. We need to have it on till the Function * Level Reset completes. (BUS_MASTER is disabled in * pci_reset_function()). */ pci_read_config_word(dev, PCI_COMMAND, &old_command); pci_write_config_word(dev, PCI_COMMAND, old_command | PCI_COMMAND_MASTER); /* * Perform the actual device function reset, saving and restoring * configuration information around the reset. */ pci_save_state(dev); /* * T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts * are disabled when an MSI-X interrupt message needs to be delivered. * So we briefly re-enable MSI-X interrupts for the duration of the * FLR. The pci_restore_state() below will restore the original * MSI-X state. */ pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags); if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0) pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, msix_flags | PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); pcie_flr(dev); /* * Restore the configuration information (BAR values, etc.) including * the original PCI Configuration Space Command word, and return * success. */ pci_restore_state(dev); pci_write_config_word(dev, PCI_COMMAND, old_command); return 0; } #define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed #define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156 #define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166 /* * The Samsung SM961/PM961 controller can sometimes enter a fatal state after * FLR where config space reads from the device return -1. We seem to be * able to avoid this condition if we disable the NVMe controller prior to * FLR. This quirk is generic for any NVMe class device requiring similar * assistance to quiesce the device prior to FLR. 
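 *
 * The flow implemented below is roughly: map BAR0, clear NVME_CC_ENABLE
 * (skipping the shutdown notification), poll NVME_CSTS_RDY until it clears
 * within the timeout advertised in the CAP register, then issue pcie_flr().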
* * NVMe specification: https://nvmexpress.org/resources/specifications/ * Revision 1.0e: * Chapter 2: Required and optional PCI config registers * Chapter 3: NVMe control registers * Chapter 7.3: Reset behavior */ static int nvme_disable_and_flr(struct pci_dev *dev, bool probe) { void __iomem *bar; u16 cmd; u32 cfg; if (dev->class != PCI_CLASS_STORAGE_EXPRESS || pcie_reset_flr(dev, PCI_RESET_PROBE) || !pci_resource_start(dev, 0)) return -ENOTTY; if (probe) return 0; bar = pci_iomap(dev, 0, NVME_REG_CC + sizeof(cfg)); if (!bar) return -ENOTTY; pci_read_config_word(dev, PCI_COMMAND, &cmd); pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MEMORY); cfg = readl(bar + NVME_REG_CC); /* Disable controller if enabled */ if (cfg & NVME_CC_ENABLE) { u32 cap = readl(bar + NVME_REG_CAP); unsigned long timeout; /* * Per nvme_disable_ctrl() skip shutdown notification as it * could complete commands to the admin queue. We only intend * to quiesce the device before reset. */ cfg &= ~(NVME_CC_SHN_MASK | NVME_CC_ENABLE); writel(cfg, bar + NVME_REG_CC); /* * Some controllers require an additional delay here, see * NVME_QUIRK_DELAY_BEFORE_CHK_RDY. None of those are yet * supported by this quirk. */ /* Cap register provides max timeout in 500ms increments */ timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; for (;;) { u32 status = readl(bar + NVME_REG_CSTS); /* Ready status becomes zero on disable complete */ if (!(status & NVME_CSTS_RDY)) break; msleep(100); if (time_after(jiffies, timeout)) { pci_warn(dev, "Timeout waiting for NVMe ready status to clear after disable\n"); break; } } } pci_iounmap(dev, bar); pcie_flr(dev); return 0; } /* * Some NVMe controllers such as Intel DC P3700 and Solidigm P44 Pro will * time out waiting for ready status to change after NVMe enable if the driver * starts interacting with the device too soon after FLR. A 250ms delay after * FLR has heuristically proven to produce reliably working results for device * assignment cases. */ static int delay_250ms_after_flr(struct pci_dev *dev, bool probe) { if (probe) return pcie_reset_flr(dev, PCI_RESET_PROBE); pcie_reset_flr(dev, PCI_RESET_DO_RESET); msleep(250); return 0; } #define PCI_DEVICE_ID_HINIC_VF 0x375E #define HINIC_VF_FLR_TYPE 0x1000 #define HINIC_VF_FLR_CAP_BIT (1UL << 30) #define HINIC_VF_OP 0xE80 #define HINIC_VF_FLR_PROC_BIT (1UL << 18) #define HINIC_OPERATION_TIMEOUT 15000 /* 15 seconds */ /* Device-specific reset method for Huawei Intelligent NIC virtual functions */ static int reset_hinic_vf_dev(struct pci_dev *pdev, bool probe) { unsigned long timeout; void __iomem *bar; u32 val; if (probe) return 0; bar = pci_iomap(pdev, 0, 0); if (!bar) return -ENOTTY; /* Get and check firmware capabilities */ val = ioread32be(bar + HINIC_VF_FLR_TYPE); if (!(val & HINIC_VF_FLR_CAP_BIT)) { pci_iounmap(pdev, bar); return -ENOTTY; } /* Set HINIC_VF_FLR_PROC_BIT for the start of FLR */ val = ioread32be(bar + HINIC_VF_OP); val = val | HINIC_VF_FLR_PROC_BIT; iowrite32be(val, bar + HINIC_VF_OP); pcie_flr(pdev); /* * The device must recapture its Bus and Device Numbers after FLR * in order to generate Completions. Issue a config write to let the * device capture this information.
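 *
 * The Vendor ID register is read-only, so the value written below is
 * ignored; the config transaction itself is what lets the device relearn
 * its Bus and Device Numbers.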
*/ pci_write_config_word(pdev, PCI_VENDOR_ID, 0); /* Firmware clears HINIC_VF_FLR_PROC_BIT when reset is complete */ timeout = jiffies + msecs_to_jiffies(HINIC_OPERATION_TIMEOUT); do { val = ioread32be(bar + HINIC_VF_OP); if (!(val & HINIC_VF_FLR_PROC_BIT)) goto reset_complete; msleep(20); } while (time_before(jiffies, timeout)); val = ioread32be(bar + HINIC_VF_OP); if (!(val & HINIC_VF_FLR_PROC_BIT)) goto reset_complete; pci_warn(pdev, "Reset dev timeout, FLR ack reg: %#010x\n", val); reset_complete: pci_iounmap(pdev, bar); return 0; } static const struct pci_dev_reset_methods pci_dev_reset_methods[] = { { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF, reset_intel_82599_sfp_virtfn }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M_VGA, reset_ivb_igd }, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IVB_M2_VGA, reset_ivb_igd }, { PCI_VENDOR_ID_SAMSUNG, 0xa804, nvme_disable_and_flr }, { PCI_VENDOR_ID_INTEL, 0x0953, delay_250ms_after_flr }, { PCI_VENDOR_ID_INTEL, 0x0a54, delay_250ms_after_flr }, { PCI_VENDOR_ID_SOLIDIGM, 0xf1ac, delay_250ms_after_flr }, { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, reset_chelsio_generic_dev }, { PCI_VENDOR_ID_HUAWEI, PCI_DEVICE_ID_HINIC_VF, reset_hinic_vf_dev }, { 0 } }; /* * These device-specific reset methods are here rather than in a driver * because when a host assigns a device to a guest VM, the host may need * to reset the device but probably doesn't have a driver for it. */ int pci_dev_specific_reset(struct pci_dev *dev, bool probe) { const struct pci_dev_reset_methods *i; for (i = pci_dev_reset_methods; i->reset; i++) { if ((i->vendor == dev->vendor || i->vendor == (u16)PCI_ANY_ID) && (i->device == dev->device || i->device == (u16)PCI_ANY_ID)) return i->reset(dev, probe); } return -ENOTTY; } static void quirk_dma_func0_alias(struct pci_dev *dev) { if (PCI_FUNC(dev->devfn) != 0) pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 0), 1); } /* * https://bugzilla.redhat.com/show_bug.cgi?id=605888 * * Some Ricoh devices use function 0 as the PCIe requester ID for DMA. */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe832, quirk_dma_func0_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias); static void quirk_dma_func1_alias(struct pci_dev *dev) { if (PCI_FUNC(dev->devfn) != 1) pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1); } /* * Marvell 88SE9123 uses function 1 as the requester ID for DMA. In some * SKUs function 1 is present and is a legacy IDE controller, in other * SKUs this function is not present, making this a ghost requester. 
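 *
 * Registering the alias via pci_add_dma_alias() below makes the IOMMU code
 * treat DMA tagged with the function 1 requester ID as coming from this
 * device.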
* https://bugzilla.kernel.org/show_bug.cgi?id=42679 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c136 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9125, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9170, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c47 + c57 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9172, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c59 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x917a, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c78 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9182, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c134 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9183, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c46 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x91a0, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c135 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9215, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c127 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9220, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c49 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9235, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, quirk_dma_func1_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645, quirk_dma_func1_alias); /* https://bugs.gentoo.org/show_bug.cgi?id=497630 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB388_ESD, quirk_dma_func1_alias); /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c117 */ DECLARE_PCI_FIXUP_HEADER(0x1c28, /* Lite-On */ 0x0122, /* Plextor M6E (Marvell 88SS9183)*/ quirk_dma_func1_alias); /* * Some devices DMA with the wrong devfn, not just the wrong function. * quirk_fixed_dma_alias() uses this table to create fixed aliases, where * the alias is "fixed" and independent of the device devfn. * * For example, the Adaptec 3405 is a PCIe card with an Intel 80333 I/O * processor. To software, this appears as a PCIe-to-PCI/X bridge with a * single device on the secondary bus. In reality, the single exposed * device at 0e.0 is the Address Translation Unit (ATU) of the controller * that provides a bridge to the internal bus of the I/O processor. The * controller supports private devices, which can be hidden from PCI config * space. In the case of the Adaptec 3405, a private device at 01.0 * appears to be the DMA engine, which therefore needs to become a DMA * alias for the device. 
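 *
 * In the table below the fixed alias devfn is carried in .driver_data,
 * e.g. PCI_DEVFN(1, 0) for the hidden DMA engine at 01.0, and
 * quirk_fixed_dma_alias() passes it straight to pci_add_dma_alias().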
*/ static const struct pci_device_id fixed_dma_alias_tbl[] = { { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285, PCI_VENDOR_ID_ADAPTEC2, 0x02bb), /* Adaptec 3405 */ .driver_data = PCI_DEVFN(1, 0) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x0285, PCI_VENDOR_ID_ADAPTEC2, 0x02bc), /* Adaptec 3805 */ .driver_data = PCI_DEVFN(1, 0) }, { 0 } }; static void quirk_fixed_dma_alias(struct pci_dev *dev) { const struct pci_device_id *id; id = pci_match_id(fixed_dma_alias_tbl, dev); if (id) pci_add_dma_alias(dev, id->driver_data, 1); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ADAPTEC2, 0x0285, quirk_fixed_dma_alias); /* * A few PCIe-to-PCI bridges fail to expose a PCIe capability, resulting in * using the wrong DMA alias for the device. Some of these devices can be * used as either forward or reverse bridges, so we need to test whether the * device is operating in the correct mode. We could probably apply this * quirk to PCI_ANY_ID, but for now we'll just use known offenders. The test * is for a non-root, non-PCIe bridge where the upstream device is PCIe and * is not a PCIe-to-PCI bridge, then @pdev is actually a PCIe-to-PCI bridge. */ static void quirk_use_pcie_bridge_dma_alias(struct pci_dev *pdev) { if (!pci_is_root_bus(pdev->bus) && pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE && !pci_is_pcie(pdev) && pci_is_pcie(pdev->bus->self) && pci_pcie_type(pdev->bus->self) != PCI_EXP_TYPE_PCI_BRIDGE) pdev->dev_flags |= PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS; } /* ASM1083/1085, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c46 */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080, quirk_use_pcie_bridge_dma_alias); /* Tundra 8113, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c43 */ DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias); /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias); /* ITE 8893 has the same problem as the 8892 */ DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8893, quirk_use_pcie_bridge_dma_alias); /* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */ DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias); /* * MIC x200 NTB forwards PCIe traffic using multiple alien RIDs. They have to * be added as aliases to the DMA device in order to allow buffer access * when IOMMU is enabled. Following devfns have to match RIT-LUT table * programmed in the EEPROM. */ static void quirk_mic_x200_dma_alias(struct pci_dev *pdev) { pci_add_dma_alias(pdev, PCI_DEVFN(0x10, 0x0), 1); pci_add_dma_alias(pdev, PCI_DEVFN(0x11, 0x0), 1); pci_add_dma_alias(pdev, PCI_DEVFN(0x12, 0x3), 1); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2260, quirk_mic_x200_dma_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2264, quirk_mic_x200_dma_alias); /* * Intel Visual Compute Accelerator (VCA) is a family of PCIe add-in devices * exposing computational units via Non Transparent Bridges (NTB, PEX 87xx). * * Similarly to MIC x200, we need to add DMA aliases to allow buffer access * when IOMMU is enabled. These aliases allow computational unit access to * host memory. These aliases mark the whole VCA device as one IOMMU * group. * * All possible slot numbers (0x20) are used, since we are unable to tell * what slot is used on other side. This quirk is intended for both host * and computational unit sides. The VCA devices have up to five functions * (four for DMA channels and one additional). 
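 *
 * Hence the loop below calls pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0), 5)
 * for each of the 0x20 possible slots, covering functions 0-4 of every
 * device number.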
*/ static void quirk_pex_vca_alias(struct pci_dev *pdev) { const unsigned int num_pci_slots = 0x20; unsigned int slot; for (slot = 0; slot < num_pci_slots; slot++) pci_add_dma_alias(pdev, PCI_DEVFN(slot, 0x0), 5); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2954, quirk_pex_vca_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2955, quirk_pex_vca_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2956, quirk_pex_vca_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2958, quirk_pex_vca_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2959, quirk_pex_vca_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x295A, quirk_pex_vca_alias); /* * The IOMMU and interrupt controller on Broadcom Vulcan/Cavium ThunderX2 are * associated not at the root bus, but at a bridge below. This quirk avoids * generating invalid DMA aliases. */ static void quirk_bridge_cavm_thrx2_pcie_root(struct pci_dev *pdev) { pdev->dev_flags |= PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT; } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9000, quirk_bridge_cavm_thrx2_pcie_root); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, 0x9084, quirk_bridge_cavm_thrx2_pcie_root); /* * Intersil/Techwell TW686[4589]-based video capture cards have an empty (zero) * class code. Fix it. */ static void quirk_tw686x_class(struct pci_dev *pdev) { u32 class = pdev->class; /* Use "Multimedia controller" class */ pdev->class = (PCI_CLASS_MULTIMEDIA_OTHER << 8) | 0x01; pci_info(pdev, "TW686x PCI class overridden (%#08x -> %#08x)\n", class, pdev->class); } DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6864, PCI_CLASS_NOT_DEFINED, 8, quirk_tw686x_class); DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6865, PCI_CLASS_NOT_DEFINED, 8, quirk_tw686x_class); DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6868, PCI_CLASS_NOT_DEFINED, 8, quirk_tw686x_class); DECLARE_PCI_FIXUP_CLASS_EARLY(0x1797, 0x6869, PCI_CLASS_NOT_DEFINED, 8, quirk_tw686x_class); /* * Some devices have problems with Transaction Layer Packets with the Relaxed * Ordering Attribute set. Such devices should mark themselves and other * device drivers should check before sending TLPs with RO set. */ static void quirk_relaxedordering_disable(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_NO_RELAXED_ORDERING; pci_info(dev, "Disable Relaxed Ordering Attributes to avoid PCIe Completion erratum\n"); } /* * Intel Xeon processors based on Broadwell/Haswell microarchitecture Root * Complex have a Flow Control Credit issue which can cause performance * problems with Upstream Transaction Layer Packets with Relaxed Ordering set. 
*/ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f01, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f02, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f03, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f04, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f05, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f06, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f07, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f08, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f09, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0a, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0b, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0c, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0d, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x6f0e, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f01, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f02, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f03, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f04, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f05, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f06, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f07, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f08, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f09, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0a, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0b, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0c, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0d, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, 0x2f0e, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); /* * The AMD ARM A1100 (aka "SEATTLE") SoC has a bug in its PCIe Root Complex * where Upstream Transaction Layer Packets with the Relaxed Ordering * Attribute clear are allowed to bypass earlier TLPs with Relaxed Ordering * set. 
This is a violation of the PCIe 3.0 Transaction Ordering Rules * outlined in Section 2.4.1 (PCI Express(r) Base Specification Revision 3.0 * November 10, 2010). As a result, on this platform we can't use Relaxed * Ordering for Upstream TLPs. */ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a00, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); /* * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same * values for the Attribute as were supplied in the header of the * corresponding Request, except as explicitly allowed when IDO is used." * * If a non-compliant device generates a completion with a different * attribute than the request, the receiver may accept it (which itself * seems non-compliant based on sec 2.3.2), or it may handle it as a * Malformed TLP or an Unexpected Completion, which will probably lead to a * device access timeout. * * If the non-compliant device generates completions with zero attributes * (instead of copying the attributes from the request), we can work around * this by disabling the "Relaxed Ordering" and "No Snoop" attributes in * upstream devices so they always generate requests with zero attributes. * * This affects other devices under the same Root Port, but since these * attributes are performance hints, there should be no functional problem. * * Note that Configuration Space accesses are never supposed to have TLP * Attributes, so we're safe waiting till after any Configuration Space * accesses to do the Root Port fixup. */ static void quirk_disable_root_port_attributes(struct pci_dev *pdev) { struct pci_dev *root_port = pcie_find_root_port(pdev); if (!root_port) { pci_warn(pdev, "PCIe Completion erratum may cause device errors\n"); return; } pci_info(root_port, "Disabling No Snoop/Relaxed Ordering Attributes to avoid PCIe Completion erratum in %s\n", dev_name(&pdev->dev)); pcie_capability_clear_and_set_word(root_port, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN, 0); } /* * The Chelsio T5 chip fails to copy TLP Attributes from a Request to the * Completion it generates. */ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev) { /* * This mask/compare operation selects for Physical Function 4 on a * T5. We only need to fix up the Root Port once for any of the * PFs. PF[0..3] have PCI Device IDs of 0x50xx, but PF4 is uniquely * 0x54xx so we use that one. */ if ((pdev->device & 0xff00) == 0x5400) quirk_disable_root_port_attributes(pdev); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, quirk_chelsio_T5_disable_root_port_attributes); /* * pci_acs_ctrl_enabled - compare desired ACS controls with those provided * by a device * @acs_ctrl_req: Bitmask of desired ACS controls * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by * the hardware design * * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included * in @acs_ctrl_ena, i.e., the device provides all the access controls the * caller desires. Return 0 otherwise. 
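 *
 * For example (illustrative values only):
 *   pci_acs_ctrl_enabled(PCI_ACS_RR | PCI_ACS_CR,
 *                        PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR) returns 1;
 *   pci_acs_ctrl_enabled(PCI_ACS_RR | PCI_ACS_SV, PCI_ACS_RR) returns 0.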
*/ static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena) { if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req) return 1; return 0; } /* * AMD has indicated that the devices below do not support peer-to-peer * in any system where they are found in the southbridge with an AMD * IOMMU in the system. Multifunction devices that do not support * peer-to-peer between functions can claim to support a subset of ACS. * Such devices effectively enable request redirect (RR) and completion * redirect (CR) since all transactions are redirected to the upstream * root complex. * * https://lore.kernel.org/r/[email protected]/ * https://lore.kernel.org/r/[email protected]/ * https://lore.kernel.org/r/[email protected]/ * * 1002:4385 SBx00 SMBus Controller * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller * 1002:4383 SBx00 Azalia (Intel HDA) * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller * 1002:4384 SBx00 PCI to PCI Bridge * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller * * https://bugzilla.kernel.org/show_bug.cgi?id=81841#c15 * * 1022:780f [AMD] FCH PCI Bridge * 1022:7809 [AMD] FCH USB OHCI Controller */ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) { #ifdef CONFIG_ACPI struct acpi_table_header *header = NULL; acpi_status status; /* Targeting multifunction devices on the SB (appears on root bus) */ if (!dev->multifunction || !pci_is_root_bus(dev->bus)) return -ENODEV; /* The IVRS table describes the AMD IOMMU */ status = acpi_get_table("IVRS", 0, &header); if (ACPI_FAILURE(status)) return -ENODEV; acpi_put_table(header); /* Filter out flags not applicable to multifunction */ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT); return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR); #else return -ENODEV; #endif } static bool pci_quirk_cavium_acs_match(struct pci_dev *dev) { if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) return false; switch (dev->device) { /* * Effectively selects all downstream ports for whole ThunderX1 * (which represents 8 SoCs). */ case 0xa000 ... 0xa7ff: /* ThunderX1 */ case 0xaf84: /* ThunderX2 */ case 0xb884: /* ThunderX3 */ return true; default: return false; } } static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags) { if (!pci_quirk_cavium_acs_match(dev)) return -ENOTTY; /* * Cavium Root Ports don't advertise an ACS capability. However, * the RTL internally implements similar protection as if ACS had * Source Validation, Request Redirection, Completion Redirection, * and Upstream Forwarding features enabled. Assert that the * hardware implements and enables equivalent ACS functionality for * these flags. */ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags) { /* * X-Gene Root Ports matching this quirk do not allow peer-to-peer * transactions with others, allowing masking out these bits as if they * were unimplemented in the ACS capability. */ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } /* * Many Zhaoxin Root Ports and Switch Downstream Ports have no ACS capability. * But the implementation could block peer-to-peer transactions between them * and provide ACS-like functionality. 
*/ static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags) { if (!pci_is_pcie(dev) || ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) && (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM))) return -ENOTTY; switch (dev->device) { case 0x0710 ... 0x071e: case 0x0721: case 0x0723 ... 0x0732: return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } return false; } /* * Many Intel PCH Root Ports do provide ACS-like features to disable peer * transactions and validate bus numbers in requests, but do not provide an * actual PCIe ACS capability. This is the list of device IDs known to fall * into that category as provided by Intel in Red Hat bugzilla 1037684. */ static const u16 pci_quirk_intel_pch_acs_ids[] = { /* Ibexpeak PCH */ 0x3b42, 0x3b43, 0x3b44, 0x3b45, 0x3b46, 0x3b47, 0x3b48, 0x3b49, 0x3b4a, 0x3b4b, 0x3b4c, 0x3b4d, 0x3b4e, 0x3b4f, 0x3b50, 0x3b51, /* Cougarpoint PCH */ 0x1c10, 0x1c11, 0x1c12, 0x1c13, 0x1c14, 0x1c15, 0x1c16, 0x1c17, 0x1c18, 0x1c19, 0x1c1a, 0x1c1b, 0x1c1c, 0x1c1d, 0x1c1e, 0x1c1f, /* Pantherpoint PCH */ 0x1e10, 0x1e11, 0x1e12, 0x1e13, 0x1e14, 0x1e15, 0x1e16, 0x1e17, 0x1e18, 0x1e19, 0x1e1a, 0x1e1b, 0x1e1c, 0x1e1d, 0x1e1e, 0x1e1f, /* Lynxpoint-H PCH */ 0x8c10, 0x8c11, 0x8c12, 0x8c13, 0x8c14, 0x8c15, 0x8c16, 0x8c17, 0x8c18, 0x8c19, 0x8c1a, 0x8c1b, 0x8c1c, 0x8c1d, 0x8c1e, 0x8c1f, /* Lynxpoint-LP PCH */ 0x9c10, 0x9c11, 0x9c12, 0x9c13, 0x9c14, 0x9c15, 0x9c16, 0x9c17, 0x9c18, 0x9c19, 0x9c1a, 0x9c1b, /* Wildcat PCH */ 0x9c90, 0x9c91, 0x9c92, 0x9c93, 0x9c94, 0x9c95, 0x9c96, 0x9c97, 0x9c98, 0x9c99, 0x9c9a, 0x9c9b, /* Patsburg (X79) PCH */ 0x1d10, 0x1d12, 0x1d14, 0x1d16, 0x1d18, 0x1d1a, 0x1d1c, 0x1d1e, /* Wellsburg (X99) PCH */ 0x8d10, 0x8d11, 0x8d12, 0x8d13, 0x8d14, 0x8d15, 0x8d16, 0x8d17, 0x8d18, 0x8d19, 0x8d1a, 0x8d1b, 0x8d1c, 0x8d1d, 0x8d1e, /* Lynx Point (9 series) PCH */ 0x8c90, 0x8c92, 0x8c94, 0x8c96, 0x8c98, 0x8c9a, 0x8c9c, 0x8c9e, }; static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev) { int i; /* Filter out a few obvious non-matches first */ if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) return false; for (i = 0; i < ARRAY_SIZE(pci_quirk_intel_pch_acs_ids); i++) if (pci_quirk_intel_pch_acs_ids[i] == dev->device) return true; return false; } static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags) { if (!pci_quirk_intel_pch_acs_match(dev)) return -ENOTTY; if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK) return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); return pci_acs_ctrl_enabled(acs_flags, 0); } /* * These QCOM Root Ports do provide ACS-like features to disable peer * transactions and validate bus numbers in requests, but do not provide an * actual PCIe ACS capability. Hardware supports source validation but it * will report the issue as Completer Abort instead of ACS Violation. * Hardware doesn't support peer-to-peer and each Root Port is a Root * Complex with unique segment numbers. It is not possible for one Root * Port to pass traffic to another Root Port. All PCIe transactions are * terminated inside the Root Port. */ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags) { return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } /* * Each of these NXP Root Ports is in a Root Complex with a unique segment * number and does provide isolation features to disable peer transactions * and validate bus numbers in requests, but does not provide an ACS * capability. 
*/ static int pci_quirk_nxp_rp_acs(struct pci_dev *dev, u16 acs_flags) { return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags) { if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) return -ENOTTY; /* * Amazon's Annapurna Labs root ports don't include an ACS capability, * but do include ACS-like functionality. The hardware doesn't support * peer-to-peer transactions via the root port and each has a unique * segment number. * * Additionally, the root ports cannot send traffic to each other. */ acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); return acs_flags ? 0 : 1; } /* * Sunrise Point PCH root ports implement ACS, but unfortunately as shown in * the datasheet (Intel 100 Series Chipset Family PCH Datasheet, Vol. 2, * 12.1.46, 12.1.47)[1] this chipset uses dwords for the ACS capability and * control registers whereas the PCIe spec packs them into words (Rev 3.0, * 7.16 ACS Extended Capability). The bit definitions are correct, but the * control register is at offset 8 instead of 6 and we should probably use * dword accesses to them. This applies to the following PCI Device IDs, as * found in volume 1 of the datasheet[2]: * * 0xa110-0xa11f Sunrise Point-H PCI Express Root Port #{0-16} * 0xa167-0xa16a Sunrise Point-H PCI Express Root Port #{17-20} * * N.B. This doesn't fix what lspci shows. * * The 100 series chipset specification update includes this as errata #23[3]. * * The 200 series chipset (Union Point) has the same bug according to the * specification update (Intel 200 Series Chipset Family Platform Controller * Hub, Specification Update, January 2017, Revision 001, Document# 335194-001, * Errata 22)[4]. Per the datasheet[5], root port PCI Device IDs for this * chipset include: * * 0xa290-0xa29f PCI Express Root port #{0-16} * 0xa2e7-0xa2ee PCI Express Root port #{17-24} * * Mobile chipsets are also affected, 7th & 8th Generation * Specification update confirms ACS errata 22, status no fix: (7th Generation * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel * Processor Family I/O for U Quad Core Platforms Specification Update, * August 2017, Revision 002, Document#: 334660-002)[6] * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O * for U/Y Platforms and 8th Generation Intel ® Processor Family I/O for U * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7] * * 0x9d10-0x9d1b PCI Express Root port #{1-12} * * [1] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html * [2] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html * [3] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html * [4] https://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html * [5] https://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html */ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev) { if (!pci_is_pcie(dev) || pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) return false; switch (dev->device) { case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */ case 0xa290 ... 
0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */ case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */ return true; } return false; } #define INTEL_SPT_ACS_CTRL (PCI_ACS_CAP + 4) static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags) { int pos; u32 cap, ctrl; if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; pos = dev->acs_cap; if (!pos) return -ENOTTY; /* see pci_acs_flags_enabled() */ pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap); acs_flags &= (cap | PCI_ACS_EC); pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); return pci_acs_ctrl_enabled(acs_flags, ctrl); } static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags) { /* * SV, TB, and UF are not relevant to multifunction endpoints. * * Multifunction devices are only required to implement RR, CR, and DT * in their ACS capability if they support peer-to-peer transactions. * Devices matching this quirk have been verified by the vendor to not * perform peer-to-peer with other functions, allowing us to mask out * these bits as if they were unimplemented in the ACS capability. */ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT); } static int pci_quirk_rciep_acs(struct pci_dev *dev, u16 acs_flags) { /* * Intel RCiEP's are required to allow p2p only on translated * addresses. Refer to Intel VT-d specification, r3.1, sec 3.16, * "Root-Complex Peer to Peer Considerations". */ if (pci_pcie_type(dev) != PCI_EXP_TYPE_RC_END) return -ENOTTY; return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags) { /* * iProc PAXB Root Ports don't advertise an ACS capability, but * they do not allow peer-to-peer transactions between Root Ports. * Allow each Root Port to be in a separate IOMMU group by masking * SV/RR/CR/UF bits. */ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } /* * Wangxun 10G/1G NICs have no ACS capability, and on multi-function * devices, peer-to-peer transactions are not used between the functions. * So add an ACS quirk for the devices below to isolate functions. * SFxxx 1G NICs(em). * RP1000/RP2000 10G NICs(sp). */ static int pci_quirk_wangxun_nic_acs(struct pci_dev *dev, u16 acs_flags) { switch (dev->device) { case 0x0100 ...
0x010F: case 0x1001: case 0x2001: return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF); } return false; } static const struct pci_dev_acs_enabled { u16 vendor; u16 device; int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags); } pci_dev_acs_enabled[] = { { PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_AMD, 0x780f, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_AMD, 0x7809, pci_quirk_amd_sb_acs }, { PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_SOLARFLARE, 0x0A03, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs }, /* 82580 */ { PCI_VENDOR_ID_INTEL, 0x1509, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x150E, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x150F, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1510, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1511, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1516, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1527, pci_quirk_mf_endpoint_acs }, /* 82576 */ { PCI_VENDOR_ID_INTEL, 0x10C9, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10E6, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10E7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10E8, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x150A, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x150D, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1518, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1526, pci_quirk_mf_endpoint_acs }, /* 82575 */ { PCI_VENDOR_ID_INTEL, 0x10A7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10A9, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10D6, pci_quirk_mf_endpoint_acs }, /* I350 */ { PCI_VENDOR_ID_INTEL, 0x1521, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1522, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1523, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1524, pci_quirk_mf_endpoint_acs }, /* 82571 (Quads omitted due to non-ACS switch) */ { PCI_VENDOR_ID_INTEL, 
0x105E, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x105F, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x1060, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x10D9, pci_quirk_mf_endpoint_acs }, /* I219 */ { PCI_VENDOR_ID_INTEL, 0x15b7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, 0x15b8, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_rciep_acs }, /* QCOM QDF2xxx root ports */ { PCI_VENDOR_ID_QCOM, 0x0400, pci_quirk_qcom_rp_acs }, { PCI_VENDOR_ID_QCOM, 0x0401, pci_quirk_qcom_rp_acs }, /* HXT SD4800 root ports. The ACS design is same as QCOM QDF2xxx */ { PCI_VENDOR_ID_HXT, 0x0401, pci_quirk_qcom_rp_acs }, /* Intel PCH root ports */ { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs }, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_spt_pch_acs }, { 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */ { 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */ /* Cavium ThunderX */ { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs }, /* Cavium multi-function devices */ { PCI_VENDOR_ID_CAVIUM, 0xA026, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_CAVIUM, 0xA059, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_CAVIUM, 0xA060, pci_quirk_mf_endpoint_acs }, /* APM X-Gene */ { PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs }, /* Ampere Computing */ { PCI_VENDOR_ID_AMPERE, 0xE005, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE006, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE007, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE008, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE009, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, /* Broadcom multi-function device */ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0x1751, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0x1752, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs }, /* Amazon Annapurna Labs */ { PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs }, /* Zhaoxin multi-function devices */ { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs }, /* NXP root ports, xx=16, 12, or 08 cores */ /* LX2xx0A : without security features + CAN-FD */ { PCI_VENDOR_ID_NXP, 0x8d81, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8da1, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8d83, pci_quirk_nxp_rp_acs }, /* LX2xx0C : security features + CAN-FD */ { PCI_VENDOR_ID_NXP, 0x8d80, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8da0, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8d82, pci_quirk_nxp_rp_acs }, /* LX2xx0E : security features + CAN */ { PCI_VENDOR_ID_NXP, 0x8d90, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8db0, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8d92, pci_quirk_nxp_rp_acs }, /* LX2xx0N : without security features + CAN */ { PCI_VENDOR_ID_NXP, 0x8d91, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8db1, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8d93, pci_quirk_nxp_rp_acs }, /* LX2xx2A : without security features + CAN-FD */ { PCI_VENDOR_ID_NXP, 0x8d89, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8da9, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8d8b, pci_quirk_nxp_rp_acs }, /* LX2xx2C : security 
features + CAN-FD */ { PCI_VENDOR_ID_NXP, 0x8d88, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8da8, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8d8a, pci_quirk_nxp_rp_acs }, /* LX2xx2E : security features + CAN */ { PCI_VENDOR_ID_NXP, 0x8d98, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8db8, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8d9a, pci_quirk_nxp_rp_acs }, /* LX2xx2N : without security features + CAN */ { PCI_VENDOR_ID_NXP, 0x8d99, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8db9, pci_quirk_nxp_rp_acs }, { PCI_VENDOR_ID_NXP, 0x8d9b, pci_quirk_nxp_rp_acs }, /* Zhaoxin Root/Downstream Ports */ { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs }, /* Wangxun nics */ { PCI_VENDOR_ID_WANGXUN, PCI_ANY_ID, pci_quirk_wangxun_nic_acs }, { 0 } }; /* * pci_dev_specific_acs_enabled - check whether device provides ACS controls * @dev: PCI device * @acs_flags: Bitmask of desired ACS controls * * Returns: * -ENOTTY: No quirk applies to this device; we can't tell whether the * device provides the desired controls * 0: Device does not provide all the desired controls * >0: Device provides all the controls in @acs_flags */ int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags) { const struct pci_dev_acs_enabled *i; int ret; /* * Allow devices that do not expose standard PCIe ACS capabilities * or control to indicate their support here. Multi-function express * devices which do not allow internal peer-to-peer between functions, * but do not implement PCIe ACS may wish to return true here. */ for (i = pci_dev_acs_enabled; i->acs_enabled; i++) { if ((i->vendor == dev->vendor || i->vendor == (u16)PCI_ANY_ID) && (i->device == dev->device || i->device == (u16)PCI_ANY_ID)) { ret = i->acs_enabled(dev, acs_flags); if (ret >= 0) return ret; } } return -ENOTTY; } /* Config space offset of Root Complex Base Address register */ #define INTEL_LPC_RCBA_REG 0xf0 /* 31:14 RCBA address */ #define INTEL_LPC_RCBA_MASK 0xffffc000 /* RCBA Enable */ #define INTEL_LPC_RCBA_ENABLE (1 << 0) /* Backbone Scratch Pad Register */ #define INTEL_BSPR_REG 0x1104 /* Backbone Peer Non-Posted Disable */ #define INTEL_BSPR_REG_BPNPD (1 << 8) /* Backbone Peer Posted Disable */ #define INTEL_BSPR_REG_BPPD (1 << 9) /* Upstream Peer Decode Configuration Register */ #define INTEL_UPDCR_REG 0x1014 /* 5:0 Peer Decode Enable bits */ #define INTEL_UPDCR_REG_MASK 0x3f static int pci_quirk_enable_intel_lpc_acs(struct pci_dev *dev) { u32 rcba, bspr, updcr; void __iomem *rcba_mem; /* * Read the RCBA register from the LPC (D31:F0). PCH root ports * are D28:F* and therefore get probed before LPC, thus we can't * use pci_get_slot()/pci_read_config_dword() here. */ pci_bus_read_config_dword(dev->bus, PCI_DEVFN(31, 0), INTEL_LPC_RCBA_REG, &rcba); if (!(rcba & INTEL_LPC_RCBA_ENABLE)) return -EINVAL; rcba_mem = ioremap(rcba & INTEL_LPC_RCBA_MASK, PAGE_ALIGN(INTEL_UPDCR_REG)); if (!rcba_mem) return -ENOMEM; /* * The BSPR can disallow peer cycles, but it's set by soft strap and * therefore read-only. If both posted and non-posted peer cycles are * disallowed, we're ok. If either are allowed, then we need to use * the UPDCR to disable peer decodes for each port. 
This provides the * PCIe ACS equivalent of PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF */ bspr = readl(rcba_mem + INTEL_BSPR_REG); bspr &= INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD; if (bspr != (INTEL_BSPR_REG_BPNPD | INTEL_BSPR_REG_BPPD)) { updcr = readl(rcba_mem + INTEL_UPDCR_REG); if (updcr & INTEL_UPDCR_REG_MASK) { pci_info(dev, "Disabling UPDCR peer decodes\n"); updcr &= ~INTEL_UPDCR_REG_MASK; writel(updcr, rcba_mem + INTEL_UPDCR_REG); } } iounmap(rcba_mem); return 0; } /* Miscellaneous Port Configuration register */ #define INTEL_MPC_REG 0xd8 /* MPC: Invalid Receive Bus Number Check Enable */ #define INTEL_MPC_REG_IRBNCE (1 << 26) static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev) { u32 mpc; /* * When enabled, the IRBNCE bit of the MPC register enables the * equivalent of PCI ACS Source Validation (PCI_ACS_SV), which * ensures that requester IDs fall within the bus number range * of the bridge. Enable if not already. */ pci_read_config_dword(dev, INTEL_MPC_REG, &mpc); if (!(mpc & INTEL_MPC_REG_IRBNCE)) { pci_info(dev, "Enabling MPC IRBNCE\n"); mpc |= INTEL_MPC_REG_IRBNCE; pci_write_config_word(dev, INTEL_MPC_REG, mpc); } } /* * Currently this quirk does the equivalent of * PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF * * TODO: This quirk also needs to do equivalent of PCI_ACS_TB, * if dev->external_facing || dev->untrusted */ static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev) { if (!pci_quirk_intel_pch_acs_match(dev)) return -ENOTTY; if (pci_quirk_enable_intel_lpc_acs(dev)) { pci_warn(dev, "Failed to enable Intel PCH ACS quirk\n"); return 0; } pci_quirk_enable_intel_rp_mpc_acs(dev); dev->dev_flags |= PCI_DEV_FLAGS_ACS_ENABLED_QUIRK; pci_info(dev, "Intel PCH root port ACS workaround enabled\n"); return 0; } static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev) { int pos; u32 cap, ctrl; if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; pos = dev->acs_cap; if (!pos) return -ENOTTY; pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap); pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); ctrl |= (cap & PCI_ACS_SV); ctrl |= (cap & PCI_ACS_RR); ctrl |= (cap & PCI_ACS_CR); ctrl |= (cap & PCI_ACS_UF); if (pci_ats_disabled() || dev->external_facing || dev->untrusted) ctrl |= (cap & PCI_ACS_TB); pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl); pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n"); return 0; } static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev) { int pos; u32 cap, ctrl; if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; pos = dev->acs_cap; if (!pos) return -ENOTTY; pci_read_config_dword(dev, pos + PCI_ACS_CAP, &cap); pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl); ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC); pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl); pci_info(dev, "Intel SPT PCH root port workaround: disabled ACS redirect\n"); return 0; } static const struct pci_dev_acs_ops { u16 vendor; u16 device; int (*enable_acs)(struct pci_dev *dev); int (*disable_acs_redir)(struct pci_dev *dev); } pci_dev_acs_ops[] = { { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, .enable_acs = pci_quirk_enable_intel_pch_acs, }, { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, .enable_acs = pci_quirk_enable_intel_spt_pch_acs, .disable_acs_redir = pci_quirk_disable_intel_spt_pch_acs_redir, }, }; int pci_dev_specific_enable_acs(struct pci_dev *dev) { const struct pci_dev_acs_ops *p; int i, ret; for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) { p = &pci_dev_acs_ops[i]; if ((p->vendor 
== dev->vendor || p->vendor == (u16)PCI_ANY_ID) && (p->device == dev->device || p->device == (u16)PCI_ANY_ID) && p->enable_acs) { ret = p->enable_acs(dev); if (ret >= 0) return ret; } } return -ENOTTY; } int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) { const struct pci_dev_acs_ops *p; int i, ret; for (i = 0; i < ARRAY_SIZE(pci_dev_acs_ops); i++) { p = &pci_dev_acs_ops[i]; if ((p->vendor == dev->vendor || p->vendor == (u16)PCI_ANY_ID) && (p->device == dev->device || p->device == (u16)PCI_ANY_ID) && p->disable_acs_redir) { ret = p->disable_acs_redir(dev); if (ret >= 0) return ret; } } return -ENOTTY; } /* * The PCI capabilities list for Intel DH895xCC VFs (device ID 0x0443) with * QuickAssist Technology (QAT) is prematurely terminated in hardware. The * Next Capability pointer in the MSI Capability Structure should point to * the PCIe Capability Structure but is incorrectly hardwired as 0 terminating * the list. */ static void quirk_intel_qat_vf_cap(struct pci_dev *pdev) { int pos, i = 0; u8 next_cap; u16 reg16, *cap; struct pci_cap_saved_state *state; /* Bail if the hardware bug is fixed */ if (pdev->pcie_cap || pci_find_capability(pdev, PCI_CAP_ID_EXP)) return; /* Bail if MSI Capability Structure is not found for some reason */ pos = pci_find_capability(pdev, PCI_CAP_ID_MSI); if (!pos) return; /* * Bail if Next Capability pointer in the MSI Capability Structure * is not the expected incorrect 0x00. */ pci_read_config_byte(pdev, pos + 1, &next_cap); if (next_cap) return; /* * PCIe Capability Structure is expected to be at 0x50 and should * terminate the list (Next Capability pointer is 0x00). Verify * Capability Id and Next Capability pointer is as expected. * Open-code some of set_pcie_port_type() and pci_cfg_space_size_ext() * to correctly set kernel data structures which have already been * set incorrectly due to the hardware bug. 
*/ pos = 0x50; pci_read_config_word(pdev, pos, &reg16); if (reg16 == (0x0000 | PCI_CAP_ID_EXP)) { u32 status; #ifndef PCI_EXP_SAVE_REGS #define PCI_EXP_SAVE_REGS 7 #endif int size = PCI_EXP_SAVE_REGS * sizeof(u16); pdev->pcie_cap = pos; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); pdev->pcie_flags_reg = reg16; pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16); pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD; pdev->cfg_size = PCI_CFG_SPACE_EXP_SIZE; if (pci_read_config_dword(pdev, PCI_CFG_SPACE_SIZE, &status) != PCIBIOS_SUCCESSFUL || (status == 0xffffffff)) pdev->cfg_size = PCI_CFG_SPACE_SIZE; if (pci_find_saved_cap(pdev, PCI_CAP_ID_EXP)) return; /* Save PCIe cap */ state = kzalloc(sizeof(*state) + size, GFP_KERNEL); if (!state) return; state->cap.cap_nr = PCI_CAP_ID_EXP; state->cap.cap_extended = 0; state->cap.size = size; cap = (u16 *)&state->cap.data[0]; pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_RTCTL, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_LNKCTL2, &cap[i++]); pcie_capability_read_word(pdev, PCI_EXP_SLTCTL2, &cap[i++]); hlist_add_head(&state->next, &pdev->saved_cap_space); } } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap); /* * FLR may cause the following to devices to hang: * * AMD Starship/Matisse HD Audio Controller 0x1487 * AMD Starship USB 3.0 Host Controller 0x148c * AMD Matisse USB 3.0 Host Controller 0x149c * Intel 82579LM Gigabit Ethernet Controller 0x1502 * Intel 82579V Gigabit Ethernet Controller 0x1503 * */ static void quirk_no_flr(struct pci_dev *dev) { dev->dev_flags |= PCI_DEV_FLAGS_NO_FLR_RESET; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1487, quirk_no_flr); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x148c, quirk_no_flr); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr); /* FLR may cause the SolidRun SNET DPU (rev 0x1) to hang */ static void quirk_no_flr_snet(struct pci_dev *dev) { if (dev->revision == 0x1) quirk_no_flr(dev); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SOLIDRUN, 0x1000, quirk_no_flr_snet); static void quirk_no_ext_tags(struct pci_dev *pdev) { struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus); if (!bridge) return; bridge->no_ext_tags = 1; pci_info(pdev, "disabling Extended Tags (this device can't handle them)\n"); pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL); } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); #ifdef CONFIG_PCI_ATS /* * Some devices require additional driver setup to enable ATS. Don't use * ATS for those devices as ATS will be enabled before the driver has had a * chance to load and configure the device. 
*/ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { if (pdev->device == 0x15d8) { if (pdev->revision == 0xcf && pdev->subsystem_vendor == 0xea50 && (pdev->subsystem_device == 0xce19 || pdev->subsystem_device == 0xcc10 || pdev->subsystem_device == 0xcc08)) goto no_ats; else return; } no_ats: pci_info(pdev, "disabling ATS\n"); pdev->ats_cap = 0; } /* AMD Stoney platform GPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); /* AMD Iceland dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats); /* AMD Navi10 dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7310, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7318, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7319, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731a, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731b, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731e, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x731f, quirk_amd_harvest_no_ats); /* AMD Navi14 dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7341, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7347, quirk_amd_harvest_no_ats); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x734f, quirk_amd_harvest_no_ats); /* AMD Raven platform iGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x15d8, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ /* Freescale PCIe doesn't support MSI in RC mode */ static void quirk_fsl_no_msi(struct pci_dev *pdev) { if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT) pdev->no_msi = 1; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi); /* * Although not allowed by the spec, some multi-function devices have * dependencies of one function (consumer) on another (supplier). For the * consumer to work in D0, the supplier must also be in D0. Create a * device link from the consumer to the supplier to enforce this * dependency. Runtime PM is allowed by default on the consumer to prevent * it from permanently keeping the supplier awake. */ static void pci_create_device_link(struct pci_dev *pdev, unsigned int consumer, unsigned int supplier, unsigned int class, unsigned int class_shift) { struct pci_dev *supplier_pdev; if (PCI_FUNC(pdev->devfn) != consumer) return; supplier_pdev = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), pdev->bus->number, PCI_DEVFN(PCI_SLOT(pdev->devfn), supplier)); if (!supplier_pdev || (supplier_pdev->class >> class_shift) != class) { pci_dev_put(supplier_pdev); return; } if (device_link_add(&pdev->dev, &supplier_pdev->dev, DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME)) pci_info(pdev, "D0 power state depends on %s\n", pci_name(supplier_pdev)); else pci_err(pdev, "Cannot enforce power dependency on %s\n", pci_name(supplier_pdev)); pm_runtime_allow(&pdev->dev); pci_dev_put(supplier_pdev); } /* * Create device link for GPUs with integrated HDA controller for streaming * audio to attached displays. 
*/ static void quirk_gpu_hda(struct pci_dev *hda) { pci_create_device_link(hda, 1, 0, PCI_BASE_CLASS_DISPLAY, 16); } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda); /* * Create device link for GPUs with integrated USB xHCI Host * controller to VGA. */ static void quirk_gpu_usb(struct pci_dev *usb) { pci_create_device_link(usb, 2, 0, PCI_BASE_CLASS_DISPLAY, 16); } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_CLASS_SERIAL_USB, 8, quirk_gpu_usb); /* * Create device link for GPUs with integrated Type-C UCSI controller * to VGA. Currently there is no class code defined for UCSI device over PCI * so using UNKNOWN class for now and it will be updated when UCSI * over PCI gets a class code. */ #define PCI_CLASS_SERIAL_UNKNOWN 0x0c80 static void quirk_gpu_usb_typec_ucsi(struct pci_dev *ucsi) { pci_create_device_link(ucsi, 3, 0, PCI_BASE_CLASS_DISPLAY, 16); } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_CLASS_SERIAL_UNKNOWN, 8, quirk_gpu_usb_typec_ucsi); DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID, PCI_CLASS_SERIAL_UNKNOWN, 8, quirk_gpu_usb_typec_ucsi); /* * Enable the NVIDIA GPU integrated HDA controller if the BIOS left it * disabled. https://devtalk.nvidia.com/default/topic/1024022 */ static void quirk_nvidia_hda(struct pci_dev *gpu) { u8 hdr_type; u32 val; /* There was no integrated HDA controller before MCP89 */ if (gpu->device < PCI_DEVICE_ID_NVIDIA_GEFORCE_320M) return; /* Bit 25 at offset 0x488 enables the HDA controller */ pci_read_config_dword(gpu, 0x488, &val); if (val & BIT(25)) return; pci_info(gpu, "Enabling HDA controller\n"); pci_write_config_dword(gpu, 0x488, val | BIT(25)); /* The GPU becomes a multi-function device when the HDA is enabled */ pci_read_config_byte(gpu, PCI_HEADER_TYPE, &hdr_type); gpu->multifunction = !!(hdr_type & 0x80); } DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda); DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, 16, quirk_nvidia_hda); /* * Some IDT switches incorrectly flag an ACS Source Validation error on * completions for config read requests even though PCIe r4.0, sec * 6.12.1.1, says that completions are never affected by ACS Source * Validation. Here's the text of IDT 89H32H8G3-YC, erratum #36: * * Item #36 - Downstream port applies ACS Source Validation to Completions * Section 6.12.1.1 of the PCI Express Base Specification 3.1 states that * completions are never affected by ACS Source Validation. However, * completions received by a downstream port of the PCIe switch from a * device that has not yet captured a PCIe bus number are incorrectly * dropped by ACS Source Validation by the switch downstream port. * * The workaround suggested by IDT is to issue a config write to the * downstream device before issuing the first config read. This allows the * downstream device to capture its bus and device numbers (see PCIe r4.0, * sec 2.2.9), thus avoiding the ACS error on the completion. 
* * However, we don't know when the device is ready to accept the config * write, so we do config reads until we receive a non-Config Request Retry * Status, then do the config write. * * To avoid hitting the erratum when doing the config reads, we disable ACS * SV around this process. */ int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout) { int pos; u16 ctrl = 0; bool found; struct pci_dev *bridge = bus->self; pos = bridge->acs_cap; /* Disable ACS SV before initial config reads */ if (pos) { pci_read_config_word(bridge, pos + PCI_ACS_CTRL, &ctrl); if (ctrl & PCI_ACS_SV) pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl & ~PCI_ACS_SV); } found = pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout); /* Write Vendor ID (read-only) so the endpoint latches its bus/dev */ if (found) pci_bus_write_config_word(bus, devfn, PCI_VENDOR_ID, 0); /* Re-enable ACS_SV if it was previously enabled */ if (ctrl & PCI_ACS_SV) pci_write_config_word(bridge, pos + PCI_ACS_CTRL, ctrl); return found; } /* * Microsemi Switchtec NTB uses devfn proxy IDs to move TLPs between * NT endpoints via the internal switch fabric. These IDs replace the * originating Requester ID TLPs which access host memory on peer NTB * ports. Therefore, all proxy IDs must be aliased to the NTB device * to permit access when the IOMMU is turned on. */ static void quirk_switchtec_ntb_dma_alias(struct pci_dev *pdev) { void __iomem *mmio; struct ntb_info_regs __iomem *mmio_ntb; struct ntb_ctrl_regs __iomem *mmio_ctrl; u64 partition_map; u8 partition; int pp; if (pci_enable_device(pdev)) { pci_err(pdev, "Cannot enable Switchtec device\n"); return; } mmio = pci_iomap(pdev, 0, 0); if (mmio == NULL) { pci_disable_device(pdev); pci_err(pdev, "Cannot iomap Switchtec device\n"); return; } pci_info(pdev, "Setting Switchtec proxy ID aliases\n"); mmio_ntb = mmio + SWITCHTEC_GAS_NTB_OFFSET; mmio_ctrl = (void __iomem *) mmio_ntb + SWITCHTEC_NTB_REG_CTRL_OFFSET; partition = ioread8(&mmio_ntb->partition_id); partition_map = ioread32(&mmio_ntb->ep_map); partition_map |= ((u64) ioread32(&mmio_ntb->ep_map + 4)) << 32; partition_map &= ~(1ULL << partition); for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) { struct ntb_ctrl_regs __iomem *mmio_peer_ctrl; u32 table_sz = 0; int te; if (!(partition_map & (1ULL << pp))) continue; pci_dbg(pdev, "Processing partition %d\n", pp); mmio_peer_ctrl = &mmio_ctrl[pp]; table_sz = ioread16(&mmio_peer_ctrl->req_id_table_size); if (!table_sz) { pci_warn(pdev, "Partition %d table_sz 0\n", pp); continue; } if (table_sz > 512) { pci_warn(pdev, "Invalid Switchtec partition %d table_sz %d\n", pp, table_sz); continue; } for (te = 0; te < table_sz; te++) { u32 rid_entry; u8 devfn; rid_entry = ioread32(&mmio_peer_ctrl->req_id_table[te]); devfn = (rid_entry >> 1) & 0xFF; pci_dbg(pdev, "Aliasing Partition %d Proxy ID %02x.%d\n", pp, PCI_SLOT(devfn), PCI_FUNC(devfn)); pci_add_dma_alias(pdev, devfn, 1); } } pci_iounmap(pdev, mmio); pci_disable_device(pdev); } #define SWITCHTEC_QUIRK(vid) \ DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_MICROSEMI, vid, \ PCI_CLASS_BRIDGE_OTHER, 8, quirk_switchtec_ntb_dma_alias) SWITCHTEC_QUIRK(0x8531); /* PFX 24xG3 */ SWITCHTEC_QUIRK(0x8532); /* PFX 32xG3 */ SWITCHTEC_QUIRK(0x8533); /* PFX 48xG3 */ SWITCHTEC_QUIRK(0x8534); /* PFX 64xG3 */ SWITCHTEC_QUIRK(0x8535); /* PFX 80xG3 */ SWITCHTEC_QUIRK(0x8536); /* PFX 96xG3 */ SWITCHTEC_QUIRK(0x8541); /* PSX 24xG3 */ SWITCHTEC_QUIRK(0x8542); /* PSX 32xG3 */ SWITCHTEC_QUIRK(0x8543); /* PSX 48xG3 */ SWITCHTEC_QUIRK(0x8544); /* PSX 
64xG3 */ SWITCHTEC_QUIRK(0x8545); /* PSX 80xG3 */ SWITCHTEC_QUIRK(0x8546); /* PSX 96xG3 */ SWITCHTEC_QUIRK(0x8551); /* PAX 24XG3 */ SWITCHTEC_QUIRK(0x8552); /* PAX 32XG3 */ SWITCHTEC_QUIRK(0x8553); /* PAX 48XG3 */ SWITCHTEC_QUIRK(0x8554); /* PAX 64XG3 */ SWITCHTEC_QUIRK(0x8555); /* PAX 80XG3 */ SWITCHTEC_QUIRK(0x8556); /* PAX 96XG3 */ SWITCHTEC_QUIRK(0x8561); /* PFXL 24XG3 */ SWITCHTEC_QUIRK(0x8562); /* PFXL 32XG3 */ SWITCHTEC_QUIRK(0x8563); /* PFXL 48XG3 */ SWITCHTEC_QUIRK(0x8564); /* PFXL 64XG3 */ SWITCHTEC_QUIRK(0x8565); /* PFXL 80XG3 */ SWITCHTEC_QUIRK(0x8566); /* PFXL 96XG3 */ SWITCHTEC_QUIRK(0x8571); /* PFXI 24XG3 */ SWITCHTEC_QUIRK(0x8572); /* PFXI 32XG3 */ SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */ SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */ SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */ SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */ SWITCHTEC_QUIRK(0x4000); /* PFX 100XG4 */ SWITCHTEC_QUIRK(0x4084); /* PFX 84XG4 */ SWITCHTEC_QUIRK(0x4068); /* PFX 68XG4 */ SWITCHTEC_QUIRK(0x4052); /* PFX 52XG4 */ SWITCHTEC_QUIRK(0x4036); /* PFX 36XG4 */ SWITCHTEC_QUIRK(0x4028); /* PFX 28XG4 */ SWITCHTEC_QUIRK(0x4100); /* PSX 100XG4 */ SWITCHTEC_QUIRK(0x4184); /* PSX 84XG4 */ SWITCHTEC_QUIRK(0x4168); /* PSX 68XG4 */ SWITCHTEC_QUIRK(0x4152); /* PSX 52XG4 */ SWITCHTEC_QUIRK(0x4136); /* PSX 36XG4 */ SWITCHTEC_QUIRK(0x4128); /* PSX 28XG4 */ SWITCHTEC_QUIRK(0x4200); /* PAX 100XG4 */ SWITCHTEC_QUIRK(0x4284); /* PAX 84XG4 */ SWITCHTEC_QUIRK(0x4268); /* PAX 68XG4 */ SWITCHTEC_QUIRK(0x4252); /* PAX 52XG4 */ SWITCHTEC_QUIRK(0x4236); /* PAX 36XG4 */ SWITCHTEC_QUIRK(0x4228); /* PAX 28XG4 */ SWITCHTEC_QUIRK(0x4352); /* PFXA 52XG4 */ SWITCHTEC_QUIRK(0x4336); /* PFXA 36XG4 */ SWITCHTEC_QUIRK(0x4328); /* PFXA 28XG4 */ SWITCHTEC_QUIRK(0x4452); /* PSXA 52XG4 */ SWITCHTEC_QUIRK(0x4436); /* PSXA 36XG4 */ SWITCHTEC_QUIRK(0x4428); /* PSXA 28XG4 */ SWITCHTEC_QUIRK(0x4552); /* PAXA 52XG4 */ SWITCHTEC_QUIRK(0x4536); /* PAXA 36XG4 */ SWITCHTEC_QUIRK(0x4528); /* PAXA 28XG4 */ SWITCHTEC_QUIRK(0x5000); /* PFX 100XG5 */ SWITCHTEC_QUIRK(0x5084); /* PFX 84XG5 */ SWITCHTEC_QUIRK(0x5068); /* PFX 68XG5 */ SWITCHTEC_QUIRK(0x5052); /* PFX 52XG5 */ SWITCHTEC_QUIRK(0x5036); /* PFX 36XG5 */ SWITCHTEC_QUIRK(0x5028); /* PFX 28XG5 */ SWITCHTEC_QUIRK(0x5100); /* PSX 100XG5 */ SWITCHTEC_QUIRK(0x5184); /* PSX 84XG5 */ SWITCHTEC_QUIRK(0x5168); /* PSX 68XG5 */ SWITCHTEC_QUIRK(0x5152); /* PSX 52XG5 */ SWITCHTEC_QUIRK(0x5136); /* PSX 36XG5 */ SWITCHTEC_QUIRK(0x5128); /* PSX 28XG5 */ SWITCHTEC_QUIRK(0x5200); /* PAX 100XG5 */ SWITCHTEC_QUIRK(0x5284); /* PAX 84XG5 */ SWITCHTEC_QUIRK(0x5268); /* PAX 68XG5 */ SWITCHTEC_QUIRK(0x5252); /* PAX 52XG5 */ SWITCHTEC_QUIRK(0x5236); /* PAX 36XG5 */ SWITCHTEC_QUIRK(0x5228); /* PAX 28XG5 */ SWITCHTEC_QUIRK(0x5300); /* PFXA 100XG5 */ SWITCHTEC_QUIRK(0x5384); /* PFXA 84XG5 */ SWITCHTEC_QUIRK(0x5368); /* PFXA 68XG5 */ SWITCHTEC_QUIRK(0x5352); /* PFXA 52XG5 */ SWITCHTEC_QUIRK(0x5336); /* PFXA 36XG5 */ SWITCHTEC_QUIRK(0x5328); /* PFXA 28XG5 */ SWITCHTEC_QUIRK(0x5400); /* PSXA 100XG5 */ SWITCHTEC_QUIRK(0x5484); /* PSXA 84XG5 */ SWITCHTEC_QUIRK(0x5468); /* PSXA 68XG5 */ SWITCHTEC_QUIRK(0x5452); /* PSXA 52XG5 */ SWITCHTEC_QUIRK(0x5436); /* PSXA 36XG5 */ SWITCHTEC_QUIRK(0x5428); /* PSXA 28XG5 */ SWITCHTEC_QUIRK(0x5500); /* PAXA 100XG5 */ SWITCHTEC_QUIRK(0x5584); /* PAXA 84XG5 */ SWITCHTEC_QUIRK(0x5568); /* PAXA 68XG5 */ SWITCHTEC_QUIRK(0x5552); /* PAXA 52XG5 */ SWITCHTEC_QUIRK(0x5536); /* PAXA 36XG5 */ SWITCHTEC_QUIRK(0x5528); /* PAXA 28XG5 */ /* * The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints. 
* These IDs are used to forward responses to the originator on the other * side of the NTB. Alias all possible IDs to the NTB to permit access when * the IOMMU is turned on. */ static void quirk_plx_ntb_dma_alias(struct pci_dev *pdev) { pci_info(pdev, "Setting PLX NTB proxy ID aliases\n"); /* PLX NTB may use all 256 devfns */ pci_add_dma_alias(pdev, 0, 256); } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b0, quirk_plx_ntb_dma_alias); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PLX, 0x87b1, quirk_plx_ntb_dma_alias); /* * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does * not always reset the secondary Nvidia GPU between reboots if the system * is configured to use Hybrid Graphics mode. This results in the GPU * being left in whatever state it was in during the *previous* boot, which * causes spurious interrupts from the GPU, which in turn causes us to * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly, * this also completely breaks nouveau. * * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a * clean state and fixes all these issues. * * When the machine is configured in Dedicated display mode, the issue * doesn't occur. Fortunately the GPU advertises NoReset+ when in this * mode, so we can detect that and avoid resetting it. */ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev) { void __iomem *map; int ret; if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO || pdev->subsystem_device != 0x222e || !pci_reset_supported(pdev)) return; if (pci_enable_device_mem(pdev)) return; /* * Based on nvkm_device_ctor() in * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c */ map = pci_iomap(pdev, 0, 0x23000); if (!map) { pci_err(pdev, "Can't map MMIO space\n"); goto out_disable; } /* * Make sure the GPU looks like it's been POSTed before resetting * it. */ if (ioread32(map + 0x2240c) & 0x2) { pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n"); ret = pci_reset_bus(pdev); if (ret < 0) pci_err(pdev, "Failed to reset GPU: %d\n", ret); } iounmap(map); out_disable: pci_disable_device(pdev); } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1, PCI_CLASS_DISPLAY_VGA, 8, quirk_reset_lenovo_thinkpad_p50_nvgpu); /* * Device [1b21:2142] * When in D0, PME# doesn't get asserted when plugging USB 3.0 device. */ static void pci_fixup_no_d0_pme(struct pci_dev *dev) { pci_info(dev, "PME# does not work under D0, disabling it\n"); dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT); } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme); /* * Device 12d8:0x400e [OHCI] and 12d8:0x400f [EHCI] * * These devices advertise PME# support in all power states but don't * reliably assert it. * * These devices also advertise MSI, but documentation (PI7C9X440SL.pdf) * says "The MSI Function is not implemented on this device" in chapters * 7.3.27, 7.3.29-7.3.31. 
*/ static void pci_fixup_no_msi_no_pme(struct pci_dev *dev) { #ifdef CONFIG_PCI_MSI pci_info(dev, "MSI is not implemented on this device, disabling it\n"); dev->no_msi = 1; #endif pci_info(dev, "PME# is unreliable, disabling it\n"); dev->pme_support = 0; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400e, pci_fixup_no_msi_no_pme); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_PERICOM, 0x400f, pci_fixup_no_msi_no_pme); static void apex_pci_fixup_class(struct pci_dev *pdev) { pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class; } DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a, PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class); /* * Pericom PI7C9X2G404/PI7C9X2G304/PI7C9X2G303 switch erratum E5 - * ACS P2P Request Redirect is not functional * * When ACS P2P Request Redirect is enabled and bandwidth is not balanced * between upstream and downstream ports, packets are queued in an internal * buffer until CPLD packet. The workaround is to use the switch in store and * forward mode. */ #define PI7C9X2Gxxx_MODE_REG 0x74 #define PI7C9X2Gxxx_STORE_FORWARD_MODE BIT(0) static void pci_fixup_pericom_acs_store_forward(struct pci_dev *pdev) { struct pci_dev *upstream; u16 val; /* Downstream ports only */ if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM) return; /* Check for ACS P2P Request Redirect use */ if (!pdev->acs_cap) return; pci_read_config_word(pdev, pdev->acs_cap + PCI_ACS_CTRL, &val); if (!(val & PCI_ACS_RR)) return; upstream = pci_upstream_bridge(pdev); if (!upstream) return; pci_read_config_word(upstream, PI7C9X2Gxxx_MODE_REG, &val); if (!(val & PI7C9X2Gxxx_STORE_FORWARD_MODE)) { pci_info(upstream, "Setting PI7C9X2Gxxx store-forward mode to avoid ACS erratum\n"); pci_write_config_word(upstream, PI7C9X2Gxxx_MODE_REG, val | PI7C9X2Gxxx_STORE_FORWARD_MODE); } } /* * Apply fixup on enable and on resume, in order to apply the fix up whenever * ACS configuration changes or switch mode is reset */ DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2404, pci_fixup_pericom_acs_store_forward); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2404, pci_fixup_pericom_acs_store_forward); DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2304, pci_fixup_pericom_acs_store_forward); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2304, pci_fixup_pericom_acs_store_forward); DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_PERICOM, 0x2303, pci_fixup_pericom_acs_store_forward); DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_PERICOM, 0x2303, pci_fixup_pericom_acs_store_forward); static void nvidia_ion_ahci_fixup(struct pci_dev *pdev) { pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING; } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup); static void rom_bar_overlap_defect(struct pci_dev *dev) { pci_info(dev, "working around ROM BAR overlap defect\n"); dev->rom_bar_overlap = 1; } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1533, rom_bar_overlap_defect); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1536, rom_bar_overlap_defect); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1537, rom_bar_overlap_defect); DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1538, rom_bar_overlap_defect); #ifdef CONFIG_PCIEASPM /* * Several Intel DG2 graphics devices advertise that they can only tolerate * 1us latency when transitioning from L1 to L0, which may prevent ASPM L1 * from being enabled. But in fact these devices can tolerate unlimited * latency. Override their Device Capabilities value to allow ASPM L1 to * be enabled. 
*/ static void aspm_l1_acceptable_latency(struct pci_dev *dev) { u32 l1_lat = FIELD_GET(PCI_EXP_DEVCAP_L1, dev->devcap); if (l1_lat < 7) { dev->devcap |= FIELD_PREP(PCI_EXP_DEVCAP_L1, 7); pci_info(dev, "ASPM: overriding L1 acceptable latency from %#x to 0x7\n", l1_lat); } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f80, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f81, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f82, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f83, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f84, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f85, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f86, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f87, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x4f88, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5690, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5691, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5692, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5693, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5694, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x5695, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a0, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a1, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a2, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a3, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a4, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a5, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56a6, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56b0, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56b1, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c0, aspm_l1_acceptable_latency); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x56c1, aspm_l1_acceptable_latency); #endif #ifdef CONFIG_PCIE_DPC /* * Intel Ice Lake, Tiger Lake and Alder Lake BIOS has a bug that clears * the DPC RP PIO Log Size of the integrated Thunderbolt PCIe Root * Ports. 
*/ static void dpc_log_size(struct pci_dev *dev) { u16 dpc, val; dpc = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC); if (!dpc) return; pci_read_config_word(dev, dpc + PCI_EXP_DPC_CAP, &val); if (!(val & PCI_EXP_DPC_CAP_RP_EXT)) return; if (!((val & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8)) { pci_info(dev, "Overriding RP PIO Log Size to 4\n"); dev->dpc_rp_log_size = 4; } } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x461f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x462f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x463f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x466e, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1d, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a1f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a21, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x8a23, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a23, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a25, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a27, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a29, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2b, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2d, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a2f, dpc_log_size); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x9a31, dpc_log_size); #endif /* * For a PCI device with multiple downstream devices, its driver may use * a flattened device tree to describe the downstream devices. * To overlay the flattened device tree, the PCI device and all its ancestor * devices need to have device tree nodes on system base device tree. Thus, * before driver probing, it might need to add a device tree node as the final * fixup. */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5020, of_pci_make_dev_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_XILINX, 0x5021, of_pci_make_dev_node); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_REDHAT, 0x0005, of_pci_make_dev_node);
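/*
 * Editor's illustrative sketch, not part of quirks.c: the declaration
 * pattern used throughout this file, shown for a hypothetical device
 * 0xabcd:0x1234.  Both IDs and the workaround body are placeholders;
 * real quirks are tied to specific, documented hardware defects such as
 * the ones above.  Assumes the headers already included in this file.
 */
static void example_quirk_no_bus_reset(struct pci_dev *pdev)
{
	/* Hypothetical defect: the device does not survive a secondary bus reset */
	pci_info(pdev, "example quirk: marking device as unable to handle bus reset\n");
	pdev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
}
DECLARE_PCI_FIXUP_EARLY(0xabcd, 0x1234, example_quirk_no_bus_reset);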
linux-master
drivers/pci/quirks.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Host bridge related code
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/module.h>

#include "pci.h"

static struct pci_bus *find_pci_root_bus(struct pci_bus *bus)
{
	while (bus->parent)
		bus = bus->parent;

	return bus;
}

struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus)
{
	struct pci_bus *root_bus = find_pci_root_bus(bus);

	return to_pci_host_bridge(root_bus->bridge);
}
EXPORT_SYMBOL_GPL(pci_find_host_bridge);

struct device *pci_get_host_bridge_device(struct pci_dev *dev)
{
	struct pci_bus *root_bus = find_pci_root_bus(dev->bus);
	struct device *bridge = root_bus->bridge;

	kobject_get(&bridge->kobj);
	return bridge;
}

void pci_put_host_bridge_device(struct device *dev)
{
	kobject_put(&dev->kobj);
}

void pci_set_host_bridge_release(struct pci_host_bridge *bridge,
				 void (*release_fn)(struct pci_host_bridge *),
				 void *release_data)
{
	bridge->release_fn = release_fn;
	bridge->release_data = release_data;
}
EXPORT_SYMBOL_GPL(pci_set_host_bridge_release);

void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region,
			     struct resource *res)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
	struct resource_entry *window;
	resource_size_t offset = 0;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_contains(window->res, res)) {
			offset = window->offset;
			break;
		}
	}

	region->start = res->start - offset;
	region->end = res->end - offset;
}
EXPORT_SYMBOL(pcibios_resource_to_bus);

static bool region_contains(struct pci_bus_region *region1,
			    struct pci_bus_region *region2)
{
	return region1->start <= region2->start && region1->end >= region2->end;
}

void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res,
			     struct pci_bus_region *region)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(bus);
	struct resource_entry *window;
	resource_size_t offset = 0;

	resource_list_for_each_entry(window, &bridge->windows) {
		struct pci_bus_region bus_region;

		if (resource_type(res) != resource_type(window->res))
			continue;

		bus_region.start = window->res->start - window->offset;
		bus_region.end = window->res->end - window->offset;

		if (region_contains(&bus_region, region)) {
			offset = window->offset;
			break;
		}
	}

	res->start = region->start + offset;
	res->end = region->end + offset;
}
EXPORT_SYMBOL(pcibios_bus_to_resource);
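/*
 * Editor's illustrative sketch, not part of host-bridge.c: how a caller
 * might use the two conversion helpers above to translate a device BAR
 * between CPU (struct resource) and bus address views.  The function
 * name and the use of BAR 0 are arbitrary placeholders.
 */
static void example_show_bus_address(struct pci_dev *pdev)
{
	struct resource *res = &pdev->resource[0];
	struct pci_bus_region region;
	struct resource back;

	if (!res->flags)
		return;

	/* CPU/resource view -> bus address view (subtracts the window offset) */
	pcibios_resource_to_bus(pdev->bus, &region, res);
	pci_info(pdev, "BAR 0: CPU %pR = bus %#llx-%#llx\n",
		 res, (unsigned long long)region.start,
		 (unsigned long long)region.end);

	/* ...and back again; 'back' should match 'res' for a mapped window */
	back = *res;
	pcibios_bus_to_resource(pdev->bus, &back, &region);
}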
linux-master
drivers/pci/host-bridge.c
// SPDX-License-Identifier: GPL-2.0 /* * PCI ROM access routines * * (C) Copyright 2004 Jon Smirl <[email protected]> * (C) Copyright 2004 Silicon Graphics, Inc. Jesse Barnes <[email protected]> */ #include <linux/kernel.h> #include <linux/export.h> #include <linux/pci.h> #include <linux/slab.h> #include "pci.h" /** * pci_enable_rom - enable ROM decoding for a PCI device * @pdev: PCI device to enable * * Enable ROM decoding on @dev. This involves simply turning on the last * bit of the PCI ROM BAR. Note that some cards may share address decoders * between the ROM and other resources, so enabling it may disable access * to MMIO registers or other card memory. */ int pci_enable_rom(struct pci_dev *pdev) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; struct pci_bus_region region; u32 rom_addr; if (!res->flags) return -1; /* Nothing to enable if we're using a shadow copy in RAM */ if (res->flags & IORESOURCE_ROM_SHADOW) return 0; /* * Ideally pci_update_resource() would update the ROM BAR address, * and we would only set the enable bit here. But apparently some * devices have buggy ROM BARs that read as zero when disabled. */ pcibios_resource_to_bus(pdev->bus, &region, res); pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); rom_addr &= ~PCI_ROM_ADDRESS_MASK; rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE; pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); return 0; } EXPORT_SYMBOL_GPL(pci_enable_rom); /** * pci_disable_rom - disable ROM decoding for a PCI device * @pdev: PCI device to disable * * Disable ROM decoding on a PCI device by turning off the last bit in the * ROM BAR. */ void pci_disable_rom(struct pci_dev *pdev) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; u32 rom_addr; if (res->flags & IORESOURCE_ROM_SHADOW) return; pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); rom_addr &= ~PCI_ROM_ADDRESS_ENABLE; pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr); } EXPORT_SYMBOL_GPL(pci_disable_rom); /** * pci_get_rom_size - obtain the actual size of the ROM image * @pdev: target PCI device * @rom: kernel virtual pointer to image of ROM * @size: size of PCI window * return: size of actual ROM image * * Determine the actual length of the ROM image. * The PCI window size could be much larger than the * actual image size. 
*/ static size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size) { void __iomem *image; int last_image; unsigned int length; image = rom; do { void __iomem *pds; /* Standard PCI ROMs start out with these bytes 55 AA */ if (readw(image) != 0xAA55) { pci_info(pdev, "Invalid PCI ROM header signature: expecting 0xaa55, got %#06x\n", readw(image)); break; } /* get the PCI data structure and check its "PCIR" signature */ pds = image + readw(image + 24); if (readl(pds) != 0x52494350) { pci_info(pdev, "Invalid PCI ROM data signature: expecting 0x52494350, got %#010x\n", readl(pds)); break; } last_image = readb(pds + 21) & 0x80; length = readw(pds + 16); image += length * 512; /* Avoid iterating through memory outside the resource window */ if (image >= rom + size) break; if (!last_image) { if (readw(image) != 0xAA55) { pci_info(pdev, "No more image in the PCI ROM\n"); break; } } } while (length && !last_image); /* never return a size larger than the PCI resource window */ /* there are known ROMs that get the size wrong */ return min((size_t)(image - rom), size); } /** * pci_map_rom - map a PCI ROM to kernel space * @pdev: pointer to pci device struct * @size: pointer to receive size of pci window over ROM * * Return: kernel virtual pointer to image of ROM * * Map a PCI ROM into kernel space. If ROM is boot video ROM, * the shadow BIOS copy will be returned instead of the * actual ROM. */ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; loff_t start; void __iomem *rom; /* assign the ROM an address if it doesn't have one */ if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE)) return NULL; start = pci_resource_start(pdev, PCI_ROM_RESOURCE); *size = pci_resource_len(pdev, PCI_ROM_RESOURCE); if (*size == 0) return NULL; /* Enable ROM space decodes */ if (pci_enable_rom(pdev)) return NULL; rom = ioremap(start, *size); if (!rom) goto err_ioremap; /* * Try to find the true size of the ROM since sometimes the PCI window * size is much larger than the actual size of the ROM. * True size is important if the ROM is going to be copied. */ *size = pci_get_rom_size(pdev, rom, *size); if (!*size) goto invalid_rom; return rom; invalid_rom: iounmap(rom); err_ioremap: /* restore enable if ioremap fails */ if (!(res->flags & IORESOURCE_ROM_ENABLE)) pci_disable_rom(pdev); return NULL; } EXPORT_SYMBOL(pci_map_rom); /** * pci_unmap_rom - unmap the ROM from kernel space * @pdev: pointer to pci device struct * @rom: virtual address of the previous mapping * * Remove a mapping of a previously mapped ROM */ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom) { struct resource *res = &pdev->resource[PCI_ROM_RESOURCE]; iounmap(rom); /* Disable again before continuing */ if (!(res->flags & IORESOURCE_ROM_ENABLE)) pci_disable_rom(pdev); } EXPORT_SYMBOL(pci_unmap_rom);
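/*
 * Editor's illustrative sketch, not part of rom.c: the usual
 * map/copy/unmap pattern a driver might follow with pci_map_rom() and
 * pci_unmap_rom() above.  The function name is a placeholder and the
 * sketch assumes <linux/slab.h> and <linux/io.h> are available.
 */
static void *example_copy_rom(struct pci_dev *pdev, size_t *len)
{
	void __iomem *rom;
	void *copy;
	size_t size;

	rom = pci_map_rom(pdev, &size);		/* enables decoding, maps the ROM BAR */
	if (!rom)
		return NULL;

	copy = kmalloc(size, GFP_KERNEL);
	if (copy)
		memcpy_fromio(copy, rom, size);	/* snapshot the image while mapped */

	pci_unmap_rom(pdev, rom);		/* unmap and disable decoding again */

	if (copy)
		*len = size;
	return copy;
}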
linux-master
drivers/pci/rom.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/pci.h> #include <linux/module.h> #include "pci.h" static void pci_free_resources(struct pci_dev *dev) { struct resource *res; pci_dev_for_each_resource(dev, res) { if (res->parent) release_resource(res); } } static void pci_stop_dev(struct pci_dev *dev) { pci_pme_active(dev, false); if (pci_dev_is_added(dev)) { device_release_driver(&dev->dev); pci_proc_detach_device(dev); pci_remove_sysfs_dev_files(dev); of_pci_remove_node(dev); pci_dev_assign_added(dev, false); } } static void pci_destroy_dev(struct pci_dev *dev) { if (!dev->dev.kobj.parent) return; device_del(&dev->dev); down_write(&pci_bus_sem); list_del(&dev->bus_list); up_write(&pci_bus_sem); pci_doe_destroy(dev); pcie_aspm_exit_link_state(dev); pci_bridge_d3_update(dev); pci_free_resources(dev); put_device(&dev->dev); } void pci_remove_bus(struct pci_bus *bus) { pci_proc_detach_bus(bus); down_write(&pci_bus_sem); list_del(&bus->node); pci_bus_release_busn_res(bus); up_write(&pci_bus_sem); pci_remove_legacy_files(bus); if (bus->ops->remove_bus) bus->ops->remove_bus(bus); pcibios_remove_bus(bus); device_unregister(&bus->dev); } EXPORT_SYMBOL(pci_remove_bus); static void pci_stop_bus_device(struct pci_dev *dev) { struct pci_bus *bus = dev->subordinate; struct pci_dev *child, *tmp; /* * Stopping an SR-IOV PF device removes all the associated VFs, * which will update the bus->devices list and confuse the * iterator. Therefore, iterate in reverse so we remove the VFs * first, then the PF. */ if (bus) { list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list) pci_stop_bus_device(child); } pci_stop_dev(dev); } static void pci_remove_bus_device(struct pci_dev *dev) { struct pci_bus *bus = dev->subordinate; struct pci_dev *child, *tmp; if (bus) { list_for_each_entry_safe(child, tmp, &bus->devices, bus_list) pci_remove_bus_device(child); pci_remove_bus(bus); dev->subordinate = NULL; } pci_destroy_dev(dev); } /** * pci_stop_and_remove_bus_device - remove a PCI device and any children * @dev: the device to remove * * Remove a PCI device from the device lists, informing the drivers * that the device has been removed. We also remove any subordinate * buses and children in a depth-first manner. * * For each device we remove, delete the device structure from the * device lists, remove the /proc entry, and notify userspace * (/sbin/hotplug). 
*/ void pci_stop_and_remove_bus_device(struct pci_dev *dev) { pci_stop_bus_device(dev); pci_remove_bus_device(dev); } EXPORT_SYMBOL(pci_stop_and_remove_bus_device); void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev) { pci_lock_rescan_remove(); pci_stop_and_remove_bus_device(dev); pci_unlock_rescan_remove(); } EXPORT_SYMBOL_GPL(pci_stop_and_remove_bus_device_locked); void pci_stop_root_bus(struct pci_bus *bus) { struct pci_dev *child, *tmp; struct pci_host_bridge *host_bridge; if (!pci_is_root_bus(bus)) return; host_bridge = to_pci_host_bridge(bus->bridge); list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list) pci_stop_bus_device(child); /* stop the host bridge */ device_release_driver(&host_bridge->dev); } EXPORT_SYMBOL_GPL(pci_stop_root_bus); void pci_remove_root_bus(struct pci_bus *bus) { struct pci_dev *child, *tmp; struct pci_host_bridge *host_bridge; if (!pci_is_root_bus(bus)) return; host_bridge = to_pci_host_bridge(bus->bridge); list_for_each_entry_safe(child, tmp, &bus->devices, bus_list) pci_remove_bus_device(child); #ifdef CONFIG_PCI_DOMAINS_GENERIC /* Release domain_nr if it was dynamically allocated */ if (host_bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET) pci_bus_release_domain_nr(bus, host_bridge->dev.parent); #endif pci_remove_bus(bus); host_bridge->bus = NULL; /* remove the host bridge */ device_del(&host_bridge->dev); } EXPORT_SYMBOL_GPL(pci_remove_root_bus);
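/*
 * Editor's illustrative sketch, not part of remove.c: a caller that
 * looks up a device by address and tears it down with the locked helper
 * above, so the rescan/remove lock is held around the removal.  The
 * domain/bus/devfn values and the function name are placeholders.
 */
static void example_remove_device(void)
{
	struct pci_dev *pdev;

	pdev = pci_get_domain_bus_and_slot(0, 0x03, PCI_DEVFN(0x00, 0));
	if (!pdev)
		return;

	pci_stop_and_remove_bus_device_locked(pdev);
	pci_dev_put(pdev);	/* drop the reference taken by the lookup */
}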
linux-master
drivers/pci/remove.c
// SPDX-License-Identifier: GPL-2.0 /* * PCI support in ACPI * * Copyright (C) 2005 David Shaohua Li <[email protected]> * Copyright (C) 2004 Tom Long Nguyen <[email protected]> * Copyright (C) 2004 Intel Corp. */ #include <linux/delay.h> #include <linux/init.h> #include <linux/irqdomain.h> #include <linux/pci.h> #include <linux/msi.h> #include <linux/pci_hotplug.h> #include <linux/module.h> #include <linux/pci-acpi.h> #include <linux/pm_runtime.h> #include <linux/pm_qos.h> #include <linux/rwsem.h> #include "pci.h" /* * The GUID is defined in the PCI Firmware Specification available * here to PCI-SIG members: * https://members.pcisig.com/wg/PCI-SIG/document/15350 */ const guid_t pci_acpi_dsm_guid = GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a, 0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d); #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64) static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res) { struct device *dev = &adev->dev; struct resource_entry *entry; struct list_head list; unsigned long flags; int ret; INIT_LIST_HEAD(&list); flags = IORESOURCE_MEM; ret = acpi_dev_get_resources(adev, &list, acpi_dev_filter_resource_type_cb, (void *) flags); if (ret < 0) { dev_err(dev, "failed to parse _CRS method, error code %d\n", ret); return ret; } if (ret == 0) { dev_err(dev, "no IO and memory resources present in _CRS\n"); return -EINVAL; } entry = list_first_entry(&list, struct resource_entry, node); *res = *entry->res; acpi_dev_free_resource_list(&list); return 0; } static acpi_status acpi_match_rc(acpi_handle handle, u32 lvl, void *context, void **retval) { u16 *segment = context; unsigned long long uid; acpi_status status; status = acpi_evaluate_integer(handle, METHOD_NAME__UID, NULL, &uid); if (ACPI_FAILURE(status) || uid != *segment) return AE_CTRL_DEPTH; *(acpi_handle *)retval = handle; return AE_CTRL_TERMINATE; } int acpi_get_rc_resources(struct device *dev, const char *hid, u16 segment, struct resource *res) { struct acpi_device *adev; acpi_status status; acpi_handle handle; int ret; status = acpi_get_devices(hid, acpi_match_rc, &segment, &handle); if (ACPI_FAILURE(status)) { dev_err(dev, "can't find _HID %s device to locate resources\n", hid); return -ENODEV; } adev = acpi_fetch_acpi_dev(handle); if (!adev) return -ENODEV; ret = acpi_get_rc_addr(adev, res); if (ret) { dev_err(dev, "can't get resource from %s\n", dev_name(&adev->dev)); return ret; } return 0; } #endif phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) { acpi_status status = AE_NOT_EXIST; unsigned long long mcfg_addr; if (handle) status = acpi_evaluate_integer(handle, METHOD_NAME__CBA, NULL, &mcfg_addr); if (ACPI_FAILURE(status)) return 0; return (phys_addr_t)mcfg_addr; } /* _HPX PCI Setting Record (Type 0); same as _HPP */ struct hpx_type0 { u32 revision; /* Not present in _HPP */ u8 cache_line_size; /* Not applicable to PCIe */ u8 latency_timer; /* Not applicable to PCIe */ u8 enable_serr; u8 enable_perr; }; static struct hpx_type0 pci_default_type0 = { .revision = 1, .cache_line_size = 8, .latency_timer = 0x40, .enable_serr = 0, .enable_perr = 0, }; static void program_hpx_type0(struct pci_dev *dev, struct hpx_type0 *hpx) { u16 pci_cmd, pci_bctl; if (!hpx) hpx = &pci_default_type0; if (hpx->revision > 1) { pci_warn(dev, "PCI settings rev %d not supported; using defaults\n", hpx->revision); hpx = &pci_default_type0; } pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpx->cache_line_size); pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpx->latency_timer); pci_read_config_word(dev, 
PCI_COMMAND, &pci_cmd); if (hpx->enable_serr) pci_cmd |= PCI_COMMAND_SERR; if (hpx->enable_perr) pci_cmd |= PCI_COMMAND_PARITY; pci_write_config_word(dev, PCI_COMMAND, pci_cmd); /* Program bridge control value */ if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) { pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, hpx->latency_timer); pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl); if (hpx->enable_perr) pci_bctl |= PCI_BRIDGE_CTL_PARITY; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl); } } static acpi_status decode_type0_hpx_record(union acpi_object *record, struct hpx_type0 *hpx0) { int i; union acpi_object *fields = record->package.elements; u32 revision = fields[1].integer.value; switch (revision) { case 1: if (record->package.count != 6) return AE_ERROR; for (i = 2; i < 6; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; hpx0->revision = revision; hpx0->cache_line_size = fields[2].integer.value; hpx0->latency_timer = fields[3].integer.value; hpx0->enable_serr = fields[4].integer.value; hpx0->enable_perr = fields[5].integer.value; break; default: pr_warn("%s: Type 0 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } /* _HPX PCI-X Setting Record (Type 1) */ struct hpx_type1 { u32 revision; u8 max_mem_read; u8 avg_max_split; u16 tot_max_split; }; static void program_hpx_type1(struct pci_dev *dev, struct hpx_type1 *hpx) { int pos; if (!hpx) return; pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!pos) return; pci_warn(dev, "PCI-X settings not supported\n"); } static acpi_status decode_type1_hpx_record(union acpi_object *record, struct hpx_type1 *hpx1) { int i; union acpi_object *fields = record->package.elements; u32 revision = fields[1].integer.value; switch (revision) { case 1: if (record->package.count != 5) return AE_ERROR; for (i = 2; i < 5; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; hpx1->revision = revision; hpx1->max_mem_read = fields[2].integer.value; hpx1->avg_max_split = fields[3].integer.value; hpx1->tot_max_split = fields[4].integer.value; break; default: pr_warn("%s: Type 1 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } static bool pcie_root_rcb_set(struct pci_dev *dev) { struct pci_dev *rp = pcie_find_root_port(dev); u16 lnkctl; if (!rp) return false; pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl); if (lnkctl & PCI_EXP_LNKCTL_RCB) return true; return false; } /* _HPX PCI Express Setting Record (Type 2) */ struct hpx_type2 { u32 revision; u32 unc_err_mask_and; u32 unc_err_mask_or; u32 unc_err_sever_and; u32 unc_err_sever_or; u32 cor_err_mask_and; u32 cor_err_mask_or; u32 adv_err_cap_and; u32 adv_err_cap_or; u16 pci_exp_devctl_and; u16 pci_exp_devctl_or; u16 pci_exp_lnkctl_and; u16 pci_exp_lnkctl_or; u32 sec_unc_err_sever_and; u32 sec_unc_err_sever_or; u32 sec_unc_err_mask_and; u32 sec_unc_err_mask_or; }; static void program_hpx_type2(struct pci_dev *dev, struct hpx_type2 *hpx) { int pos; u32 reg32; if (!hpx) return; if (!pci_is_pcie(dev)) return; if (hpx->revision > 1) { pci_warn(dev, "PCIe settings rev %d not supported\n", hpx->revision); return; } /* * Don't allow _HPX to change MPS or MRRS settings. We manage * those to make sure they're consistent with the rest of the * platform. 
*/ hpx->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ; hpx->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); /* Initialize Device Control Register */ pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, ~hpx->pci_exp_devctl_and, hpx->pci_exp_devctl_or); /* Initialize Link Control Register */ if (pcie_cap_has_lnkctl(dev)) { /* * If the Root Port supports Read Completion Boundary of * 128, set RCB to 128. Otherwise, clear it. */ hpx->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB; hpx->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB; if (pcie_root_rcb_set(dev)) hpx->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB; pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, ~hpx->pci_exp_lnkctl_and, hpx->pci_exp_lnkctl_or); } /* Find Advanced Error Reporting Enhanced Capability */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return; /* Initialize Uncorrectable Error Mask Register */ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32); reg32 = (reg32 & hpx->unc_err_mask_and) | hpx->unc_err_mask_or; pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32); /* Initialize Uncorrectable Error Severity Register */ pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32); reg32 = (reg32 & hpx->unc_err_sever_and) | hpx->unc_err_sever_or; pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32); /* Initialize Correctable Error Mask Register */ pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32); reg32 = (reg32 & hpx->cor_err_mask_and) | hpx->cor_err_mask_or; pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32); /* Initialize Advanced Error Capabilities and Control Register */ pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32); reg32 = (reg32 & hpx->adv_err_cap_and) | hpx->adv_err_cap_or; /* Don't enable ECRC generation or checking if unsupported */ if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) reg32 &= ~PCI_ERR_CAP_ECRC_GENE; if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); /* * FIXME: The following two registers are not supported yet. 
* * o Secondary Uncorrectable Error Severity Register * o Secondary Uncorrectable Error Mask Register */ } static acpi_status decode_type2_hpx_record(union acpi_object *record, struct hpx_type2 *hpx2) { int i; union acpi_object *fields = record->package.elements; u32 revision = fields[1].integer.value; switch (revision) { case 1: if (record->package.count != 18) return AE_ERROR; for (i = 2; i < 18; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; hpx2->revision = revision; hpx2->unc_err_mask_and = fields[2].integer.value; hpx2->unc_err_mask_or = fields[3].integer.value; hpx2->unc_err_sever_and = fields[4].integer.value; hpx2->unc_err_sever_or = fields[5].integer.value; hpx2->cor_err_mask_and = fields[6].integer.value; hpx2->cor_err_mask_or = fields[7].integer.value; hpx2->adv_err_cap_and = fields[8].integer.value; hpx2->adv_err_cap_or = fields[9].integer.value; hpx2->pci_exp_devctl_and = fields[10].integer.value; hpx2->pci_exp_devctl_or = fields[11].integer.value; hpx2->pci_exp_lnkctl_and = fields[12].integer.value; hpx2->pci_exp_lnkctl_or = fields[13].integer.value; hpx2->sec_unc_err_sever_and = fields[14].integer.value; hpx2->sec_unc_err_sever_or = fields[15].integer.value; hpx2->sec_unc_err_mask_and = fields[16].integer.value; hpx2->sec_unc_err_mask_or = fields[17].integer.value; break; default: pr_warn("%s: Type 2 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } /* _HPX PCI Express Setting Record (Type 3) */ struct hpx_type3 { u16 device_type; u16 function_type; u16 config_space_location; u16 pci_exp_cap_id; u16 pci_exp_cap_ver; u16 pci_exp_vendor_id; u16 dvsec_id; u16 dvsec_rev; u16 match_offset; u32 match_mask_and; u32 match_value; u16 reg_offset; u32 reg_mask_and; u32 reg_mask_or; }; enum hpx_type3_dev_type { HPX_TYPE_ENDPOINT = BIT(0), HPX_TYPE_LEG_END = BIT(1), HPX_TYPE_RC_END = BIT(2), HPX_TYPE_RC_EC = BIT(3), HPX_TYPE_ROOT_PORT = BIT(4), HPX_TYPE_UPSTREAM = BIT(5), HPX_TYPE_DOWNSTREAM = BIT(6), HPX_TYPE_PCI_BRIDGE = BIT(7), HPX_TYPE_PCIE_BRIDGE = BIT(8), }; static u16 hpx3_device_type(struct pci_dev *dev) { u16 pcie_type = pci_pcie_type(dev); static const int pcie_to_hpx3_type[] = { [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT, [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END, [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END, [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC, [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT, [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM, [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM, [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE, [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE, }; if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type)) return 0; return pcie_to_hpx3_type[pcie_type]; } enum hpx_type3_fn_type { HPX_FN_NORMAL = BIT(0), HPX_FN_SRIOV_PHYS = BIT(1), HPX_FN_SRIOV_VIRT = BIT(2), }; static u8 hpx3_function_type(struct pci_dev *dev) { if (dev->is_virtfn) return HPX_FN_SRIOV_VIRT; else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0) return HPX_FN_SRIOV_PHYS; else return HPX_FN_NORMAL; } static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id) { u8 cap_ver = hpx3_cap_id & 0xf; if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id) return true; else if (cap_ver == pcie_cap_id) return true; return false; } enum hpx_type3_cfg_loc { HPX_CFG_PCICFG = 0, HPX_CFG_PCIE_CAP = 1, HPX_CFG_PCIE_CAP_EXT = 2, HPX_CFG_VEND_CAP = 3, HPX_CFG_DVSEC = 4, HPX_CFG_MAX, }; static void program_hpx_type3_register(struct pci_dev *dev, const struct hpx_type3 *reg) { u32 match_reg, write_reg, header, orig_value; u16 pos; if 
(!(hpx3_device_type(dev) & reg->device_type)) return; if (!(hpx3_function_type(dev) & reg->function_type)) return; switch (reg->config_space_location) { case HPX_CFG_PCICFG: pos = 0; break; case HPX_CFG_PCIE_CAP: pos = pci_find_capability(dev, reg->pci_exp_cap_id); if (pos == 0) return; break; case HPX_CFG_PCIE_CAP_EXT: pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id); if (pos == 0) return; pci_read_config_dword(dev, pos, &header); if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header), reg->pci_exp_cap_ver)) return; break; case HPX_CFG_VEND_CAP: case HPX_CFG_DVSEC: default: pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location"); return; } pci_read_config_dword(dev, pos + reg->match_offset, &match_reg); if ((match_reg & reg->match_mask_and) != reg->match_value) return; pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg); orig_value = write_reg; write_reg &= reg->reg_mask_and; write_reg |= reg->reg_mask_or; if (orig_value == write_reg) return; pci_write_config_dword(dev, pos + reg->reg_offset, write_reg); pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x", pos, orig_value, write_reg); } static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx) { if (!hpx) return; if (!pci_is_pcie(dev)) return; program_hpx_type3_register(dev, hpx); } static void parse_hpx3_register(struct hpx_type3 *hpx3_reg, union acpi_object *reg_fields) { hpx3_reg->device_type = reg_fields[0].integer.value; hpx3_reg->function_type = reg_fields[1].integer.value; hpx3_reg->config_space_location = reg_fields[2].integer.value; hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value; hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value; hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value; hpx3_reg->dvsec_id = reg_fields[6].integer.value; hpx3_reg->dvsec_rev = reg_fields[7].integer.value; hpx3_reg->match_offset = reg_fields[8].integer.value; hpx3_reg->match_mask_and = reg_fields[9].integer.value; hpx3_reg->match_value = reg_fields[10].integer.value; hpx3_reg->reg_offset = reg_fields[11].integer.value; hpx3_reg->reg_mask_and = reg_fields[12].integer.value; hpx3_reg->reg_mask_or = reg_fields[13].integer.value; } static acpi_status program_type3_hpx_record(struct pci_dev *dev, union acpi_object *record) { union acpi_object *fields = record->package.elements; u32 desc_count, expected_length, revision; union acpi_object *reg_fields; struct hpx_type3 hpx3; int i; revision = fields[1].integer.value; switch (revision) { case 1: desc_count = fields[2].integer.value; expected_length = 3 + desc_count * 14; if (record->package.count != expected_length) return AE_ERROR; for (i = 2; i < expected_length; i++) if (fields[i].type != ACPI_TYPE_INTEGER) return AE_ERROR; for (i = 0; i < desc_count; i++) { reg_fields = fields + 3 + i * 14; parse_hpx3_register(&hpx3, reg_fields); program_hpx_type3(dev, &hpx3); } break; default: printk(KERN_WARNING "%s: Type 3 Revision %d record not supported\n", __func__, revision); return AE_ERROR; } return AE_OK; } static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle) { acpi_status status; struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *package, *record, *fields; struct hpx_type0 hpx0; struct hpx_type1 hpx1; struct hpx_type2 hpx2; u32 type; int i; status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer); if (ACPI_FAILURE(status)) return status; package = (union acpi_object *)buffer.pointer; if (package->type != ACPI_TYPE_PACKAGE) { status = AE_ERROR; goto exit; } for (i = 0; i < 
package->package.count; i++) { record = &package->package.elements[i]; if (record->type != ACPI_TYPE_PACKAGE) { status = AE_ERROR; goto exit; } fields = record->package.elements; if (fields[0].type != ACPI_TYPE_INTEGER || fields[1].type != ACPI_TYPE_INTEGER) { status = AE_ERROR; goto exit; } type = fields[0].integer.value; switch (type) { case 0: memset(&hpx0, 0, sizeof(hpx0)); status = decode_type0_hpx_record(record, &hpx0); if (ACPI_FAILURE(status)) goto exit; program_hpx_type0(dev, &hpx0); break; case 1: memset(&hpx1, 0, sizeof(hpx1)); status = decode_type1_hpx_record(record, &hpx1); if (ACPI_FAILURE(status)) goto exit; program_hpx_type1(dev, &hpx1); break; case 2: memset(&hpx2, 0, sizeof(hpx2)); status = decode_type2_hpx_record(record, &hpx2); if (ACPI_FAILURE(status)) goto exit; program_hpx_type2(dev, &hpx2); break; case 3: status = program_type3_hpx_record(dev, record); if (ACPI_FAILURE(status)) goto exit; break; default: pr_err("%s: Type %d record not supported\n", __func__, type); status = AE_ERROR; goto exit; } } exit: kfree(buffer.pointer); return status; } static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle) { acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *package, *fields; struct hpx_type0 hpx0; int i; memset(&hpx0, 0, sizeof(hpx0)); status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); if (ACPI_FAILURE(status)) return status; package = (union acpi_object *) buffer.pointer; if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 4) { status = AE_ERROR; goto exit; } fields = package->package.elements; for (i = 0; i < 4; i++) { if (fields[i].type != ACPI_TYPE_INTEGER) { status = AE_ERROR; goto exit; } } hpx0.revision = 1; hpx0.cache_line_size = fields[0].integer.value; hpx0.latency_timer = fields[1].integer.value; hpx0.enable_serr = fields[2].integer.value; hpx0.enable_perr = fields[3].integer.value; program_hpx_type0(dev, &hpx0); exit: kfree(buffer.pointer); return status; } /* pci_acpi_program_hp_params * * @dev - the pci_dev for which we want parameters */ int pci_acpi_program_hp_params(struct pci_dev *dev) { acpi_status status; acpi_handle handle, phandle; struct pci_bus *pbus; if (acpi_pci_disabled) return -ENODEV; handle = NULL; for (pbus = dev->bus; pbus; pbus = pbus->parent) { handle = acpi_pci_get_bridge_handle(pbus); if (handle) break; } /* * _HPP settings apply to all child buses, until another _HPP is * encountered. If we don't find an _HPP for the input pci dev, * look for it in the parent device scope since that would apply to * this pci dev. */ while (handle) { status = acpi_run_hpx(dev, handle); if (ACPI_SUCCESS(status)) return 0; status = acpi_run_hpp(dev, handle); if (ACPI_SUCCESS(status)) return 0; if (acpi_is_root_bridge(handle)) break; status = acpi_get_parent(handle, &phandle); if (ACPI_FAILURE(status)) break; handle = phandle; } return -ENODEV; } /** * pciehp_is_native - Check whether a hotplug port is handled by the OS * @bridge: Hotplug port to check * * Returns true if the given @bridge is handled by the native PCIe hotplug * driver. 
*/ bool pciehp_is_native(struct pci_dev *bridge) { const struct pci_host_bridge *host; u32 slot_cap; if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) return false; pcie_capability_read_dword(bridge, PCI_EXP_SLTCAP, &slot_cap); if (!(slot_cap & PCI_EXP_SLTCAP_HPC)) return false; if (pcie_ports_native) return true; host = pci_find_host_bridge(bridge->bus); return host->native_pcie_hotplug; } /** * shpchp_is_native - Check whether a hotplug port is handled by the OS * @bridge: Hotplug port to check * * Returns true if the given @bridge is handled by the native SHPC hotplug * driver. */ bool shpchp_is_native(struct pci_dev *bridge) { return bridge->shpc_managed; } /** * pci_acpi_wake_bus - Root bus wakeup notification fork function. * @context: Device wakeup context. */ static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context) { struct acpi_device *adev; struct acpi_pci_root *root; adev = container_of(context, struct acpi_device, wakeup.context); root = acpi_driver_data(adev); pci_pme_wakeup_bus(root->bus); } /** * pci_acpi_wake_dev - PCI device wakeup notification work function. * @context: Device wakeup context. */ static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context) { struct pci_dev *pci_dev; pci_dev = to_pci_dev(context->dev); if (pci_dev->pme_poll) pci_dev->pme_poll = false; if (pci_dev->current_state == PCI_D3cold) { pci_wakeup_event(pci_dev); pm_request_resume(&pci_dev->dev); return; } /* Clear PME Status if set. */ if (pci_dev->pme_support) pci_check_pme_status(pci_dev); pci_wakeup_event(pci_dev); pm_request_resume(&pci_dev->dev); pci_pme_wakeup_bus(pci_dev->subordinate); } /** * pci_acpi_add_bus_pm_notifier - Register PM notifier for root PCI bus. * @dev: PCI root bridge ACPI device. */ acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev) { return acpi_add_pm_notifier(dev, NULL, pci_acpi_wake_bus); } /** * pci_acpi_add_pm_notifier - Register PM notifier for given PCI device. * @dev: ACPI device to add the notifier for. * @pci_dev: PCI device to check for the PME status if an event is signaled. */ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, struct pci_dev *pci_dev) { return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev); } /* * _SxD returns the D-state with the highest power * (lowest D-state number) supported in the S-state "x". * * If the devices does not have a _PRW * (Power Resources for Wake) supporting system wakeup from "x" * then the OS is free to choose a lower power (higher number * D-state) than the return value from _SxD. * * But if _PRW is enabled at S-state "x", the OS * must not choose a power lower than _SxD -- * unless the device has an _SxW method specifying * the lowest power (highest D-state number) the device * may enter while still able to wake the system. * * ie. 
depending on global OS policy: * * if (_PRW at S-state x) * choose from highest power _SxD to lowest power _SxW * else // no _PRW at S-state x * choose highest power _SxD or any lower power */ pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) { int acpi_state, d_max; if (pdev->no_d3cold) d_max = ACPI_STATE_D3_HOT; else d_max = ACPI_STATE_D3_COLD; acpi_state = acpi_pm_device_sleep_state(&pdev->dev, NULL, d_max); if (acpi_state < 0) return PCI_POWER_ERROR; switch (acpi_state) { case ACPI_STATE_D0: return PCI_D0; case ACPI_STATE_D1: return PCI_D1; case ACPI_STATE_D2: return PCI_D2; case ACPI_STATE_D3_HOT: return PCI_D3hot; case ACPI_STATE_D3_COLD: return PCI_D3cold; } return PCI_POWER_ERROR; } static struct acpi_device *acpi_pci_find_companion(struct device *dev); void pci_set_acpi_fwnode(struct pci_dev *dev) { if (!dev_fwnode(&dev->dev) && !pci_dev_is_added(dev)) ACPI_COMPANION_SET(&dev->dev, acpi_pci_find_companion(&dev->dev)); } /** * pci_dev_acpi_reset - do a function level reset using _RST method * @dev: device to reset * @probe: if true, return 0 if device supports _RST */ int pci_dev_acpi_reset(struct pci_dev *dev, bool probe) { acpi_handle handle = ACPI_HANDLE(&dev->dev); if (!handle || !acpi_has_method(handle, "_RST")) return -ENOTTY; if (probe) return 0; if (ACPI_FAILURE(acpi_evaluate_object(handle, "_RST", NULL, NULL))) { pci_warn(dev, "ACPI _RST failed\n"); return -ENOTTY; } return 0; } bool acpi_pci_power_manageable(struct pci_dev *dev) { struct acpi_device *adev = ACPI_COMPANION(&dev->dev); return adev && acpi_device_power_manageable(adev); } bool acpi_pci_bridge_d3(struct pci_dev *dev) { struct pci_dev *rpdev; struct acpi_device *adev, *rpadev; const union acpi_object *obj; if (acpi_pci_disabled || !dev->is_hotplug_bridge) return false; adev = ACPI_COMPANION(&dev->dev); if (adev) { /* * If the bridge has _S0W, whether or not it can go into D3 * depends on what is returned by that object. In particular, * if the power state returned by _S0W is D2 or shallower, * entering D3 should not be allowed. */ if (acpi_dev_power_state_for_wake(adev) <= ACPI_STATE_D2) return false; /* * Otherwise, assume that the bridge can enter D3 so long as it * is power-manageable via ACPI. */ if (acpi_device_power_manageable(adev)) return true; } rpdev = pcie_find_root_port(dev); if (!rpdev) return false; if (rpdev == dev) rpadev = adev; else rpadev = ACPI_COMPANION(&rpdev->dev); if (!rpadev) return false; /* * If the Root Port cannot signal wakeup signals at all, i.e., it * doesn't supply a wakeup GPE via _PRW, it cannot signal hotplug * events from low-power states including D3hot and D3cold. */ if (!rpadev->wakeup.flags.valid) return false; /* * In the bridge-below-a-Root-Port case, evaluate _S0W for the Root Port * to verify whether or not it can signal wakeup from D3. */ if (rpadev != adev && acpi_dev_power_state_for_wake(rpadev) <= ACPI_STATE_D2) return false; /* * The "HotPlugSupportInD3" property in a Root Port _DSD indicates * the Port can signal hotplug events while in D3. We assume any * bridges *below* that Root Port can also signal hotplug events * while in D3. */ if (!acpi_dev_get_property(rpadev, "HotPlugSupportInD3", ACPI_TYPE_INTEGER, &obj) && obj->integer.value == 1) return true; return false; } static void acpi_pci_config_space_access(struct pci_dev *dev, bool enable) { int val = enable ? 
ACPI_REG_CONNECT : ACPI_REG_DISCONNECT; int ret = acpi_evaluate_reg(ACPI_HANDLE(&dev->dev), ACPI_ADR_SPACE_PCI_CONFIG, val); if (ret) pci_dbg(dev, "ACPI _REG %s evaluation failed (%d)\n", enable ? "connect" : "disconnect", ret); } int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) { struct acpi_device *adev = ACPI_COMPANION(&dev->dev); static const u8 state_conv[] = { [PCI_D0] = ACPI_STATE_D0, [PCI_D1] = ACPI_STATE_D1, [PCI_D2] = ACPI_STATE_D2, [PCI_D3hot] = ACPI_STATE_D3_HOT, [PCI_D3cold] = ACPI_STATE_D3_COLD, }; int error; /* If the ACPI device has _EJ0, ignore the device */ if (!adev || acpi_has_method(adev->handle, "_EJ0")) return -ENODEV; switch (state) { case PCI_D0: case PCI_D1: case PCI_D2: case PCI_D3hot: case PCI_D3cold: break; default: return -EINVAL; } if (state == PCI_D3cold) { if (dev_pm_qos_flags(&dev->dev, PM_QOS_FLAG_NO_POWER_OFF) == PM_QOS_FLAGS_ALL) return -EBUSY; /* Notify AML lack of PCI config space availability */ acpi_pci_config_space_access(dev, false); } error = acpi_device_set_power(adev, state_conv[state]); if (error) return error; pci_dbg(dev, "power state changed by ACPI to %s\n", acpi_power_state_string(adev->power.state)); /* * Notify AML of PCI config space availability. Config space is * accessible in all states except D3cold; the only transitions * that change availability are transitions to D3cold and from * D3cold to D0. */ if (state == PCI_D0) acpi_pci_config_space_access(dev, true); return 0; } pci_power_t acpi_pci_get_power_state(struct pci_dev *dev) { struct acpi_device *adev = ACPI_COMPANION(&dev->dev); static const pci_power_t state_conv[] = { [ACPI_STATE_D0] = PCI_D0, [ACPI_STATE_D1] = PCI_D1, [ACPI_STATE_D2] = PCI_D2, [ACPI_STATE_D3_HOT] = PCI_D3hot, [ACPI_STATE_D3_COLD] = PCI_D3cold, }; int state; if (!adev || !acpi_device_power_manageable(adev)) return PCI_UNKNOWN; state = adev->power.state; if (state == ACPI_STATE_UNKNOWN) return PCI_UNKNOWN; return state_conv[state]; } void acpi_pci_refresh_power_state(struct pci_dev *dev) { struct acpi_device *adev = ACPI_COMPANION(&dev->dev); if (adev && acpi_device_power_manageable(adev)) acpi_device_update_power(adev, NULL); } static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable) { while (bus->parent) { if (acpi_pm_device_can_wakeup(&bus->self->dev)) return acpi_pm_set_device_wakeup(&bus->self->dev, enable); bus = bus->parent; } /* We have reached the root bus. */ if (bus->bridge) { if (acpi_pm_device_can_wakeup(bus->bridge)) return acpi_pm_set_device_wakeup(bus->bridge, enable); } return 0; } int acpi_pci_wakeup(struct pci_dev *dev, bool enable) { if (acpi_pci_disabled) return 0; if (acpi_pm_device_can_wakeup(&dev->dev)) return acpi_pm_set_device_wakeup(&dev->dev, enable); return acpi_pci_propagate_wakeup(dev->bus, enable); } bool acpi_pci_need_resume(struct pci_dev *dev) { struct acpi_device *adev; if (acpi_pci_disabled) return false; /* * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over * system-wide suspend/resume confuses the platform firmware, so avoid * doing that. According to Section 16.1.6 of ACPI 6.2, endpoint * devices are expected to be in D3 before invoking the S3 entry path * from the firmware, so they should not be affected by this issue. 
*/ if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0) return true; adev = ACPI_COMPANION(&dev->dev); if (!adev || !acpi_device_power_manageable(adev)) return false; if (adev->wakeup.flags.valid && device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count) return true; if (acpi_target_system_state() == ACPI_STATE_S0) return false; return !!adev->power.flags.dsw_present; } void acpi_pci_add_bus(struct pci_bus *bus) { union acpi_object *obj; struct pci_host_bridge *bridge; if (acpi_pci_disabled || !bus->bridge || !ACPI_HANDLE(bus->bridge)) return; acpi_pci_slot_enumerate(bus); acpiphp_enumerate_slots(bus); /* * For a host bridge, check its _DSM for function 8 and if * that is available, mark it in pci_host_bridge. */ if (!pci_is_root_bus(bus)) return; obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3, DSM_PCI_POWER_ON_RESET_DELAY, NULL); if (!obj) return; if (obj->type == ACPI_TYPE_INTEGER && obj->integer.value == 1) { bridge = pci_find_host_bridge(bus); bridge->ignore_reset_delay = 1; } ACPI_FREE(obj); } void acpi_pci_remove_bus(struct pci_bus *bus) { if (acpi_pci_disabled || !bus->bridge) return; acpiphp_remove_slots(bus); acpi_pci_slot_remove(bus); } /* ACPI bus type */ static DECLARE_RWSEM(pci_acpi_companion_lookup_sem); static struct acpi_device *(*pci_acpi_find_companion_hook)(struct pci_dev *); /** * pci_acpi_set_companion_lookup_hook - Set ACPI companion lookup callback. * @func: ACPI companion lookup callback pointer or NULL. * * Set a special ACPI companion lookup callback for PCI devices whose companion * objects in the ACPI namespace have _ADR with non-standard bus-device-function * encodings. * * Return 0 on success or a negative error code on failure (in which case no * changes are made). * * The caller is responsible for the appropriate ordering of the invocations of * this function with respect to the enumeration of the PCI devices needing the * callback installed by it. */ int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *)) { int ret; if (!func) return -EINVAL; down_write(&pci_acpi_companion_lookup_sem); if (pci_acpi_find_companion_hook) { ret = -EBUSY; } else { pci_acpi_find_companion_hook = func; ret = 0; } up_write(&pci_acpi_companion_lookup_sem); return ret; } EXPORT_SYMBOL_GPL(pci_acpi_set_companion_lookup_hook); /** * pci_acpi_clear_companion_lookup_hook - Clear ACPI companion lookup callback. * * Clear the special ACPI companion lookup callback previously set by * pci_acpi_set_companion_lookup_hook(). Block until the last running instance * of the callback returns before clearing it. * * The caller is responsible for the appropriate ordering of the invocations of * this function with respect to the enumeration of the PCI devices needing the * callback cleared by it. */ void pci_acpi_clear_companion_lookup_hook(void) { down_write(&pci_acpi_companion_lookup_sem); pci_acpi_find_companion_hook = NULL; up_write(&pci_acpi_companion_lookup_sem); } EXPORT_SYMBOL_GPL(pci_acpi_clear_companion_lookup_hook); static struct acpi_device *acpi_pci_find_companion(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct acpi_device *adev; bool check_children; u64 addr; if (!dev->parent) return NULL; down_read(&pci_acpi_companion_lookup_sem); adev = pci_acpi_find_companion_hook ? 
pci_acpi_find_companion_hook(pci_dev) : NULL; up_read(&pci_acpi_companion_lookup_sem); if (adev) return adev; check_children = pci_is_bridge(pci_dev); /* Please ref to ACPI spec for the syntax of _ADR */ addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); adev = acpi_find_child_device(ACPI_COMPANION(dev->parent), addr, check_children); /* * There may be ACPI device objects in the ACPI namespace that are * children of the device object representing the host bridge, but don't * represent PCI devices. Both _HID and _ADR may be present for them, * even though that is against the specification (for example, see * Section 6.1 of ACPI 6.3), but in many cases the _ADR returns 0 which * appears to indicate that they should not be taken into consideration * as potential companions of PCI devices on the root bus. * * To catch this special case, disregard the returned device object if * it has a valid _HID, addr is 0 and the PCI device at hand is on the * root bus. */ if (adev && adev->pnp.type.platform_id && !addr && pci_is_root_bus(pci_dev->bus)) return NULL; return adev; } /** * pci_acpi_optimize_delay - optimize PCI D3 and D3cold delay from ACPI * @pdev: the PCI device whose delay is to be updated * @handle: ACPI handle of this device * * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM * control method of either the device itself or the PCI host bridge. * * Function 8, "Reset Delay," applies to the entire hierarchy below a PCI * host bridge. If it returns one, the OS may assume that all devices in * the hierarchy have already completed power-on reset delays. * * Function 9, "Device Readiness Durations," applies only to the object * where it is located. It returns delay durations required after various * events if the device requires less time than the spec requires. Delays * from this function take precedence over the Reset Delay function. * * These _DSM functions are defined by the draft ECN of January 28, 2014, * titled "ACPI additions for FW latency optimizations." */ static void pci_acpi_optimize_delay(struct pci_dev *pdev, acpi_handle handle) { struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus); int value; union acpi_object *obj, *elements; if (bridge->ignore_reset_delay) pdev->d3cold_delay = 0; obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3, DSM_PCI_DEVICE_READINESS_DURATIONS, NULL); if (!obj) return; if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 5) { elements = obj->package.elements; if (elements[0].type == ACPI_TYPE_INTEGER) { value = (int)elements[0].integer.value / 1000; if (value < PCI_PM_D3COLD_WAIT) pdev->d3cold_delay = value; } if (elements[3].type == ACPI_TYPE_INTEGER) { value = (int)elements[3].integer.value / 1000; if (value < PCI_PM_D3HOT_WAIT) pdev->d3hot_delay = value; } } ACPI_FREE(obj); } static void pci_acpi_set_external_facing(struct pci_dev *dev) { u8 val; if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) return; if (device_property_read_u8(&dev->dev, "ExternalFacingPort", &val)) return; /* * These root ports expose PCIe (including DMA) outside of the * system. Everything downstream from them is external. 
*/ if (val) dev->external_facing = 1; } void pci_acpi_setup(struct device *dev, struct acpi_device *adev) { struct pci_dev *pci_dev = to_pci_dev(dev); pci_acpi_optimize_delay(pci_dev, adev->handle); pci_acpi_set_external_facing(pci_dev); pci_acpi_add_edr_notifier(pci_dev); pci_acpi_add_pm_notifier(adev, pci_dev); if (!adev->wakeup.flags.valid) return; device_set_wakeup_capable(dev, true); /* * For bridges that can do D3 we enable wake automatically (as * we do for the power management itself in that case). The * reason is that the bridge may have additional methods such as * _DSW that need to be called. */ if (pci_dev->bridge_d3) device_wakeup_enable(dev); acpi_pci_wakeup(pci_dev, false); acpi_device_power_add_dependent(adev, dev); if (pci_is_bridge(pci_dev)) acpi_dev_power_up_children_with_adr(adev); } void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev) { struct pci_dev *pci_dev = to_pci_dev(dev); pci_acpi_remove_edr_notifier(pci_dev); pci_acpi_remove_pm_notifier(adev); if (adev->wakeup.flags.valid) { acpi_device_power_remove_dependent(adev, dev); if (pci_dev->bridge_d3) device_wakeup_disable(dev); device_set_wakeup_capable(dev, false); } } static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev); /** * pci_msi_register_fwnode_provider - Register callback to retrieve fwnode * @fn: Callback matching a device to a fwnode that identifies a PCI * MSI domain. * * This should be called by irqchip driver, which is the parent of * the MSI domain to provide callback interface to query fwnode. */ void pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *)) { pci_msi_get_fwnode_cb = fn; } /** * pci_host_bridge_acpi_msi_domain - Retrieve MSI domain of a PCI host bridge * @bus: The PCI host bridge bus. * * This function uses the callback function registered by * pci_msi_register_fwnode_provider() to retrieve the irq_domain with * type DOMAIN_BUS_PCI_MSI of the specified host bridge bus. * This returns NULL on error or when the domain is not found. */ struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { struct fwnode_handle *fwnode; if (!pci_msi_get_fwnode_cb) return NULL; fwnode = pci_msi_get_fwnode_cb(&bus->dev); if (!fwnode) return NULL; return irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI); } static int __init acpi_pci_init(void) { if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) { pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n"); pci_no_msi(); } if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) { pr_info("ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); pcie_no_aspm(); } if (acpi_pci_disabled) return 0; acpi_pci_slot_init(); acpiphp_init(); return 0; } arch_initcall(acpi_pci_init);
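/*
 * Illustrative sketch only (not part of pci-acpi.c above): the core of
 * program_hpx_type3_register() is a match-then-read-modify-write sequence on a
 * 32-bit config register. The standalone userspace demo below reproduces that
 * arithmetic so the masking order is easy to verify; the struct hpx3_demo and
 * all values in main() are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

struct hpx3_demo {
	uint32_t match_mask_and;
	uint32_t match_value;
	uint32_t reg_mask_and;
	uint32_t reg_mask_or;
};

/* Apply the _HPX3 rule to 'reg' only when 'match' selects this register. */
static uint32_t hpx3_apply(uint32_t match, uint32_t reg, const struct hpx3_demo *d)
{
	if ((match & d->match_mask_and) != d->match_value)
		return reg;			/* no match: leave the register untouched */
	return (reg & d->reg_mask_and) | d->reg_mask_or;
}

int main(void)
{
	const struct hpx3_demo d = {
		.match_mask_and = 0x0000ffff,	/* compare the low 16 bits ... */
		.match_value    = 0x00001234,	/* ... against this value */
		.reg_mask_and   = 0xffffff0f,	/* clear bits 7:4 ... */
		.reg_mask_or    = 0x00000050,	/* ... then force them to 0x5 */
	};
	uint32_t before = 0xdeadbeef;
	uint32_t after = hpx3_apply(0x00001234, before, &d);

	printf("0x%08x -> 0x%08x\n", (unsigned)before, (unsigned)after);
	return 0;
}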
linux-master
drivers/pci/pci-acpi.c
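/*
 * Illustrative sketch only: acpi_pci_find_companion() in the file above builds
 * the ACPI _ADR value for a PCI function as (device << 16) | function. The
 * standalone helper below shows that encoding for a devfn; DEMO_PCI_SLOT and
 * DEMO_PCI_FUNC are local stand-ins for the kernel's PCI_SLOT()/PCI_FUNC() so
 * the demo compiles outside the kernel tree.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define DEMO_PCI_FUNC(devfn)	((devfn) & 0x07)

static uint64_t devfn_to_adr(unsigned int devfn)
{
	return ((uint64_t)DEMO_PCI_SLOT(devfn) << 16) | DEMO_PCI_FUNC(devfn);
}

int main(void)
{
	unsigned int devfn = (0x1c << 3) | 3;	/* device 0x1c, function 3 */

	/* Expected: _ADR 0x1c0003 */
	printf("devfn 0x%02x -> _ADR 0x%llx\n", devfn,
	       (unsigned long long)devfn_to_adr(devfn));
	return 0;
}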
// SPDX-License-Identifier: GPL-2.0 /* * Xen PCI Frontend * * Author: Ryan Wilson <[email protected]> */ #include <linux/module.h> #include <linux/init.h> #include <linux/mm.h> #include <xen/xenbus.h> #include <xen/events.h> #include <xen/grant_table.h> #include <xen/page.h> #include <linux/spinlock.h> #include <linux/pci.h> #include <linux/msi.h> #include <xen/interface/io/pciif.h> #include <asm/xen/pci.h> #include <linux/interrupt.h> #include <linux/atomic.h> #include <linux/workqueue.h> #include <linux/bitops.h> #include <linux/time.h> #include <linux/ktime.h> #include <xen/platform_pci.h> #include <asm/xen/swiotlb-xen.h> #define INVALID_EVTCHN (-1) struct pci_bus_entry { struct list_head list; struct pci_bus *bus; }; #define _PDEVB_op_active (0) #define PDEVB_op_active (1 << (_PDEVB_op_active)) struct pcifront_device { struct xenbus_device *xdev; struct list_head root_buses; int evtchn; grant_ref_t gnt_ref; int irq; /* Lock this when doing any operations in sh_info */ spinlock_t sh_info_lock; struct xen_pci_sharedinfo *sh_info; struct work_struct op_work; unsigned long flags; }; struct pcifront_sd { struct pci_sysdata sd; struct pcifront_device *pdev; }; static inline struct pcifront_device * pcifront_get_pdev(struct pcifront_sd *sd) { return sd->pdev; } static inline void pcifront_init_sd(struct pcifront_sd *sd, unsigned int domain, unsigned int bus, struct pcifront_device *pdev) { /* Because we do not expose that information via XenBus. */ sd->sd.node = first_online_node; sd->sd.domain = domain; sd->pdev = pdev; } static DEFINE_SPINLOCK(pcifront_dev_lock); static struct pcifront_device *pcifront_dev; static int errno_to_pcibios_err(int errno) { switch (errno) { case XEN_PCI_ERR_success: return PCIBIOS_SUCCESSFUL; case XEN_PCI_ERR_dev_not_found: return PCIBIOS_DEVICE_NOT_FOUND; case XEN_PCI_ERR_invalid_offset: case XEN_PCI_ERR_op_failed: return PCIBIOS_BAD_REGISTER_NUMBER; case XEN_PCI_ERR_not_implemented: return PCIBIOS_FUNC_NOT_SUPPORTED; case XEN_PCI_ERR_access_denied: return PCIBIOS_SET_FAILED; } return errno; } static inline void schedule_pcifront_aer_op(struct pcifront_device *pdev) { if (test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags) && !test_and_set_bit(_PDEVB_op_active, &pdev->flags)) { dev_dbg(&pdev->xdev->dev, "schedule aer frontend job\n"); schedule_work(&pdev->op_work); } } static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op) { int err = 0; struct xen_pci_op *active_op = &pdev->sh_info->op; unsigned long irq_flags; evtchn_port_t port = pdev->evtchn; unsigned int irq = pdev->irq; s64 ns, ns_timeout; spin_lock_irqsave(&pdev->sh_info_lock, irq_flags); memcpy(active_op, op, sizeof(struct xen_pci_op)); /* Go */ wmb(); set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); notify_remote_via_evtchn(port); /* * We set a poll timeout of 3 seconds but give up on return after * 2 seconds. It is better to time out too late rather than too early * (in the latter case we end up continually re-executing poll() with a * timeout in the past). 1s difference gives plenty of slack for error. 
*/ ns_timeout = ktime_get_ns() + 2 * (s64)NSEC_PER_SEC; xen_clear_irq_pending(irq); while (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)) { xen_poll_irq_timeout(irq, jiffies + 3*HZ); xen_clear_irq_pending(irq); ns = ktime_get_ns(); if (ns > ns_timeout) { dev_err(&pdev->xdev->dev, "pciback not responding!!!\n"); clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags); err = XEN_PCI_ERR_dev_not_found; goto out; } } /* * We might lose backend service request since we * reuse same evtchn with pci_conf backend response. So re-schedule * aer pcifront service. */ if (test_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags)) { dev_err(&pdev->xdev->dev, "schedule aer pcifront service\n"); schedule_pcifront_aer_op(pdev); } memcpy(op, active_op, sizeof(struct xen_pci_op)); err = op->err; out: spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags); return err; } /* Access to this function is spinlocked in drivers/pci/access.c */ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { int err = 0; struct xen_pci_op op = { .cmd = XEN_PCI_OP_conf_read, .domain = pci_domain_nr(bus), .bus = bus->number, .devfn = devfn, .offset = where, .size = size, }; struct pcifront_sd *sd = bus->sysdata; struct pcifront_device *pdev = pcifront_get_pdev(sd); dev_dbg(&pdev->xdev->dev, "read dev=%04x:%02x:%02x.%d - offset %x size %d\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size); err = do_pci_op(pdev, &op); if (likely(!err)) { dev_dbg(&pdev->xdev->dev, "read got back value %x\n", op.value); *val = op.value; } else if (err == -ENODEV) { /* No device here, pretend that it just returned 0 */ err = 0; *val = 0; } return errno_to_pcibios_err(err); } /* Access to this function is spinlocked in drivers/pci/access.c */ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { struct xen_pci_op op = { .cmd = XEN_PCI_OP_conf_write, .domain = pci_domain_nr(bus), .bus = bus->number, .devfn = devfn, .offset = where, .size = size, .value = val, }; struct pcifront_sd *sd = bus->sysdata; struct pcifront_device *pdev = pcifront_get_pdev(sd); dev_dbg(&pdev->xdev->dev, "write dev=%04x:%02x:%02x.%d - offset %x size %d val %x\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val); return errno_to_pcibios_err(do_pci_op(pdev, &op)); } static struct pci_ops pcifront_bus_ops = { .read = pcifront_bus_read, .write = pcifront_bus_write, }; #ifdef CONFIG_PCI_MSI static int pci_frontend_enable_msix(struct pci_dev *dev, int vector[], int nvec) { int err; int i; struct xen_pci_op op = { .cmd = XEN_PCI_OP_enable_msix, .domain = pci_domain_nr(dev->bus), .bus = dev->bus->number, .devfn = dev->devfn, .value = nvec, }; struct pcifront_sd *sd = dev->bus->sysdata; struct pcifront_device *pdev = pcifront_get_pdev(sd); struct msi_desc *entry; if (nvec > SH_INFO_MAX_VEC) { pci_err(dev, "too many vectors (0x%x) for PCI frontend:" " Increase SH_INFO_MAX_VEC\n", nvec); return -EINVAL; } i = 0; msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) { op.msix_entries[i].entry = entry->msi_index; /* Vector is useless at this point. 
*/ op.msix_entries[i].vector = -1; i++; } err = do_pci_op(pdev, &op); if (likely(!err)) { if (likely(!op.value)) { /* we get the result */ for (i = 0; i < nvec; i++) { if (op.msix_entries[i].vector <= 0) { pci_warn(dev, "MSI-X entry %d is invalid: %d!\n", i, op.msix_entries[i].vector); err = -EINVAL; vector[i] = -1; continue; } vector[i] = op.msix_entries[i].vector; } } else { pr_info("enable msix get value %x\n", op.value); err = op.value; } } else { pci_err(dev, "enable msix get err %x\n", err); } return err; } static void pci_frontend_disable_msix(struct pci_dev *dev) { int err; struct xen_pci_op op = { .cmd = XEN_PCI_OP_disable_msix, .domain = pci_domain_nr(dev->bus), .bus = dev->bus->number, .devfn = dev->devfn, }; struct pcifront_sd *sd = dev->bus->sysdata; struct pcifront_device *pdev = pcifront_get_pdev(sd); err = do_pci_op(pdev, &op); /* What should do for error ? */ if (err) pci_err(dev, "pci_disable_msix get err %x\n", err); } static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[]) { int err; struct xen_pci_op op = { .cmd = XEN_PCI_OP_enable_msi, .domain = pci_domain_nr(dev->bus), .bus = dev->bus->number, .devfn = dev->devfn, }; struct pcifront_sd *sd = dev->bus->sysdata; struct pcifront_device *pdev = pcifront_get_pdev(sd); err = do_pci_op(pdev, &op); if (likely(!err)) { vector[0] = op.value; if (op.value <= 0) { pci_warn(dev, "MSI entry is invalid: %d!\n", op.value); err = -EINVAL; vector[0] = -1; } } else { pci_err(dev, "pci frontend enable msi failed for dev " "%x:%x\n", op.bus, op.devfn); err = -EINVAL; } return err; } static void pci_frontend_disable_msi(struct pci_dev *dev) { int err; struct xen_pci_op op = { .cmd = XEN_PCI_OP_disable_msi, .domain = pci_domain_nr(dev->bus), .bus = dev->bus->number, .devfn = dev->devfn, }; struct pcifront_sd *sd = dev->bus->sysdata; struct pcifront_device *pdev = pcifront_get_pdev(sd); err = do_pci_op(pdev, &op); if (err == XEN_PCI_ERR_dev_not_found) { /* XXX No response from backend, what shall we do? */ pr_info("get no response from backend for disable MSI\n"); return; } if (err) /* how can pciback notify us fail? */ pr_info("get fake response from backend\n"); } static struct xen_pci_frontend_ops pci_frontend_ops = { .enable_msi = pci_frontend_enable_msi, .disable_msi = pci_frontend_disable_msi, .enable_msix = pci_frontend_enable_msix, .disable_msix = pci_frontend_disable_msix, }; static void pci_frontend_registrar(int enable) { if (enable) xen_pci_frontend = &pci_frontend_ops; else xen_pci_frontend = NULL; }; #else static inline void pci_frontend_registrar(int enable) { }; #endif /* CONFIG_PCI_MSI */ /* Claim resources for the PCI frontend as-is, backend won't allow changes */ static int pcifront_claim_resource(struct pci_dev *dev, void *data) { struct pcifront_device *pdev = data; int i; struct resource *r; pci_dev_for_each_resource(dev, r, i) { if (!r->parent && r->start && r->flags) { dev_info(&pdev->xdev->dev, "claiming resource %s/%d\n", pci_name(dev), i); if (pci_claim_resource(dev, i)) { dev_err(&pdev->xdev->dev, "Could not claim resource %s/%d! " "Device offline. Try using e820_host=1 in the guest config.\n", pci_name(dev), i); } } } return 0; } static int pcifront_scan_bus(struct pcifront_device *pdev, unsigned int domain, unsigned int bus, struct pci_bus *b) { struct pci_dev *d; unsigned int devfn; /* * Scan the bus for functions and add. * We omit handling of PCI bridge attachment because pciback prevents * bridges from being exported. 
*/ for (devfn = 0; devfn < 0x100; devfn++) { d = pci_get_slot(b, devfn); if (d) { /* Device is already known. */ pci_dev_put(d); continue; } d = pci_scan_single_device(b, devfn); if (d) dev_info(&pdev->xdev->dev, "New device on " "%04x:%02x:%02x.%d found.\n", domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); } return 0; } static int pcifront_scan_root(struct pcifront_device *pdev, unsigned int domain, unsigned int bus) { struct pci_bus *b; LIST_HEAD(resources); struct pcifront_sd *sd = NULL; struct pci_bus_entry *bus_entry = NULL; int err = 0; static struct resource busn_res = { .start = 0, .end = 255, .flags = IORESOURCE_BUS, }; #ifndef CONFIG_PCI_DOMAINS if (domain != 0) { dev_err(&pdev->xdev->dev, "PCI Root in non-zero PCI Domain! domain=%d\n", domain); dev_err(&pdev->xdev->dev, "Please compile with CONFIG_PCI_DOMAINS\n"); err = -EINVAL; goto err_out; } #endif dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n", domain, bus); bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL); sd = kzalloc(sizeof(*sd), GFP_KERNEL); if (!bus_entry || !sd) { err = -ENOMEM; goto err_out; } pci_add_resource(&resources, &ioport_resource); pci_add_resource(&resources, &iomem_resource); pci_add_resource(&resources, &busn_res); pcifront_init_sd(sd, domain, bus, pdev); pci_lock_rescan_remove(); b = pci_scan_root_bus(&pdev->xdev->dev, bus, &pcifront_bus_ops, sd, &resources); if (!b) { dev_err(&pdev->xdev->dev, "Error creating PCI Frontend Bus!\n"); err = -ENOMEM; pci_unlock_rescan_remove(); pci_free_resource_list(&resources); goto err_out; } bus_entry->bus = b; list_add(&bus_entry->list, &pdev->root_buses); /* * pci_scan_root_bus skips devices which do not have a * devfn==0. The pcifront_scan_bus enumerates all devfn. */ err = pcifront_scan_bus(pdev, domain, bus, b); /* Claim resources before going "live" with our devices */ pci_walk_bus(b, pcifront_claim_resource, pdev); /* Create SysFS and notify udev of the devices. Aka: "going live" */ pci_bus_add_devices(b); pci_unlock_rescan_remove(); return err; err_out: kfree(bus_entry); kfree(sd); return err; } static int pcifront_rescan_root(struct pcifront_device *pdev, unsigned int domain, unsigned int bus) { int err; struct pci_bus *b; b = pci_find_bus(domain, bus); if (!b) /* If the bus is unknown, create it. */ return pcifront_scan_root(pdev, domain, bus); dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n", domain, bus); err = pcifront_scan_bus(pdev, domain, bus, b); /* Claim resources before going "live" with our devices */ pci_walk_bus(b, pcifront_claim_resource, pdev); /* Create SysFS and notify udev of the devices. 
Aka: "going live" */ pci_bus_add_devices(b); return err; } static void free_root_bus_devs(struct pci_bus *bus) { struct pci_dev *dev; while (!list_empty(&bus->devices)) { dev = container_of(bus->devices.next, struct pci_dev, bus_list); pci_dbg(dev, "removing device\n"); pci_stop_and_remove_bus_device(dev); } } static void pcifront_free_roots(struct pcifront_device *pdev) { struct pci_bus_entry *bus_entry, *t; dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n"); pci_lock_rescan_remove(); list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) { list_del(&bus_entry->list); free_root_bus_devs(bus_entry->bus); kfree(bus_entry->bus->sysdata); device_unregister(bus_entry->bus->bridge); pci_remove_bus(bus_entry->bus); kfree(bus_entry); } pci_unlock_rescan_remove(); } static pci_ers_result_t pcifront_common_process(int cmd, struct pcifront_device *pdev, pci_channel_state_t state) { struct pci_driver *pdrv; int bus = pdev->sh_info->aer_op.bus; int devfn = pdev->sh_info->aer_op.devfn; int domain = pdev->sh_info->aer_op.domain; struct pci_dev *pcidev; dev_dbg(&pdev->xdev->dev, "pcifront AER process: cmd %x (bus:%x, devfn%x)", cmd, bus, devfn); pcidev = pci_get_domain_bus_and_slot(domain, bus, devfn); if (!pcidev || !pcidev->dev.driver) { dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n"); pci_dev_put(pcidev); return PCI_ERS_RESULT_NONE; } pdrv = to_pci_driver(pcidev->dev.driver); if (pdrv->err_handler && pdrv->err_handler->error_detected) { pci_dbg(pcidev, "trying to call AER service\n"); switch (cmd) { case XEN_PCI_OP_aer_detected: return pdrv->err_handler->error_detected(pcidev, state); case XEN_PCI_OP_aer_mmio: return pdrv->err_handler->mmio_enabled(pcidev); case XEN_PCI_OP_aer_slotreset: return pdrv->err_handler->slot_reset(pcidev); case XEN_PCI_OP_aer_resume: pdrv->err_handler->resume(pcidev); return PCI_ERS_RESULT_NONE; default: dev_err(&pdev->xdev->dev, "bad request in aer recovery operation!\n"); } } return PCI_ERS_RESULT_NONE; } static void pcifront_do_aer(struct work_struct *data) { struct pcifront_device *pdev = container_of(data, struct pcifront_device, op_work); int cmd = pdev->sh_info->aer_op.cmd; pci_channel_state_t state = (pci_channel_state_t)pdev->sh_info->aer_op.err; /* * If a pci_conf op is in progress, we have to wait until it is done * before service aer op */ dev_dbg(&pdev->xdev->dev, "pcifront service aer bus %x devfn %x\n", pdev->sh_info->aer_op.bus, pdev->sh_info->aer_op.devfn); pdev->sh_info->aer_op.err = pcifront_common_process(cmd, pdev, state); /* Post the operation to the guest. 
*/ wmb(); clear_bit(_XEN_PCIB_active, (unsigned long *)&pdev->sh_info->flags); notify_remote_via_evtchn(pdev->evtchn); /*in case of we lost an aer request in four lines time_window*/ smp_mb__before_atomic(); clear_bit(_PDEVB_op_active, &pdev->flags); smp_mb__after_atomic(); schedule_pcifront_aer_op(pdev); } static irqreturn_t pcifront_handler_aer(int irq, void *dev) { struct pcifront_device *pdev = dev; schedule_pcifront_aer_op(pdev); return IRQ_HANDLED; } static int pcifront_connect_and_init_dma(struct pcifront_device *pdev) { int err = 0; spin_lock(&pcifront_dev_lock); if (!pcifront_dev) { dev_info(&pdev->xdev->dev, "Installing PCI frontend\n"); pcifront_dev = pdev; } else err = -EEXIST; spin_unlock(&pcifront_dev_lock); return err; } static void pcifront_disconnect(struct pcifront_device *pdev) { spin_lock(&pcifront_dev_lock); if (pdev == pcifront_dev) { dev_info(&pdev->xdev->dev, "Disconnecting PCI Frontend Buses\n"); pcifront_dev = NULL; } spin_unlock(&pcifront_dev_lock); } static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev) { struct pcifront_device *pdev; pdev = kzalloc(sizeof(struct pcifront_device), GFP_KERNEL); if (pdev == NULL) goto out; if (xenbus_setup_ring(xdev, GFP_KERNEL, (void **)&pdev->sh_info, 1, &pdev->gnt_ref)) { kfree(pdev); pdev = NULL; goto out; } pdev->sh_info->flags = 0; /*Flag for registering PV AER handler*/ set_bit(_XEN_PCIB_AERHANDLER, (void *)&pdev->sh_info->flags); dev_set_drvdata(&xdev->dev, pdev); pdev->xdev = xdev; INIT_LIST_HEAD(&pdev->root_buses); spin_lock_init(&pdev->sh_info_lock); pdev->evtchn = INVALID_EVTCHN; pdev->irq = -1; INIT_WORK(&pdev->op_work, pcifront_do_aer); dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n", pdev, pdev->sh_info); out: return pdev; } static void free_pdev(struct pcifront_device *pdev) { dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev); pcifront_free_roots(pdev); cancel_work_sync(&pdev->op_work); if (pdev->irq >= 0) unbind_from_irqhandler(pdev->irq, pdev); if (pdev->evtchn != INVALID_EVTCHN) xenbus_free_evtchn(pdev->xdev, pdev->evtchn); xenbus_teardown_ring((void **)&pdev->sh_info, 1, &pdev->gnt_ref); dev_set_drvdata(&pdev->xdev->dev, NULL); kfree(pdev); } static int pcifront_publish_info(struct pcifront_device *pdev) { int err = 0; struct xenbus_transaction trans; err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn); if (err) goto out; err = bind_evtchn_to_irqhandler(pdev->evtchn, pcifront_handler_aer, 0, "pcifront", pdev); if (err < 0) return err; pdev->irq = err; do_publish: err = xenbus_transaction_start(&trans); if (err) { xenbus_dev_fatal(pdev->xdev, err, "Error writing configuration for backend " "(start transaction)"); goto out; } err = xenbus_printf(trans, pdev->xdev->nodename, "pci-op-ref", "%u", pdev->gnt_ref); if (!err) err = xenbus_printf(trans, pdev->xdev->nodename, "event-channel", "%u", pdev->evtchn); if (!err) err = xenbus_printf(trans, pdev->xdev->nodename, "magic", XEN_PCI_MAGIC); if (err) { xenbus_transaction_end(trans, 1); xenbus_dev_fatal(pdev->xdev, err, "Error writing configuration for backend"); goto out; } else { err = xenbus_transaction_end(trans, 0); if (err == -EAGAIN) goto do_publish; else if (err) { xenbus_dev_fatal(pdev->xdev, err, "Error completing transaction " "for backend"); goto out; } } xenbus_switch_state(pdev->xdev, XenbusStateInitialised); dev_dbg(&pdev->xdev->dev, "publishing successful!\n"); out: return err; } static void pcifront_connect(struct pcifront_device *pdev) { int err; int i, num_roots, len; char str[64]; unsigned int domain, bus; 
err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "root_num", "%d", &num_roots); if (err == -ENOENT) { xenbus_dev_error(pdev->xdev, err, "No PCI Roots found, trying 0000:00"); err = pcifront_rescan_root(pdev, 0, 0); if (err) { xenbus_dev_fatal(pdev->xdev, err, "Error scanning PCI root 0000:00"); return; } num_roots = 0; } else if (err != 1) { xenbus_dev_fatal(pdev->xdev, err >= 0 ? -EINVAL : err, "Error reading number of PCI roots"); return; } for (i = 0; i < num_roots; i++) { len = snprintf(str, sizeof(str), "root-%d", i); if (unlikely(len >= (sizeof(str) - 1))) return; err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%x:%x", &domain, &bus); if (err != 2) { xenbus_dev_fatal(pdev->xdev, err >= 0 ? -EINVAL : err, "Error reading PCI root %d", i); return; } err = pcifront_rescan_root(pdev, domain, bus); if (err) { xenbus_dev_fatal(pdev->xdev, err, "Error scanning PCI root %04x:%02x", domain, bus); return; } } xenbus_switch_state(pdev->xdev, XenbusStateConnected); } static void pcifront_try_connect(struct pcifront_device *pdev) { int err; /* Only connect once */ if (xenbus_read_driver_state(pdev->xdev->nodename) != XenbusStateInitialised) return; err = pcifront_connect_and_init_dma(pdev); if (err && err != -EEXIST) { xenbus_dev_fatal(pdev->xdev, err, "Error setting up PCI Frontend"); return; } pcifront_connect(pdev); } static int pcifront_try_disconnect(struct pcifront_device *pdev) { int err = 0; enum xenbus_state prev_state; prev_state = xenbus_read_driver_state(pdev->xdev->nodename); if (prev_state >= XenbusStateClosing) goto out; if (prev_state == XenbusStateConnected) { pcifront_free_roots(pdev); pcifront_disconnect(pdev); } err = xenbus_switch_state(pdev->xdev, XenbusStateClosed); out: return err; } static void pcifront_attach_devices(struct pcifront_device *pdev) { if (xenbus_read_driver_state(pdev->xdev->nodename) == XenbusStateReconfiguring) pcifront_connect(pdev); } static int pcifront_detach_devices(struct pcifront_device *pdev) { int err = 0; int i, num_devs; enum xenbus_state state; unsigned int domain, bus, slot, func; struct pci_dev *pci_dev; char str[64]; state = xenbus_read_driver_state(pdev->xdev->nodename); if (state == XenbusStateInitialised) { dev_dbg(&pdev->xdev->dev, "Handle skipped connect.\n"); /* We missed Connected and need to initialize. */ err = pcifront_connect_and_init_dma(pdev); if (err && err != -EEXIST) { xenbus_dev_fatal(pdev->xdev, err, "Error setting up PCI Frontend"); goto out; } goto out_switch_state; } else if (state != XenbusStateConnected) { goto out; } err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d", &num_devs); if (err != 1) { if (err >= 0) err = -EINVAL; xenbus_dev_fatal(pdev->xdev, err, "Error reading number of PCI devices"); goto out; } /* Find devices being detached and remove them. */ for (i = 0; i < num_devs; i++) { int l, state; l = snprintf(str, sizeof(str), "state-%d", i); if (unlikely(l >= (sizeof(str) - 1))) { err = -ENOMEM; goto out; } state = xenbus_read_unsigned(pdev->xdev->otherend, str, XenbusStateUnknown); if (state != XenbusStateClosing) continue; /* Remove device. 
*/ l = snprintf(str, sizeof(str), "vdev-%d", i); if (unlikely(l >= (sizeof(str) - 1))) { err = -ENOMEM; goto out; } err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%x:%x:%x.%x", &domain, &bus, &slot, &func); if (err != 4) { if (err >= 0) err = -EINVAL; xenbus_dev_fatal(pdev->xdev, err, "Error reading PCI device %d", i); goto out; } pci_dev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, func)); if (!pci_dev) { dev_dbg(&pdev->xdev->dev, "Cannot get PCI device %04x:%02x:%02x.%d\n", domain, bus, slot, func); continue; } pci_lock_rescan_remove(); pci_stop_and_remove_bus_device(pci_dev); pci_dev_put(pci_dev); pci_unlock_rescan_remove(); dev_dbg(&pdev->xdev->dev, "PCI device %04x:%02x:%02x.%d removed.\n", domain, bus, slot, func); } out_switch_state: err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring); out: return err; } static void pcifront_backend_changed(struct xenbus_device *xdev, enum xenbus_state be_state) { struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev); switch (be_state) { case XenbusStateUnknown: case XenbusStateInitialising: case XenbusStateInitWait: case XenbusStateInitialised: break; case XenbusStateConnected: pcifront_try_connect(pdev); break; case XenbusStateClosed: if (xdev->state == XenbusStateClosed) break; fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: dev_warn(&xdev->dev, "backend going away!\n"); pcifront_try_disconnect(pdev); break; case XenbusStateReconfiguring: pcifront_detach_devices(pdev); break; case XenbusStateReconfigured: pcifront_attach_devices(pdev); break; } } static int pcifront_xenbus_probe(struct xenbus_device *xdev, const struct xenbus_device_id *id) { int err = 0; struct pcifront_device *pdev = alloc_pdev(xdev); if (pdev == NULL) { err = -ENOMEM; xenbus_dev_fatal(xdev, err, "Error allocating pcifront_device struct"); goto out; } err = pcifront_publish_info(pdev); if (err) free_pdev(pdev); out: return err; } static void pcifront_xenbus_remove(struct xenbus_device *xdev) { struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev); if (pdev) free_pdev(pdev); } static const struct xenbus_device_id xenpci_ids[] = { {"pci"}, {""}, }; static struct xenbus_driver xenpci_driver = { .name = "pcifront", .ids = xenpci_ids, .probe = pcifront_xenbus_probe, .remove = pcifront_xenbus_remove, .otherend_changed = pcifront_backend_changed, }; static int __init pcifront_init(void) { if (!xen_pv_domain() || xen_initial_domain()) return -ENODEV; if (!xen_has_pv_devices()) return -ENODEV; pci_frontend_registrar(1 /* enable */); return xenbus_register_frontend(&xenpci_driver); } static void __exit pcifront_cleanup(void) { xenbus_unregister_driver(&xenpci_driver); pci_frontend_registrar(0 /* disable */); } module_init(pcifront_init); module_exit(pcifront_cleanup); MODULE_DESCRIPTION("Xen PCI passthrough frontend."); MODULE_LICENSE("GPL"); MODULE_ALIAS("xen:pci");
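/*
 * Illustrative sketch only (not part of xen-pcifront.c above): do_pci_op()
 * polls with a generous per-iteration timeout but gives up once an absolute
 * deadline computed up front has passed, so a late wakeup never extends the
 * overall wait. The userspace loop below shows the same "deadline in ns,
 * re-check after every wait" structure; wait_for_backend() is a made-up stub
 * standing in for xen_poll_irq_timeout().
 */
#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Stand-in for the event-channel poll; pretend the backend answers on try 3. */
static bool wait_for_backend(int attempt)
{
	struct timespec pause = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 };

	nanosleep(&pause, NULL);
	return attempt >= 3;
}

int main(void)
{
	int64_t deadline = now_ns() + 2LL * 1000000000LL;	/* 2 s budget, as in do_pci_op() */
	int attempt = 0;

	while (!wait_for_backend(++attempt)) {
		if (now_ns() > deadline) {
			fprintf(stderr, "backend not responding\n");
			return 1;
		}
	}
	printf("backend answered after %d attempt(s)\n", attempt);
	return 0;
}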
linux-master
drivers/pci/xen-pcifront.c
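/*
 * Illustrative sketch only: pcifront_connect() and pcifront_detach_devices()
 * above build XenBus key names such as "root-%d" with snprintf() and bail out
 * if the formatted name would not fit in the buffer. The demo below isolates
 * that truncation check; build_root_key() and the 8-byte buffer are just
 * example choices.
 */
#include <stdio.h>

static int build_root_key(char *buf, size_t len, int index)
{
	int n = snprintf(buf, len, "root-%d", index);

	/*
	 * snprintf() returns the length it *wanted* to write; reject anything
	 * that did not fit (mirroring the "len >= sizeof(str) - 1" tests above).
	 */
	if (n < 0 || (size_t)n >= len - 1)
		return -1;
	return 0;
}

int main(void)
{
	char key[8];

	if (build_root_key(key, sizeof(key), 3) == 0)
		printf("key: %s\n", key);	/* "root-3" fits */
	if (build_root_key(key, sizeof(key), 123456) != 0)
		printf("key for index 123456 rejected (would be truncated)\n");
	return 0;
}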
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2016 Broadcom */ #include <linux/device.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/pci-ecam.h> #include <linux/slab.h> /* * On 64-bit systems, we do a single ioremap for the whole config space * since we have enough virtual address range available. On 32-bit, we * ioremap the config space for each bus individually. */ static const bool per_bus_mapping = !IS_ENABLED(CONFIG_64BIT); /* * Create a PCI config space window * - reserve mem region * - alloc struct pci_config_window with space for all mappings * - ioremap the config space */ struct pci_config_window *pci_ecam_create(struct device *dev, struct resource *cfgres, struct resource *busr, const struct pci_ecam_ops *ops) { unsigned int bus_shift = ops->bus_shift; struct pci_config_window *cfg; unsigned int bus_range, bus_range_max, bsz; struct resource *conflict; int err; if (busr->start > busr->end) return ERR_PTR(-EINVAL); cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); if (!cfg) return ERR_PTR(-ENOMEM); /* ECAM-compliant platforms need not supply ops->bus_shift */ if (!bus_shift) bus_shift = PCIE_ECAM_BUS_SHIFT; cfg->parent = dev; cfg->ops = ops; cfg->busr.start = busr->start; cfg->busr.end = busr->end; cfg->busr.flags = IORESOURCE_BUS; cfg->bus_shift = bus_shift; bus_range = resource_size(&cfg->busr); bus_range_max = resource_size(cfgres) >> bus_shift; if (bus_range > bus_range_max) { bus_range = bus_range_max; cfg->busr.end = busr->start + bus_range - 1; dev_warn(dev, "ECAM area %pR can only accommodate %pR (reduced from %pR desired)\n", cfgres, &cfg->busr, busr); } bsz = 1 << bus_shift; cfg->res.start = cfgres->start; cfg->res.end = cfgres->end; cfg->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; cfg->res.name = "PCI ECAM"; conflict = request_resource_conflict(&iomem_resource, &cfg->res); if (conflict) { err = -EBUSY; dev_err(dev, "can't claim ECAM area %pR: address conflict with %s %pR\n", &cfg->res, conflict->name, conflict); goto err_exit; } if (per_bus_mapping) { cfg->winp = kcalloc(bus_range, sizeof(*cfg->winp), GFP_KERNEL); if (!cfg->winp) goto err_exit_malloc; } else { cfg->win = pci_remap_cfgspace(cfgres->start, bus_range * bsz); if (!cfg->win) goto err_exit_iomap; } if (ops->init) { err = ops->init(cfg); if (err) goto err_exit; } dev_info(dev, "ECAM at %pR for %pR\n", &cfg->res, &cfg->busr); return cfg; err_exit_iomap: dev_err(dev, "ECAM ioremap failed\n"); err_exit_malloc: err = -ENOMEM; err_exit: pci_ecam_free(cfg); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(pci_ecam_create); void pci_ecam_free(struct pci_config_window *cfg) { int i; if (per_bus_mapping) { if (cfg->winp) { for (i = 0; i < resource_size(&cfg->busr); i++) if (cfg->winp[i]) iounmap(cfg->winp[i]); kfree(cfg->winp); } } else { if (cfg->win) iounmap(cfg->win); } if (cfg->res.parent) release_resource(&cfg->res); kfree(cfg); } EXPORT_SYMBOL_GPL(pci_ecam_free); static int pci_ecam_add_bus(struct pci_bus *bus) { struct pci_config_window *cfg = bus->sysdata; unsigned int bsz = 1 << cfg->bus_shift; unsigned int busn = bus->number; phys_addr_t start; if (!per_bus_mapping) return 0; if (busn < cfg->busr.start || busn > cfg->busr.end) return -EINVAL; busn -= cfg->busr.start; start = cfg->res.start + busn * bsz; cfg->winp[busn] = pci_remap_cfgspace(start, bsz); if (!cfg->winp[busn]) return -ENOMEM; return 0; } static void pci_ecam_remove_bus(struct pci_bus *bus) { struct pci_config_window *cfg = bus->sysdata; unsigned int busn = bus->number; if 
(!per_bus_mapping || busn < cfg->busr.start || busn > cfg->busr.end) return; busn -= cfg->busr.start; if (cfg->winp[busn]) { iounmap(cfg->winp[busn]); cfg->winp[busn] = NULL; } } /* * Function to implement the pci_ops ->map_bus method */ void __iomem *pci_ecam_map_bus(struct pci_bus *bus, unsigned int devfn, int where) { struct pci_config_window *cfg = bus->sysdata; unsigned int bus_shift = cfg->ops->bus_shift; unsigned int devfn_shift = cfg->ops->bus_shift - 8; unsigned int busn = bus->number; void __iomem *base; u32 bus_offset, devfn_offset; if (busn < cfg->busr.start || busn > cfg->busr.end) return NULL; busn -= cfg->busr.start; if (per_bus_mapping) { base = cfg->winp[busn]; busn = 0; } else base = cfg->win; if (cfg->ops->bus_shift) { bus_offset = (busn & PCIE_ECAM_BUS_MASK) << bus_shift; devfn_offset = (devfn & PCIE_ECAM_DEVFN_MASK) << devfn_shift; where &= PCIE_ECAM_REG_MASK; return base + (bus_offset | devfn_offset | where); } return base + PCIE_ECAM_OFFSET(busn, devfn, where); } EXPORT_SYMBOL_GPL(pci_ecam_map_bus); /* ECAM ops */ const struct pci_ecam_ops pci_generic_ecam_ops = { .pci_ops = { .add_bus = pci_ecam_add_bus, .remove_bus = pci_ecam_remove_bus, .map_bus = pci_ecam_map_bus, .read = pci_generic_config_read, .write = pci_generic_config_write, } }; EXPORT_SYMBOL_GPL(pci_generic_ecam_ops); #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) /* ECAM ops for 32-bit access only (non-compliant) */ const struct pci_ecam_ops pci_32b_ops = { .pci_ops = { .add_bus = pci_ecam_add_bus, .remove_bus = pci_ecam_remove_bus, .map_bus = pci_ecam_map_bus, .read = pci_generic_config_read32, .write = pci_generic_config_write32, } }; /* ECAM ops for 32-bit read only (non-compliant) */ const struct pci_ecam_ops pci_32b_read_ops = { .pci_ops = { .add_bus = pci_ecam_add_bus, .remove_bus = pci_ecam_remove_bus, .map_bus = pci_ecam_map_bus, .read = pci_generic_config_read32, .write = pci_generic_config_write, } }; #endif
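/*
 * Illustrative sketch only (not part of ecam.c above): with the default ECAM
 * layout used by pci_ecam_map_bus(), a config-space address decomposes as
 * (bus << 20) | (devfn << 12) | (where & 0xfff). The standalone helper below
 * recomputes that offset; the DEMO_* macro names are local to this example.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ECAM_BUS_SHIFT	20
#define DEMO_ECAM_DEVFN_SHIFT	12
#define DEMO_ECAM_REG_MASK	0xfff

static uint32_t demo_ecam_offset(unsigned int bus, unsigned int devfn, int where)
{
	return (bus << DEMO_ECAM_BUS_SHIFT) |
	       (devfn << DEMO_ECAM_DEVFN_SHIFT) |
	       (where & DEMO_ECAM_REG_MASK);
}

int main(void)
{
	unsigned int devfn = (3 << 3) | 1;	/* device 3, function 1 */

	/* Bus 2, PCI_VENDOR_ID at offset 0x00: expect 0x219000 */
	printf("offset = 0x%06x\n", (unsigned)demo_ecam_offset(2, devfn, 0x00));
	/* Same function, an extended register at offset 0x100: expect 0x219100 */
	printf("offset = 0x%06x\n", (unsigned)demo_ecam_offset(2, devfn, 0x100));
	return 0;
}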
linux-master
drivers/pci/ecam.c
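/*
 * Illustrative sketch only: pci_ecam_create() in the file above clamps the
 * requested bus range to what the ECAM window can actually back, i.e.
 * resource_size(cfgres) >> bus_shift buses (1 MB of config space per bus with
 * the default shift of 20). The userspace demo below reproduces that clamp;
 * clamp_bus_range() and the window sizes are made up for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_ECAM_BUS_SHIFT	20	/* 1 MB of ECAM space per bus */

/* Return how many of 'wanted_buses' a window of 'window_bytes' can hold. */
static unsigned int clamp_bus_range(uint64_t window_bytes, unsigned int wanted_buses)
{
	uint64_t max_buses = window_bytes >> DEMO_ECAM_BUS_SHIFT;

	return wanted_buses > max_buses ? (unsigned int)max_buses : wanted_buses;
}

int main(void)
{
	/* A 16 MB window only covers 16 buses, even if 0..255 was requested. */
	printf("%u buses usable\n", clamp_bus_range(16ULL << 20, 256));
	/* A 256 MB window covers the full 0..255 bus range. */
	printf("%u buses usable\n", clamp_bus_range(256ULL << 20, 256));
	return 0;
}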
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018 Marvell * * Author: Thomas Petazzoni <[email protected]> * * This file helps PCI controller drivers implement a fake root port * PCI bridge when the HW doesn't provide such a root port PCI * bridge. * * It emulates a PCI bridge by providing a fake PCI configuration * space (and optionally a PCIe capability configuration space) in * memory. By default the read/write operations simply read and update * this fake configuration space in memory. However, PCI controller * drivers can provide through the 'struct pci_sw_bridge_ops' * structure a set of operations to override or complement this * default behavior. */ #include <linux/pci.h> #include "pci-bridge-emul.h" #define PCI_BRIDGE_CONF_END PCI_STD_HEADER_SIZEOF #define PCI_CAP_SSID_SIZEOF (PCI_SSVID_DEVICE_ID + 2) #define PCI_CAP_PCIE_SIZEOF (PCI_EXP_SLTSTA2 + 2) /** * struct pci_bridge_reg_behavior - register bits behaviors * @ro: Read-Only bits * @rw: Read-Write bits * @w1c: Write-1-to-Clear bits * * Reads and Writes will be filtered by specified behavior. All other bits not * declared are assumed 'Reserved' and will return 0 on reads, per PCIe 5.0: * "Reserved register fields must be read only and must return 0 (all 0's for * multi-bit fields) when read". */ struct pci_bridge_reg_behavior { /* Read-only bits */ u32 ro; /* Read-write bits */ u32 rw; /* Write-1-to-clear bits */ u32 w1c; }; static const struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = { [PCI_VENDOR_ID / 4] = { .ro = ~0 }, [PCI_COMMAND / 4] = { .rw = (PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_PARITY | PCI_COMMAND_SERR), .ro = ((PCI_COMMAND_SPECIAL | PCI_COMMAND_INVALIDATE | PCI_COMMAND_VGA_PALETTE | PCI_COMMAND_WAIT | PCI_COMMAND_FAST_BACK) | (PCI_STATUS_CAP_LIST | PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16), .w1c = PCI_STATUS_ERROR_BITS << 16, }, [PCI_CLASS_REVISION / 4] = { .ro = ~0 }, /* * Cache Line Size register: implement as read-only, we do not * pretend implementing "Memory Write and Invalidate" * transactions" * * Latency Timer Register: implemented as read-only, as "A * bridge that is not capable of a burst transfer of more than * two data phases on its primary interface is permitted to * hardwire the Latency Timer to a value of 16 or less" * * Header Type: always read-only * * BIST register: implemented as read-only, as "A bridge that * does not support BIST must implement this register as a * read-only register that returns 0 when read" */ [PCI_CACHE_LINE_SIZE / 4] = { .ro = ~0 }, /* * Base Address registers not used must be implemented as * read-only registers that return 0 when read. 
*/ [PCI_BASE_ADDRESS_0 / 4] = { .ro = ~0 }, [PCI_BASE_ADDRESS_1 / 4] = { .ro = ~0 }, [PCI_PRIMARY_BUS / 4] = { /* Primary, secondary and subordinate bus are RW */ .rw = GENMASK(24, 0), /* Secondary latency is read-only */ .ro = GENMASK(31, 24), }, [PCI_IO_BASE / 4] = { /* The high four bits of I/O base/limit are RW */ .rw = (GENMASK(15, 12) | GENMASK(7, 4)), /* The low four bits of I/O base/limit are RO */ .ro = (((PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16) | GENMASK(11, 8) | GENMASK(3, 0)), .w1c = PCI_STATUS_ERROR_BITS << 16, }, [PCI_MEMORY_BASE / 4] = { /* The high 12-bits of mem base/limit are RW */ .rw = GENMASK(31, 20) | GENMASK(15, 4), /* The low four bits of mem base/limit are RO */ .ro = GENMASK(19, 16) | GENMASK(3, 0), }, [PCI_PREF_MEMORY_BASE / 4] = { /* The high 12-bits of pref mem base/limit are RW */ .rw = GENMASK(31, 20) | GENMASK(15, 4), /* The low four bits of pref mem base/limit are RO */ .ro = GENMASK(19, 16) | GENMASK(3, 0), }, [PCI_PREF_BASE_UPPER32 / 4] = { .rw = ~0, }, [PCI_PREF_LIMIT_UPPER32 / 4] = { .rw = ~0, }, [PCI_IO_BASE_UPPER16 / 4] = { .rw = ~0, }, [PCI_CAPABILITY_LIST / 4] = { .ro = GENMASK(7, 0), }, /* * If expansion ROM is unsupported then ROM Base Address register must * be implemented as read-only register that return 0 when read, same * as for unused Base Address registers. */ [PCI_ROM_ADDRESS1 / 4] = { .ro = ~0, }, /* * Interrupt line (bits 7:0) are RW, interrupt pin (bits 15:8) * are RO, and bridge control (31:16) are a mix of RW, RO, * reserved and W1C bits */ [PCI_INTERRUPT_LINE / 4] = { /* Interrupt line is RW */ .rw = (GENMASK(7, 0) | ((PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR | PCI_BRIDGE_CTL_ISA | PCI_BRIDGE_CTL_VGA | PCI_BRIDGE_CTL_MASTER_ABORT | PCI_BRIDGE_CTL_BUS_RESET | BIT(8) | BIT(9) | BIT(11)) << 16)), /* Interrupt pin is RO */ .ro = (GENMASK(15, 8) | ((PCI_BRIDGE_CTL_FAST_BACK) << 16)), .w1c = BIT(10) << 16, }, }; static const struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] = { [PCI_CAP_LIST_ID / 4] = { /* * Capability ID, Next Capability Pointer and * bits [14:0] of Capabilities register are all read-only. * Bit 15 of Capabilities register is reserved. */ .ro = GENMASK(30, 0), }, [PCI_EXP_DEVCAP / 4] = { /* * Bits [31:29] and [17:16] are reserved. * Bits [27:18] are reserved for non-upstream ports. * Bits 28 and [14:6] are reserved for non-endpoint devices. * Other bits are read-only. */ .ro = BIT(15) | GENMASK(5, 0), }, [PCI_EXP_DEVCTL / 4] = { /* * Device control register is RW, except bit 15 which is * reserved for non-endpoints or non-PCIe-to-PCI/X bridges. */ .rw = GENMASK(14, 0), /* * Device status register has bits 6 and [3:0] W1C, [5:4] RO, * the rest is reserved. Also bit 6 is reserved for non-upstream * ports. */ .w1c = GENMASK(3, 0) << 16, .ro = GENMASK(5, 4) << 16, }, [PCI_EXP_LNKCAP / 4] = { /* * All bits are RO, except bit 23 which is reserved and * bit 18 which is reserved for non-upstream ports. */ .ro = lower_32_bits(~(BIT(23) | PCI_EXP_LNKCAP_CLKPM)), }, [PCI_EXP_LNKCTL / 4] = { /* * Link control has bits [15:14], [11:3] and [1:0] RW, the * rest is reserved. Bit 8 is reserved for non-upstream ports. * * Link status has bits [13:0] RO, and bits [15:14] * W1C. */ .rw = GENMASK(15, 14) | GENMASK(11, 9) | GENMASK(7, 3) | GENMASK(1, 0), .ro = GENMASK(13, 0) << 16, .w1c = GENMASK(15, 14) << 16, }, [PCI_EXP_SLTCAP / 4] = { .ro = ~0, }, [PCI_EXP_SLTCTL / 4] = { /* * Slot control has bits [14:0] RW, the rest is * reserved. 
* * Slot status has bits 8 and [4:0] W1C, bits [7:5] RO, the * rest is reserved. */ .rw = GENMASK(14, 0), .w1c = (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC) << 16, .ro = (PCI_EXP_SLTSTA_MRLSS | PCI_EXP_SLTSTA_PDS | PCI_EXP_SLTSTA_EIS) << 16, }, [PCI_EXP_RTCTL / 4] = { /* * Root control has bits [4:0] RW, the rest is * reserved. * * Root capabilities has bit 0 RO, the rest is reserved. */ .rw = (PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE | PCI_EXP_RTCTL_SEFEE | PCI_EXP_RTCTL_PMEIE | PCI_EXP_RTCTL_CRSSVE), .ro = PCI_EXP_RTCAP_CRSVIS << 16, }, [PCI_EXP_RTSTA / 4] = { /* * Root status has bits 17 and [15:0] RO, bit 16 W1C, the rest * is reserved. */ .ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING, .w1c = PCI_EXP_RTSTA_PME, }, [PCI_EXP_DEVCAP2 / 4] = { /* * Device capabilities 2 register has reserved bits [30:27]. * Also bits [26:24] are reserved for non-upstream ports. */ .ro = BIT(31) | GENMASK(23, 0), }, [PCI_EXP_DEVCTL2 / 4] = { /* * Device control 2 register is RW. Bit 11 is reserved for * non-upstream ports. * * Device status 2 register is reserved. */ .rw = GENMASK(15, 12) | GENMASK(10, 0), }, [PCI_EXP_LNKCAP2 / 4] = { /* Link capabilities 2 register has reserved bits [30:25] and 0. */ .ro = BIT(31) | GENMASK(24, 1), }, [PCI_EXP_LNKCTL2 / 4] = { /* * Link control 2 register is RW. * * Link status 2 register has bits 5, 15 W1C; * bits 10, 11 reserved and others are RO. */ .rw = GENMASK(15, 0), .w1c = (BIT(15) | BIT(5)) << 16, .ro = (GENMASK(14, 12) | GENMASK(9, 6) | GENMASK(4, 0)) << 16, }, [PCI_EXP_SLTCAP2 / 4] = { /* Slot capabilities 2 register is reserved. */ }, [PCI_EXP_SLTCTL2 / 4] = { /* Both Slot control 2 and Slot status 2 registers are reserved. */ }, }; static pci_bridge_emul_read_status_t pci_bridge_emul_read_ssid(struct pci_bridge_emul *bridge, int reg, u32 *value) { switch (reg) { case PCI_CAP_LIST_ID: *value = PCI_CAP_ID_SSVID | ((bridge->pcie_start > bridge->ssid_start) ? (bridge->pcie_start << 8) : 0); return PCI_BRIDGE_EMUL_HANDLED; case PCI_SSVID_VENDOR_ID: *value = bridge->subsystem_vendor_id | (bridge->subsystem_id << 16); return PCI_BRIDGE_EMUL_HANDLED; default: return PCI_BRIDGE_EMUL_NOT_HANDLED; } } /* * Initialize a pci_bridge_emul structure to represent a fake PCI * bridge configuration space. The caller needs to have initialized * the PCI configuration space with whatever values make sense * (typically at least vendor, device, revision), the ->ops pointer, * and optionally ->data and ->has_pcie. */ int pci_bridge_emul_init(struct pci_bridge_emul *bridge, unsigned int flags) { BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END); /* * class_revision: Class is high 24 bits and revision is low 8 bit * of this member, while class for PCI Bridge Normal Decode has the * 24-bit value: PCI_CLASS_BRIDGE_PCI_NORMAL */ bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI_NORMAL << 8); bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE; bridge->conf.cache_line_size = 0x10; bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST); bridge->pci_regs_behavior = kmemdup(pci_regs_behavior, sizeof(pci_regs_behavior), GFP_KERNEL); if (!bridge->pci_regs_behavior) return -ENOMEM; /* If ssid_start and pcie_start were not specified then choose the lowest possible value. 
*/ if (!bridge->ssid_start && !bridge->pcie_start) { if (bridge->subsystem_vendor_id) bridge->ssid_start = PCI_BRIDGE_CONF_END; if (bridge->has_pcie) bridge->pcie_start = bridge->ssid_start + PCI_CAP_SSID_SIZEOF; } else if (!bridge->ssid_start && bridge->subsystem_vendor_id) { if (bridge->pcie_start - PCI_BRIDGE_CONF_END >= PCI_CAP_SSID_SIZEOF) bridge->ssid_start = PCI_BRIDGE_CONF_END; else bridge->ssid_start = bridge->pcie_start + PCI_CAP_PCIE_SIZEOF; } else if (!bridge->pcie_start && bridge->has_pcie) { if (bridge->ssid_start - PCI_BRIDGE_CONF_END >= PCI_CAP_PCIE_SIZEOF) bridge->pcie_start = PCI_BRIDGE_CONF_END; else bridge->pcie_start = bridge->ssid_start + PCI_CAP_SSID_SIZEOF; } bridge->conf.capabilities_pointer = min(bridge->ssid_start, bridge->pcie_start); if (bridge->conf.capabilities_pointer) bridge->conf.status |= cpu_to_le16(PCI_STATUS_CAP_LIST); if (bridge->has_pcie) { bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP; bridge->pcie_conf.next = (bridge->ssid_start > bridge->pcie_start) ? bridge->ssid_start : 0; bridge->pcie_conf.cap |= cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4); bridge->pcie_cap_regs_behavior = kmemdup(pcie_cap_regs_behavior, sizeof(pcie_cap_regs_behavior), GFP_KERNEL); if (!bridge->pcie_cap_regs_behavior) { kfree(bridge->pci_regs_behavior); return -ENOMEM; } /* These bits are applicable only for PCI and reserved on PCIe */ bridge->pci_regs_behavior[PCI_CACHE_LINE_SIZE / 4].ro &= ~GENMASK(15, 8); bridge->pci_regs_behavior[PCI_COMMAND / 4].ro &= ~((PCI_COMMAND_SPECIAL | PCI_COMMAND_INVALIDATE | PCI_COMMAND_VGA_PALETTE | PCI_COMMAND_WAIT | PCI_COMMAND_FAST_BACK) | (PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16); bridge->pci_regs_behavior[PCI_PRIMARY_BUS / 4].ro &= ~GENMASK(31, 24); bridge->pci_regs_behavior[PCI_IO_BASE / 4].ro &= ~((PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MASK) << 16); bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].rw &= ~((PCI_BRIDGE_CTL_MASTER_ABORT | BIT(8) | BIT(9) | BIT(11)) << 16); bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].ro &= ~((PCI_BRIDGE_CTL_FAST_BACK) << 16); bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].w1c &= ~(BIT(10) << 16); } if (flags & PCI_BRIDGE_EMUL_NO_PREFMEM_FORWARD) { bridge->pci_regs_behavior[PCI_PREF_MEMORY_BASE / 4].ro = ~0; bridge->pci_regs_behavior[PCI_PREF_MEMORY_BASE / 4].rw = 0; } if (flags & PCI_BRIDGE_EMUL_NO_IO_FORWARD) { bridge->pci_regs_behavior[PCI_COMMAND / 4].ro |= PCI_COMMAND_IO; bridge->pci_regs_behavior[PCI_COMMAND / 4].rw &= ~PCI_COMMAND_IO; bridge->pci_regs_behavior[PCI_IO_BASE / 4].ro |= GENMASK(15, 0); bridge->pci_regs_behavior[PCI_IO_BASE / 4].rw &= ~GENMASK(15, 0); bridge->pci_regs_behavior[PCI_IO_BASE_UPPER16 / 4].ro = ~0; bridge->pci_regs_behavior[PCI_IO_BASE_UPPER16 / 4].rw = 0; } return 0; } EXPORT_SYMBOL_GPL(pci_bridge_emul_init); /* * Cleanup a pci_bridge_emul structure that was previously initialized * using pci_bridge_emul_init(). */ void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge) { if (bridge->has_pcie) kfree(bridge->pcie_cap_regs_behavior); kfree(bridge->pci_regs_behavior); } EXPORT_SYMBOL_GPL(pci_bridge_emul_cleanup); /* * Should be called by the PCI controller driver when reading the PCI * configuration space of the fake bridge. It will call back the * ->ops->read_base or ->ops->read_pcie operations. 
*/ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where, int size, u32 *value) { int ret; int reg = where & ~3; pci_bridge_emul_read_status_t (*read_op)(struct pci_bridge_emul *bridge, int reg, u32 *value); __le32 *cfgspace; const struct pci_bridge_reg_behavior *behavior; if (reg < PCI_BRIDGE_CONF_END) { /* Emulated PCI space */ read_op = bridge->ops->read_base; cfgspace = (__le32 *) &bridge->conf; behavior = bridge->pci_regs_behavior; } else if (reg >= bridge->ssid_start && reg < bridge->ssid_start + PCI_CAP_SSID_SIZEOF && bridge->subsystem_vendor_id) { /* Emulated PCI Bridge Subsystem Vendor ID capability */ reg -= bridge->ssid_start; read_op = pci_bridge_emul_read_ssid; cfgspace = NULL; behavior = NULL; } else if (reg >= bridge->pcie_start && reg < bridge->pcie_start + PCI_CAP_PCIE_SIZEOF && bridge->has_pcie) { /* Our emulated PCIe capability */ reg -= bridge->pcie_start; read_op = bridge->ops->read_pcie; cfgspace = (__le32 *) &bridge->pcie_conf; behavior = bridge->pcie_cap_regs_behavior; } else if (reg >= PCI_CFG_SPACE_SIZE && bridge->has_pcie) { /* PCIe extended capability space */ reg -= PCI_CFG_SPACE_SIZE; read_op = bridge->ops->read_ext; cfgspace = NULL; behavior = NULL; } else { /* Not implemented */ *value = 0; return PCIBIOS_SUCCESSFUL; } if (read_op) ret = read_op(bridge, reg, value); else ret = PCI_BRIDGE_EMUL_NOT_HANDLED; if (ret == PCI_BRIDGE_EMUL_NOT_HANDLED) { if (cfgspace) *value = le32_to_cpu(cfgspace[reg / 4]); else *value = 0; } /* * Make sure we never return any reserved bit with a value * different from 0. */ if (behavior) *value &= behavior[reg / 4].ro | behavior[reg / 4].rw | behavior[reg / 4].w1c; if (size == 1) *value = (*value >> (8 * (where & 3))) & 0xff; else if (size == 2) *value = (*value >> (8 * (where & 3))) & 0xffff; else if (size != 4) return PCIBIOS_BAD_REGISTER_NUMBER; return PCIBIOS_SUCCESSFUL; } EXPORT_SYMBOL_GPL(pci_bridge_emul_conf_read); /* * Should be called by the PCI controller driver when writing the PCI * configuration space of the fake bridge. It will call back the * ->ops->write_base or ->ops->write_pcie operations. 
*/ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where, int size, u32 value) { int reg = where & ~3; int mask, ret, old, new, shift; void (*write_op)(struct pci_bridge_emul *bridge, int reg, u32 old, u32 new, u32 mask); __le32 *cfgspace; const struct pci_bridge_reg_behavior *behavior; ret = pci_bridge_emul_conf_read(bridge, reg, 4, &old); if (ret != PCIBIOS_SUCCESSFUL) return ret; if (reg < PCI_BRIDGE_CONF_END) { /* Emulated PCI space */ write_op = bridge->ops->write_base; cfgspace = (__le32 *) &bridge->conf; behavior = bridge->pci_regs_behavior; } else if (reg >= bridge->pcie_start && reg < bridge->pcie_start + PCI_CAP_PCIE_SIZEOF && bridge->has_pcie) { /* Our emulated PCIe capability */ reg -= bridge->pcie_start; write_op = bridge->ops->write_pcie; cfgspace = (__le32 *) &bridge->pcie_conf; behavior = bridge->pcie_cap_regs_behavior; } else if (reg >= PCI_CFG_SPACE_SIZE && bridge->has_pcie) { /* PCIe extended capability space */ reg -= PCI_CFG_SPACE_SIZE; write_op = bridge->ops->write_ext; cfgspace = NULL; behavior = NULL; } else { /* Not implemented */ return PCIBIOS_SUCCESSFUL; } shift = (where & 0x3) * 8; if (size == 4) mask = 0xffffffff; else if (size == 2) mask = 0xffff << shift; else if (size == 1) mask = 0xff << shift; else return PCIBIOS_BAD_REGISTER_NUMBER; if (behavior) { /* Keep all bits, except the RW bits */ new = old & (~mask | ~behavior[reg / 4].rw); /* Update the value of the RW bits */ new |= (value << shift) & (behavior[reg / 4].rw & mask); /* Clear the W1C bits */ new &= ~((value << shift) & (behavior[reg / 4].w1c & mask)); } else { new = old & ~mask; new |= (value << shift) & mask; } if (cfgspace) { /* Save the new value with the cleared W1C bits into the cfgspace */ cfgspace[reg / 4] = cpu_to_le32(new); } if (behavior) { /* * Clear the W1C bits not specified by the write mask, so that the * write_op() does not clear them. */ new &= ~(behavior[reg / 4].w1c & ~mask); /* * Set the W1C bits specified by the write mask, so that write_op() * knows about that they are to be cleared. */ new |= (value << shift) & (behavior[reg / 4].w1c & mask); } if (write_op) write_op(bridge, reg, old, new, mask); return PCIBIOS_SUCCESSFUL; } EXPORT_SYMBOL_GPL(pci_bridge_emul_conf_write);
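/*
 * Editor's illustration (not part of pci-bridge-emul.c): a minimal,
 * standalone user-space sketch of the read-modify-write arithmetic used by
 * pci_bridge_emul_conf_write() above. The register layout below (bits 7:0
 * RW, bit 15 W1C) is hypothetical and chosen only to show how RW bits are
 * folded in while a W1C bit written as 1 is cleared in the stored copy;
 * 'value' is assumed to be pre-shifted into register position.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t emul_write(uint32_t old, uint32_t value, uint32_t mask,
			   uint32_t rw, uint32_t w1c)
{
	/* Keep bits outside the byte-enable mask and bits that are not RW */
	uint32_t new = old & (~mask | ~rw);

	/* Update the RW bits covered by the write */
	new |= value & rw & mask;

	/* Clear any W1C bit the caller wrote as 1 */
	new &= ~(value & w1c & mask);

	return new;
}

int main(void)
{
	uint32_t old = 0x80f0, rw = 0x00ff, w1c = 0x8000;

	/*
	 * Writing 0xffff over the full word: the RW bits take the written
	 * value, the W1C bit is cleared, so the stored copy becomes 0x00ff.
	 */
	printf("0x%04x\n", (unsigned int)emul_write(old, 0xffff, 0xffff, rw, w1c));
	return 0;
}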
linux-master
drivers/pci/pci-bridge-emul.c
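The record above defines the bridge-emulation helpers but contains no caller. The fragment below is a hypothetical controller-driver sketch (none of the my_* names exist in the kernel tree) showing the intended flow: fill in the fake config header, attach read/write hooks, then hand the structure to pci_bridge_emul_init(). It assumes the ops structure declared in pci-bridge-emul.h is named struct pci_bridge_emul_ops and that the 'conf' member exposes vendor/device fields mirroring the standard header; both are inferred, not shown in the record above.

/* Hypothetical PCI controller driver fragment -- illustration only. */
#include <linux/pci.h>
#include "pci-bridge-emul.h"

struct my_pcie_port {				/* hypothetical driver state */
	struct pci_bridge_emul bridge;
};

static pci_bridge_emul_read_status_t
my_emul_read_pcie(struct pci_bridge_emul *bridge, int reg, u32 *value)
{
	/*
	 * Fill *value and return PCI_BRIDGE_EMUL_HANDLED to override a
	 * register from hardware; otherwise defer to the cached copy.
	 */
	return PCI_BRIDGE_EMUL_NOT_HANDLED;
}

static void my_emul_write_pcie(struct pci_bridge_emul *bridge, int reg,
			       u32 old, u32 new, u32 mask)
{
	/* Propagate registers of interest (e.g. PCI_EXP_RTCTL) to the HW. */
}

static const struct pci_bridge_emul_ops my_emul_ops = {	/* assumed type name */
	.read_pcie  = my_emul_read_pcie,
	.write_pcie = my_emul_write_pcie,
};

static int my_port_setup_bridge(struct my_pcie_port *port)
{
	struct pci_bridge_emul *bridge = &port->bridge;

	/* Field names assumed to mirror the standard config header. */
	bridge->conf.vendor = cpu_to_le16(0x1234);	/* hypothetical IDs */
	bridge->conf.device = cpu_to_le16(0x5678);
	bridge->has_pcie = true;
	bridge->data = port;
	bridge->ops = &my_emul_ops;

	/* This hypothetical controller forwards no I/O space. */
	return pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_IO_FORWARD);
}

The driver's pci_ops config accessors would then route accesses targeting the emulated root port through pci_bridge_emul_conf_read() and pci_bridge_emul_conf_write(), and call pci_bridge_emul_cleanup() on teardown.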
// SPDX-License-Identifier: GPL-2.0 /* * PCI detection and setup code */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/msi.h> #include <linux/of_pci.h> #include <linux/pci_hotplug.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/cpumask.h> #include <linux/aer.h> #include <linux/acpi.h> #include <linux/hypervisor.h> #include <linux/irqdomain.h> #include <linux/pm_runtime.h> #include <linux/bitfield.h> #include "pci.h" #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ #define CARDBUS_RESERVE_BUSNR 3 static struct resource busn_resource = { .name = "PCI busn", .start = 0, .end = 255, .flags = IORESOURCE_BUS, }; /* Ugh. Need to stop exporting this to modules. */ LIST_HEAD(pci_root_buses); EXPORT_SYMBOL(pci_root_buses); static LIST_HEAD(pci_domain_busn_res_list); struct pci_domain_busn_res { struct list_head list; struct resource res; int domain_nr; }; static struct resource *get_pci_domain_busn_res(int domain_nr) { struct pci_domain_busn_res *r; list_for_each_entry(r, &pci_domain_busn_res_list, list) if (r->domain_nr == domain_nr) return &r->res; r = kzalloc(sizeof(*r), GFP_KERNEL); if (!r) return NULL; r->domain_nr = domain_nr; r->res.start = 0; r->res.end = 0xff; r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED; list_add_tail(&r->list, &pci_domain_busn_res_list); return &r->res; } /* * Some device drivers need know if PCI is initiated. * Basically, we think PCI is not initiated when there * is no device to be found on the pci_bus_type. */ int no_pci_devices(void) { struct device *dev; int no_devices; dev = bus_find_next_device(&pci_bus_type, NULL); no_devices = (dev == NULL); put_device(dev); return no_devices; } EXPORT_SYMBOL(no_pci_devices); /* * PCI Bus Class */ static void release_pcibus_dev(struct device *dev) { struct pci_bus *pci_bus = to_pci_bus(dev); put_device(pci_bus->bridge); pci_bus_remove_resources(pci_bus); pci_release_bus_of_node(pci_bus); kfree(pci_bus); } static struct class pcibus_class = { .name = "pci_bus", .dev_release = &release_pcibus_dev, .dev_groups = pcibus_groups, }; static int __init pcibus_class_init(void) { return class_register(&pcibus_class); } postcore_initcall(pcibus_class_init); static u64 pci_size(u64 base, u64 maxbase, u64 mask) { u64 size = mask & maxbase; /* Find the significant bits */ if (!size) return 0; /* * Get the lowest of them to find the decode size, and from that * the extent. */ size = size & ~(size-1); /* * base == maxbase can be valid only if the BAR has already been * programmed with all 1s. 
*/ if (base == maxbase && ((base | (size - 1)) & mask) != mask) return 0; return size; } static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) { u32 mem_type; unsigned long flags; if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { flags = bar & ~PCI_BASE_ADDRESS_IO_MASK; flags |= IORESOURCE_IO; return flags; } flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; flags |= IORESOURCE_MEM; if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) flags |= IORESOURCE_PREFETCH; mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK; switch (mem_type) { case PCI_BASE_ADDRESS_MEM_TYPE_32: break; case PCI_BASE_ADDRESS_MEM_TYPE_1M: /* 1M mem BAR treated as 32-bit BAR */ break; case PCI_BASE_ADDRESS_MEM_TYPE_64: flags |= IORESOURCE_MEM_64; break; default: /* mem unknown type treated as 32-bit BAR */ break; } return flags; } #define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO) /** * __pci_read_base - Read a PCI BAR * @dev: the PCI device * @type: type of the BAR * @res: resource buffer to be filled in * @pos: BAR position in the config space * * Returns 1 if the BAR is 64-bit, or 0 if 32-bit. */ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, struct resource *res, unsigned int pos) { u32 l = 0, sz = 0, mask; u64 l64, sz64, mask64; u16 orig_cmd; struct pci_bus_region region, inverted_region; mask = type ? PCI_ROM_ADDRESS_MASK : ~0; /* No printks while decoding is disabled! */ if (!dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) { pci_write_config_word(dev, PCI_COMMAND, orig_cmd & ~PCI_COMMAND_DECODE_ENABLE); } } res->name = pci_name(dev); pci_read_config_dword(dev, pos, &l); pci_write_config_dword(dev, pos, l | mask); pci_read_config_dword(dev, pos, &sz); pci_write_config_dword(dev, pos, l); /* * All bits set in sz means the device isn't working properly. * If the BAR isn't implemented, all bits must be 0. If it's a * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit * 1 must be clear. */ if (PCI_POSSIBLE_ERROR(sz)) sz = 0; /* * I don't know how l can have all bits set. Copied from old code. * Maybe it fixes a bug on some ancient platform. 
*/ if (PCI_POSSIBLE_ERROR(l)) l = 0; if (type == pci_bar_unknown) { res->flags = decode_bar(dev, l); res->flags |= IORESOURCE_SIZEALIGN; if (res->flags & IORESOURCE_IO) { l64 = l & PCI_BASE_ADDRESS_IO_MASK; sz64 = sz & PCI_BASE_ADDRESS_IO_MASK; mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT; } else { l64 = l & PCI_BASE_ADDRESS_MEM_MASK; sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK; mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK; } } else { if (l & PCI_ROM_ADDRESS_ENABLE) res->flags |= IORESOURCE_ROM_ENABLE; l64 = l & PCI_ROM_ADDRESS_MASK; sz64 = sz & PCI_ROM_ADDRESS_MASK; mask64 = PCI_ROM_ADDRESS_MASK; } if (res->flags & IORESOURCE_MEM_64) { pci_read_config_dword(dev, pos + 4, &l); pci_write_config_dword(dev, pos + 4, ~0); pci_read_config_dword(dev, pos + 4, &sz); pci_write_config_dword(dev, pos + 4, l); l64 |= ((u64)l << 32); sz64 |= ((u64)sz << 32); mask64 |= ((u64)~0 << 32); } if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE)) pci_write_config_word(dev, PCI_COMMAND, orig_cmd); if (!sz64) goto fail; sz64 = pci_size(l64, sz64, mask64); if (!sz64) { pci_info(dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n", pos); goto fail; } if (res->flags & IORESOURCE_MEM_64) { if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8) && sz64 > 0x100000000ULL) { res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED; res->start = 0; res->end = 0; pci_err(dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n", pos, (unsigned long long)sz64); goto out; } if ((sizeof(pci_bus_addr_t) < 8) && l) { /* Above 32-bit boundary; try to reallocate */ res->flags |= IORESOURCE_UNSET; res->start = 0; res->end = sz64 - 1; pci_info(dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n", pos, (unsigned long long)l64); goto out; } } region.start = l64; region.end = l64 + sz64 - 1; pcibios_bus_to_resource(dev->bus, res, &region); pcibios_resource_to_bus(dev->bus, &inverted_region, res); /* * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is * the corresponding resource address (the physical address used by * the CPU. Converting that resource address back to a bus address * should yield the original BAR value: * * resource_to_bus(bus_to_resource(A)) == A * * If it doesn't, CPU accesses to "bus_to_resource(A)" will not * be claimed by the device. */ if (inverted_region.start != region.start) { res->flags |= IORESOURCE_UNSET; res->start = 0; res->end = region.end - region.start; pci_info(dev, "reg 0x%x: initial BAR value %#010llx invalid\n", pos, (unsigned long long)region.start); } goto out; fail: res->flags = 0; out: if (res->flags) pci_info(dev, "reg 0x%x: %pR\n", pos, res); return (res->flags & IORESOURCE_MEM_64) ? 
1 : 0; } static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) { unsigned int pos, reg; if (dev->non_compliant_bars) return; /* Per PCIe r4.0, sec 9.3.4.1.11, the VF BARs are all RO Zero */ if (dev->is_virtfn) return; for (pos = 0; pos < howmany; pos++) { struct resource *res = &dev->resource[pos]; reg = PCI_BASE_ADDRESS_0 + (pos << 2); pos += __pci_read_base(dev, pci_bar_unknown, res, reg); } if (rom) { struct resource *res = &dev->resource[PCI_ROM_RESOURCE]; dev->rom_base_reg = rom; res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_READONLY | IORESOURCE_SIZEALIGN; __pci_read_base(dev, pci_bar_mem32, res, rom); } } static void pci_read_bridge_windows(struct pci_dev *bridge) { u16 io; u32 pmem, tmp; pci_read_config_word(bridge, PCI_IO_BASE, &io); if (!io) { pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0); pci_read_config_word(bridge, PCI_IO_BASE, &io); pci_write_config_word(bridge, PCI_IO_BASE, 0x0); } if (io) bridge->io_window = 1; /* * DECchip 21050 pass 2 errata: the bridge may miss an address * disconnect boundary by one PCI data phase. Workaround: do not * use prefetching on this device. */ if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001) return; pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); if (!pmem) { pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0xffe0fff0); pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem); pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0); } if (!pmem) return; bridge->pref_window = 1; if ((pmem & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { /* * Bridge claims to have a 64-bit prefetchable memory * window; verify that the upper bits are actually * writable. */ pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &pmem); pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, 0xffffffff); pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp); pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, pmem); if (tmp) bridge->pref_64_window = 1; } } static void pci_read_bridge_io(struct pci_bus *child) { struct pci_dev *dev = child->self; u8 io_base_lo, io_limit_lo; unsigned long io_mask, io_granularity, base, limit; struct pci_bus_region region; struct resource *res; io_mask = PCI_IO_RANGE_MASK; io_granularity = 0x1000; if (dev->io_window_1k) { /* Support 1K I/O space granularity */ io_mask = PCI_IO_1K_RANGE_MASK; io_granularity = 0x400; } res = child->resource[0]; pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); base = (io_base_lo & io_mask) << 8; limit = (io_limit_lo & io_mask) << 8; if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) { u16 io_base_hi, io_limit_hi; pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi); pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi); base |= ((unsigned long) io_base_hi << 16); limit |= ((unsigned long) io_limit_hi << 16); } if (base <= limit) { res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; region.start = base; region.end = limit + io_granularity - 1; pcibios_bus_to_resource(dev->bus, res, &region); pci_info(dev, " bridge window %pR\n", res); } } static void pci_read_bridge_mmio(struct pci_bus *child) { struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; unsigned long base, limit; struct pci_bus_region region; struct resource *res; res = child->resource[1]; pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); base = ((unsigned long) 
mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; if (base <= limit) { res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; region.start = base; region.end = limit + 0xfffff; pcibios_bus_to_resource(dev->bus, res, &region); pci_info(dev, " bridge window %pR\n", res); } } static void pci_read_bridge_mmio_pref(struct pci_bus *child) { struct pci_dev *dev = child->self; u16 mem_base_lo, mem_limit_lo; u64 base64, limit64; pci_bus_addr_t base, limit; struct pci_bus_region region; struct resource *res; res = child->resource[2]; pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { u32 mem_base_hi, mem_limit_hi; pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi); pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi); /* * Some bridges set the base > limit by default, and some * (broken) BIOSes do not initialize them. If we find * this, just assume they are not being used. */ if (mem_base_hi <= mem_limit_hi) { base64 |= (u64) mem_base_hi << 32; limit64 |= (u64) mem_limit_hi << 32; } } base = (pci_bus_addr_t) base64; limit = (pci_bus_addr_t) limit64; if (base != base64) { pci_err(dev, "can't handle bridge window above 4GB (bus address %#010llx)\n", (unsigned long long) base64); return; } if (base <= limit) { res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; if (res->flags & PCI_PREF_RANGE_TYPE_64) res->flags |= IORESOURCE_MEM_64; region.start = base; region.end = limit + 0xfffff; pcibios_bus_to_resource(dev->bus, res, &region); pci_info(dev, " bridge window %pR\n", res); } } void pci_read_bridge_bases(struct pci_bus *child) { struct pci_dev *dev = child->self; struct resource *res; int i; if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */ return; pci_info(dev, "PCI bridge to %pR%s\n", &child->busn_res, dev->transparent ? 
" (subtractive decode)" : ""); pci_bus_remove_resources(child); for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i]; pci_read_bridge_io(child); pci_read_bridge_mmio(child); pci_read_bridge_mmio_pref(child); if (dev->transparent) { pci_bus_for_each_resource(child->parent, res) { if (res && res->flags) { pci_bus_add_resource(child, res, PCI_SUBTRACTIVE_DECODE); pci_info(dev, " bridge window %pR (subtractive decode)\n", res); } } } } static struct pci_bus *pci_alloc_bus(struct pci_bus *parent) { struct pci_bus *b; b = kzalloc(sizeof(*b), GFP_KERNEL); if (!b) return NULL; INIT_LIST_HEAD(&b->node); INIT_LIST_HEAD(&b->children); INIT_LIST_HEAD(&b->devices); INIT_LIST_HEAD(&b->slots); INIT_LIST_HEAD(&b->resources); b->max_bus_speed = PCI_SPEED_UNKNOWN; b->cur_bus_speed = PCI_SPEED_UNKNOWN; #ifdef CONFIG_PCI_DOMAINS_GENERIC if (parent) b->domain_nr = parent->domain_nr; #endif return b; } static void pci_release_host_bridge_dev(struct device *dev) { struct pci_host_bridge *bridge = to_pci_host_bridge(dev); if (bridge->release_fn) bridge->release_fn(bridge); pci_free_resource_list(&bridge->windows); pci_free_resource_list(&bridge->dma_ranges); kfree(bridge); } static void pci_init_host_bridge(struct pci_host_bridge *bridge) { INIT_LIST_HEAD(&bridge->windows); INIT_LIST_HEAD(&bridge->dma_ranges); /* * We assume we can manage these PCIe features. Some systems may * reserve these for use by the platform itself, e.g., an ACPI BIOS * may implement its own AER handling and use _OSC to prevent the * OS from interfering. */ bridge->native_aer = 1; bridge->native_pcie_hotplug = 1; bridge->native_shpc_hotplug = 1; bridge->native_pme = 1; bridge->native_ltr = 1; bridge->native_dpc = 1; bridge->domain_nr = PCI_DOMAIN_NR_NOT_SET; bridge->native_cxl_error = 1; device_initialize(&bridge->dev); } struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) { struct pci_host_bridge *bridge; bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL); if (!bridge) return NULL; pci_init_host_bridge(bridge); bridge->dev.release = pci_release_host_bridge_dev; return bridge; } EXPORT_SYMBOL(pci_alloc_host_bridge); static void devm_pci_alloc_host_bridge_release(void *data) { pci_free_host_bridge(data); } struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, size_t priv) { int ret; struct pci_host_bridge *bridge; bridge = pci_alloc_host_bridge(priv); if (!bridge) return NULL; bridge->dev.parent = dev; ret = devm_add_action_or_reset(dev, devm_pci_alloc_host_bridge_release, bridge); if (ret) return NULL; ret = devm_of_pci_bridge_init(dev, bridge); if (ret) return NULL; return bridge; } EXPORT_SYMBOL(devm_pci_alloc_host_bridge); void pci_free_host_bridge(struct pci_host_bridge *bridge) { put_device(&bridge->dev); } EXPORT_SYMBOL(pci_free_host_bridge); /* Indexed by PCI_X_SSTATUS_FREQ (secondary bus mode and frequency) */ static const unsigned char pcix_bus_speed[] = { PCI_SPEED_UNKNOWN, /* 0 */ PCI_SPEED_66MHz_PCIX, /* 1 */ PCI_SPEED_100MHz_PCIX, /* 2 */ PCI_SPEED_133MHz_PCIX, /* 3 */ PCI_SPEED_UNKNOWN, /* 4 */ PCI_SPEED_66MHz_PCIX_ECC, /* 5 */ PCI_SPEED_100MHz_PCIX_ECC, /* 6 */ PCI_SPEED_133MHz_PCIX_ECC, /* 7 */ PCI_SPEED_UNKNOWN, /* 8 */ PCI_SPEED_66MHz_PCIX_266, /* 9 */ PCI_SPEED_100MHz_PCIX_266, /* A */ PCI_SPEED_133MHz_PCIX_266, /* B */ PCI_SPEED_UNKNOWN, /* C */ PCI_SPEED_66MHz_PCIX_533, /* D */ PCI_SPEED_100MHz_PCIX_533, /* E */ PCI_SPEED_133MHz_PCIX_533 /* F */ }; /* Indexed by PCI_EXP_LNKCAP_SLS, PCI_EXP_LNKSTA_CLS */ const unsigned char pcie_link_speed[] 
= { PCI_SPEED_UNKNOWN, /* 0 */ PCIE_SPEED_2_5GT, /* 1 */ PCIE_SPEED_5_0GT, /* 2 */ PCIE_SPEED_8_0GT, /* 3 */ PCIE_SPEED_16_0GT, /* 4 */ PCIE_SPEED_32_0GT, /* 5 */ PCIE_SPEED_64_0GT, /* 6 */ PCI_SPEED_UNKNOWN, /* 7 */ PCI_SPEED_UNKNOWN, /* 8 */ PCI_SPEED_UNKNOWN, /* 9 */ PCI_SPEED_UNKNOWN, /* A */ PCI_SPEED_UNKNOWN, /* B */ PCI_SPEED_UNKNOWN, /* C */ PCI_SPEED_UNKNOWN, /* D */ PCI_SPEED_UNKNOWN, /* E */ PCI_SPEED_UNKNOWN /* F */ }; EXPORT_SYMBOL_GPL(pcie_link_speed); const char *pci_speed_string(enum pci_bus_speed speed) { /* Indexed by the pci_bus_speed enum */ static const char *speed_strings[] = { "33 MHz PCI", /* 0x00 */ "66 MHz PCI", /* 0x01 */ "66 MHz PCI-X", /* 0x02 */ "100 MHz PCI-X", /* 0x03 */ "133 MHz PCI-X", /* 0x04 */ NULL, /* 0x05 */ NULL, /* 0x06 */ NULL, /* 0x07 */ NULL, /* 0x08 */ "66 MHz PCI-X 266", /* 0x09 */ "100 MHz PCI-X 266", /* 0x0a */ "133 MHz PCI-X 266", /* 0x0b */ "Unknown AGP", /* 0x0c */ "1x AGP", /* 0x0d */ "2x AGP", /* 0x0e */ "4x AGP", /* 0x0f */ "8x AGP", /* 0x10 */ "66 MHz PCI-X 533", /* 0x11 */ "100 MHz PCI-X 533", /* 0x12 */ "133 MHz PCI-X 533", /* 0x13 */ "2.5 GT/s PCIe", /* 0x14 */ "5.0 GT/s PCIe", /* 0x15 */ "8.0 GT/s PCIe", /* 0x16 */ "16.0 GT/s PCIe", /* 0x17 */ "32.0 GT/s PCIe", /* 0x18 */ "64.0 GT/s PCIe", /* 0x19 */ }; if (speed < ARRAY_SIZE(speed_strings)) return speed_strings[speed]; return "Unknown"; } EXPORT_SYMBOL_GPL(pci_speed_string); void pcie_update_link_speed(struct pci_bus *bus, u16 linksta) { bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS]; } EXPORT_SYMBOL_GPL(pcie_update_link_speed); static unsigned char agp_speeds[] = { AGP_UNKNOWN, AGP_1X, AGP_2X, AGP_4X, AGP_8X }; static enum pci_bus_speed agp_speed(int agp3, int agpstat) { int index = 0; if (agpstat & 4) index = 3; else if (agpstat & 2) index = 2; else if (agpstat & 1) index = 1; else goto out; if (agp3) { index += 2; if (index == 5) index = 0; } out: return agp_speeds[index]; } static void pci_set_bus_speed(struct pci_bus *bus) { struct pci_dev *bridge = bus->self; int pos; pos = pci_find_capability(bridge, PCI_CAP_ID_AGP); if (!pos) pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3); if (pos) { u32 agpstat, agpcmd; pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat); bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7); pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd); bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7); } pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX); if (pos) { u16 status; enum pci_bus_speed max; pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS, &status); if (status & PCI_X_SSTATUS_533MHZ) { max = PCI_SPEED_133MHz_PCIX_533; } else if (status & PCI_X_SSTATUS_266MHZ) { max = PCI_SPEED_133MHz_PCIX_266; } else if (status & PCI_X_SSTATUS_133MHZ) { if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) max = PCI_SPEED_133MHz_PCIX_ECC; else max = PCI_SPEED_133MHz_PCIX; } else { max = PCI_SPEED_66MHz_PCIX; } bus->max_bus_speed = max; bus->cur_bus_speed = pcix_bus_speed[ (status & PCI_X_SSTATUS_FREQ) >> 6]; return; } if (pci_is_pcie(bridge)) { u32 linkcap; u16 linksta; pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap); bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS]; pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta); pcie_update_link_speed(bus, linksta); } } static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus) { struct irq_domain *d; /* If the host bridge driver sets a MSI domain of the bridge, use it */ d = dev_get_msi_domain(bus->bridge); /* * 
Any firmware interface that can resolve the msi_domain * should be called from here. */ if (!d) d = pci_host_bridge_of_msi_domain(bus); if (!d) d = pci_host_bridge_acpi_msi_domain(bus); /* * If no IRQ domain was found via the OF tree, try looking it up * directly through the fwnode_handle. */ if (!d) { struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus); if (fwnode) d = irq_find_matching_fwnode(fwnode, DOMAIN_BUS_PCI_MSI); } return d; } static void pci_set_bus_msi_domain(struct pci_bus *bus) { struct irq_domain *d; struct pci_bus *b; /* * The bus can be a root bus, a subordinate bus, or a virtual bus * created by an SR-IOV device. Walk up to the first bridge device * found or derive the domain from the host bridge. */ for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) { if (b->self) d = dev_get_msi_domain(&b->self->dev); } if (!d) d = pci_host_bridge_msi_domain(b); dev_set_msi_domain(&bus->dev, d); } static int pci_register_host_bridge(struct pci_host_bridge *bridge) { struct device *parent = bridge->dev.parent; struct resource_entry *window, *next, *n; struct pci_bus *bus, *b; resource_size_t offset, next_offset; LIST_HEAD(resources); struct resource *res, *next_res; char addr[64], *fmt; const char *name; int err; bus = pci_alloc_bus(NULL); if (!bus) return -ENOMEM; bridge->bus = bus; bus->sysdata = bridge->sysdata; bus->ops = bridge->ops; bus->number = bus->busn_res.start = bridge->busnr; #ifdef CONFIG_PCI_DOMAINS_GENERIC if (bridge->domain_nr == PCI_DOMAIN_NR_NOT_SET) bus->domain_nr = pci_bus_find_domain_nr(bus, parent); else bus->domain_nr = bridge->domain_nr; if (bus->domain_nr < 0) { err = bus->domain_nr; goto free; } #endif b = pci_find_bus(pci_domain_nr(bus), bridge->busnr); if (b) { /* Ignore it if we already got here via a different bridge */ dev_dbg(&b->dev, "bus already known\n"); err = -EEXIST; goto free; } dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus), bridge->busnr); err = pcibios_root_bridge_prepare(bridge); if (err) goto free; /* Temporarily move resources off the list */ list_splice_init(&bridge->windows, &resources); err = device_add(&bridge->dev); if (err) { put_device(&bridge->dev); goto free; } bus->bridge = get_device(&bridge->dev); device_enable_async_suspend(bus->bridge); pci_set_bus_of_node(bus); pci_set_bus_msi_domain(bus); if (bridge->msi_domain && !dev_get_msi_domain(&bus->dev) && !pci_host_of_has_msi_map(parent)) bus->bus_flags |= PCI_BUS_FLAGS_NO_MSI; if (!parent) set_dev_node(bus->bridge, pcibus_to_node(bus)); bus->dev.class = &pcibus_class; bus->dev.parent = bus->bridge; dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number); name = dev_name(&bus->dev); err = device_register(&bus->dev); if (err) goto unregister; pcibios_add_bus(bus); if (bus->ops->add_bus) { err = bus->ops->add_bus(bus); if (WARN_ON(err < 0)) dev_err(&bus->dev, "failed to add bus: %d\n", err); } /* Create legacy_io and legacy_mem files for this bus */ pci_create_legacy_files(bus); if (parent) dev_info(parent, "PCI host bridge to bus %s\n", name); else pr_info("PCI host bridge to bus %s\n", name); if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE) dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n"); /* Coalesce contiguous windows */ resource_list_for_each_entry_safe(window, n, &resources) { if (list_is_last(&window->node, &resources)) break; next = list_next_entry(window, node); offset = window->offset; res = window->res; next_offset = next->offset; next_res = next->res; if (res->flags != next_res->flags || offset 
!= next_offset) continue; if (res->end + 1 == next_res->start) { next_res->start = res->start; res->flags = res->start = res->end = 0; } } /* Add initial resources to the bus */ resource_list_for_each_entry_safe(window, n, &resources) { offset = window->offset; res = window->res; if (!res->flags && !res->start && !res->end) { release_resource(res); resource_list_destroy_entry(window); continue; } list_move_tail(&window->node, &bridge->windows); if (res->flags & IORESOURCE_BUS) pci_bus_insert_busn_res(bus, bus->number, res->end); else pci_bus_add_resource(bus, res, 0); if (offset) { if (resource_type(res) == IORESOURCE_IO) fmt = " (bus address [%#06llx-%#06llx])"; else fmt = " (bus address [%#010llx-%#010llx])"; snprintf(addr, sizeof(addr), fmt, (unsigned long long)(res->start - offset), (unsigned long long)(res->end - offset)); } else addr[0] = '\0'; dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr); } down_write(&pci_bus_sem); list_add_tail(&bus->node, &pci_root_buses); up_write(&pci_bus_sem); return 0; unregister: put_device(&bridge->dev); device_del(&bridge->dev); free: #ifdef CONFIG_PCI_DOMAINS_GENERIC pci_bus_release_domain_nr(bus, parent); #endif kfree(bus); return err; } static bool pci_bridge_child_ext_cfg_accessible(struct pci_dev *bridge) { int pos; u32 status; /* * If extended config space isn't accessible on a bridge's primary * bus, we certainly can't access it on the secondary bus. */ if (bridge->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG) return false; /* * PCIe Root Ports and switch ports are PCIe on both sides, so if * extended config space is accessible on the primary, it's also * accessible on the secondary. */ if (pci_is_pcie(bridge) && (pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT || pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM || pci_pcie_type(bridge) == PCI_EXP_TYPE_DOWNSTREAM)) return true; /* * For the other bridge types: * - PCI-to-PCI bridges * - PCIe-to-PCI/PCI-X forward bridges * - PCI/PCI-X-to-PCIe reverse bridges * extended config space on the secondary side is only accessible * if the bridge supports PCI-X Mode 2. */ pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX); if (!pos) return false; pci_read_config_dword(bridge, pos + PCI_X_STATUS, &status); return status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ); } static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, struct pci_dev *bridge, int busnr) { struct pci_bus *child; struct pci_host_bridge *host; int i; int ret; /* Allocate a new bus and inherit stuff from the parent */ child = pci_alloc_bus(parent); if (!child) return NULL; child->parent = parent; child->sysdata = parent->sysdata; child->bus_flags = parent->bus_flags; host = pci_find_host_bridge(parent); if (host->child_ops) child->ops = host->child_ops; else child->ops = parent->ops; /* * Initialize some portions of the bus device, but don't register * it now as the parent is not properly set up yet. */ child->dev.class = &pcibus_class; dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr); /* Set up the primary, secondary and subordinate bus numbers */ child->number = child->busn_res.start = busnr; child->primary = parent->busn_res.start; child->busn_res.end = 0xff; if (!bridge) { child->dev.parent = parent->bridge; goto add_dev; } child->self = bridge; child->bridge = get_device(&bridge->dev); child->dev.parent = child->bridge; pci_set_bus_of_node(child); pci_set_bus_speed(child); /* * Check whether extended config space is accessible on the child * bus. 
Note that we currently assume it is always accessible on * the root bus. */ if (!pci_bridge_child_ext_cfg_accessible(bridge)) { child->bus_flags |= PCI_BUS_FLAGS_NO_EXTCFG; pci_info(child, "extended config space not accessible\n"); } /* Set up default resource pointers and names */ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i]; child->resource[i]->name = child->name; } bridge->subordinate = child; add_dev: pci_set_bus_msi_domain(child); ret = device_register(&child->dev); WARN_ON(ret < 0); pcibios_add_bus(child); if (child->ops->add_bus) { ret = child->ops->add_bus(child); if (WARN_ON(ret < 0)) dev_err(&child->dev, "failed to add bus: %d\n", ret); } /* Create legacy_io and legacy_mem files for this bus */ pci_create_legacy_files(child); return child; } struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr) { struct pci_bus *child; child = pci_alloc_child_bus(parent, dev, busnr); if (child) { down_write(&pci_bus_sem); list_add_tail(&child->node, &parent->children); up_write(&pci_bus_sem); } return child; } EXPORT_SYMBOL(pci_add_new_bus); static void pci_enable_crs(struct pci_dev *pdev) { u16 root_cap = 0; /* Enable CRS Software Visibility if supported */ pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap); if (root_cap & PCI_EXP_RTCAP_CRSVIS) pcie_capability_set_word(pdev, PCI_EXP_RTCTL, PCI_EXP_RTCTL_CRSSVE); } static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus, unsigned int available_buses); /** * pci_ea_fixed_busnrs() - Read fixed Secondary and Subordinate bus * numbers from EA capability. * @dev: Bridge * @sec: updated with secondary bus number from EA * @sub: updated with subordinate bus number from EA * * If @dev is a bridge with EA capability that specifies valid secondary * and subordinate bus numbers, return true with the bus numbers in @sec * and @sub. Otherwise return false. */ static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub) { int ea, offset; u32 dw; u8 ea_sec, ea_sub; if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) return false; /* find PCI EA capability in list */ ea = pci_find_capability(dev, PCI_CAP_ID_EA); if (!ea) return false; offset = ea + PCI_EA_FIRST_ENT; pci_read_config_dword(dev, offset, &dw); ea_sec = dw & PCI_EA_SEC_BUS_MASK; ea_sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT; if (ea_sec == 0 || ea_sub < ea_sec) return false; *sec = ea_sec; *sub = ea_sub; return true; } /* * pci_scan_bridge_extend() - Scan buses behind a bridge * @bus: Parent bus the bridge is on * @dev: Bridge itself * @max: Starting subordinate number of buses behind this bridge * @available_buses: Total number of buses available for this bridge and * the devices below. After the minimal bus space has * been allocated the remaining buses will be * distributed equally between hotplug-capable bridges. * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges * that need to be reconfigured. * * If it's a bridge, configure it and scan the bus behind it. * For CardBus bridges, we don't scan behind as the devices will * be handled by the bridge driver itself. * * We need to process bridges in two passes -- first we scan those * already configured by the BIOS and after we are done with all of * them, we proceed to assigning numbers to the remaining buses in * order to avoid overlaps between old and new bus numbers. * * Return: New subordinate number covering all buses behind this bridge. 
*/ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, int max, unsigned int available_buses, int pass) { struct pci_bus *child; int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); u32 buses, i, j = 0; u16 bctl; u8 primary, secondary, subordinate; int broken = 0; bool fixed_buses; u8 fixed_sec, fixed_sub; int next_busnr; /* * Make sure the bridge is powered on to be able to access config * space of devices below it. */ pm_runtime_get_sync(&dev->dev); pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); primary = buses & 0xFF; secondary = (buses >> 8) & 0xFF; subordinate = (buses >> 16) & 0xFF; pci_dbg(dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n", secondary, subordinate, pass); if (!primary && (primary != bus->number) && secondary && subordinate) { pci_warn(dev, "Primary bus is hard wired to 0\n"); primary = bus->number; } /* Check if setup is sensible at all */ if (!pass && (primary != bus->number || secondary <= bus->number || secondary > subordinate)) { pci_info(dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n", secondary, subordinate); broken = 1; } /* * Disable Master-Abort Mode during probing to avoid reporting of * bus errors in some architectures. */ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); pci_enable_crs(dev); if ((secondary || subordinate) && !pcibios_assign_all_busses() && !is_cardbus && !broken) { unsigned int cmax, buses; /* * Bus already configured by firmware, process it in the * first pass and just note the configuration. */ if (pass) goto out; /* * The bus might already exist for two reasons: Either we * are rescanning the bus or the bus is reachable through * more than one bridge. The second case can happen with * the i450NX chipset. */ child = pci_find_bus(pci_domain_nr(bus), secondary); if (!child) { child = pci_add_new_bus(bus, dev, secondary); if (!child) goto out; child->primary = primary; pci_bus_insert_busn_res(child, secondary, subordinate); child->bridge_ctl = bctl; } buses = subordinate - secondary; cmax = pci_scan_child_bus_extend(child, buses); if (cmax > subordinate) pci_warn(dev, "bridge has subordinate %02x but max busn %02x\n", subordinate, cmax); /* Subordinate should equal child->busn_res.end */ if (subordinate > max) max = subordinate; } else { /* * We need to assign a number to this bus which we always * do in the second pass. */ if (!pass) { if (pcibios_assign_all_busses() || broken || is_cardbus) /* * Temporarily disable forwarding of the * configuration cycles on all bridges in * this bus segment to avoid possible * conflicts in the second pass between two * bridges programmed with overlapping bus * ranges. */ pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses & ~0xffffff); goto out; } /* Clear errors */ pci_write_config_word(dev, PCI_STATUS, 0xffff); /* Read bus numbers from EA Capability (if present) */ fixed_buses = pci_ea_fixed_busnrs(dev, &fixed_sec, &fixed_sub); if (fixed_buses) next_busnr = fixed_sec; else next_busnr = max + 1; /* * Prevent assigning a bus number that already exists. * This can happen when a bridge is hot-plugged, so in this * case we only re-scan this bus. 
*/ child = pci_find_bus(pci_domain_nr(bus), next_busnr); if (!child) { child = pci_add_new_bus(bus, dev, next_busnr); if (!child) goto out; pci_bus_insert_busn_res(child, next_busnr, bus->busn_res.end); } max++; if (available_buses) available_buses--; buses = (buses & 0xff000000) | ((unsigned int)(child->primary) << 0) | ((unsigned int)(child->busn_res.start) << 8) | ((unsigned int)(child->busn_res.end) << 16); /* * yenta.c forces a secondary latency timer of 176. * Copy that behaviour here. */ if (is_cardbus) { buses &= ~0xff000000; buses |= CARDBUS_LATENCY_TIMER << 24; } /* We need to blast all three values with a single write */ pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses); if (!is_cardbus) { child->bridge_ctl = bctl; max = pci_scan_child_bus_extend(child, available_buses); } else { /* * For CardBus bridges, we leave 4 bus numbers as * cards with a PCI-to-PCI bridge can be inserted * later. */ for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) { struct pci_bus *parent = bus; if (pci_find_bus(pci_domain_nr(bus), max+i+1)) break; while (parent->parent) { if ((!pcibios_assign_all_busses()) && (parent->busn_res.end > max) && (parent->busn_res.end <= max+i)) { j = 1; } parent = parent->parent; } if (j) { /* * Often, there are two CardBus * bridges -- try to leave one * valid bus number for each one. */ i /= 2; break; } } max += i; } /* * Set subordinate bus number to its real value. * If fixed subordinate bus number exists from EA * capability then use it. */ if (fixed_buses) max = fixed_sub; pci_bus_update_busn_res_end(child, max); pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); } sprintf(child->name, (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"), pci_domain_nr(bus), child->number); /* Check that all devices are accessible */ while (bus->parent) { if ((child->busn_res.end > bus->busn_res.end) || (child->number > bus->busn_res.end) || (child->number < bus->number) || (child->busn_res.end < bus->number)) { dev_info(&dev->dev, "devices behind bridge are unusable because %pR cannot be assigned for them\n", &child->busn_res); break; } bus = bus->parent; } out: pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl); pm_runtime_put(&dev->dev); return max; } /* * pci_scan_bridge() - Scan buses behind a bridge * @bus: Parent bus the bridge is on * @dev: Bridge itself * @max: Starting subordinate number of buses behind this bridge * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges * that need to be reconfigured. * * If it's a bridge, configure it and scan the bus behind it. * For CardBus bridges, we don't scan behind as the devices will * be handled by the bridge driver itself. * * We need to process bridges in two passes -- first we scan those * already configured by the BIOS and after we are done with all of * them, we proceed to assigning numbers to the remaining buses in * order to avoid overlaps between old and new bus numbers. * * Return: New subordinate number covering all buses behind this bridge. */ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass) { return pci_scan_bridge_extend(bus, dev, max, 0, pass); } EXPORT_SYMBOL(pci_scan_bridge); /* * Read interrupt line and base address registers. * The architecture-dependent code can tweak these, of course. 
*/ static void pci_read_irq(struct pci_dev *dev) { unsigned char irq; /* VFs are not allowed to use INTx, so skip the config reads */ if (dev->is_virtfn) { dev->pin = 0; dev->irq = 0; return; } pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq); dev->pin = irq; if (irq) pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); dev->irq = irq; } void set_pcie_port_type(struct pci_dev *pdev) { int pos; u16 reg16; u32 reg32; int type; struct pci_dev *parent; pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!pos) return; pdev->pcie_cap = pos; pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16); pdev->pcie_flags_reg = reg16; pci_read_config_dword(pdev, pos + PCI_EXP_DEVCAP, &pdev->devcap); pdev->pcie_mpss = FIELD_GET(PCI_EXP_DEVCAP_PAYLOAD, pdev->devcap); pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32); if (reg32 & PCI_EXP_LNKCAP_DLLLARC) pdev->link_active_reporting = 1; parent = pci_upstream_bridge(pdev); if (!parent) return; /* * Some systems do not identify their upstream/downstream ports * correctly so detect impossible configurations here and correct * the port type accordingly. */ type = pci_pcie_type(pdev); if (type == PCI_EXP_TYPE_DOWNSTREAM) { /* * If pdev claims to be downstream port but the parent * device is also downstream port assume pdev is actually * upstream port. */ if (pcie_downstream_port(parent)) { pci_info(pdev, "claims to be downstream port but is acting as upstream port, correcting type\n"); pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE; pdev->pcie_flags_reg |= PCI_EXP_TYPE_UPSTREAM; } } else if (type == PCI_EXP_TYPE_UPSTREAM) { /* * If pdev claims to be upstream port but the parent * device is also upstream port assume pdev is actually * downstream port. */ if (pci_pcie_type(parent) == PCI_EXP_TYPE_UPSTREAM) { pci_info(pdev, "claims to be upstream port but is acting as downstream port, correcting type\n"); pdev->pcie_flags_reg &= ~PCI_EXP_FLAGS_TYPE; pdev->pcie_flags_reg |= PCI_EXP_TYPE_DOWNSTREAM; } } } void set_pcie_hotplug_bridge(struct pci_dev *pdev) { u32 reg32; pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32); if (reg32 & PCI_EXP_SLTCAP_HPC) pdev->is_hotplug_bridge = 1; } static void set_pcie_thunderbolt(struct pci_dev *dev) { u16 vsec; /* Is the device part of a Thunderbolt controller? */ vsec = pci_find_vsec_capability(dev, PCI_VENDOR_ID_INTEL, PCI_VSEC_ID_INTEL_TBT); if (vsec) dev->is_thunderbolt = 1; } static void set_pcie_untrusted(struct pci_dev *dev) { struct pci_dev *parent; /* * If the upstream bridge is untrusted we treat this device * untrusted as well. */ parent = pci_upstream_bridge(dev); if (parent && (parent->untrusted || parent->external_facing)) dev->untrusted = true; } static void pci_set_removable(struct pci_dev *dev) { struct pci_dev *parent = pci_upstream_bridge(dev); /* * We (only) consider everything downstream from an external_facing * device to be removable by the user. We're mainly concerned with * consumer platforms with user accessible thunderbolt ports that are * vulnerable to DMA attacks, and we expect those ports to be marked by * the firmware as external_facing. Devices in traditional hotplug * slots can technically be removed, but the expectation is that unless * the port is marked with external_facing, such devices are less * accessible to user / may not be removed by end user, and thus not * exposed as "removable" to userspace. 
*/ if (parent && (parent->external_facing || dev_is_removable(&parent->dev))) dev_set_removable(&dev->dev, DEVICE_REMOVABLE); } /** * pci_ext_cfg_is_aliased - Is ext config space just an alias of std config? * @dev: PCI device * * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that * when forwarding a type1 configuration request the bridge must check that * the extended register address field is zero. The bridge is not permitted * to forward the transactions and must handle it as an Unsupported Request. * Some bridges do not follow this rule and simply drop the extended register * bits, resulting in the standard config space being aliased, every 256 * bytes across the entire configuration space. Test for this condition by * comparing the first dword of each potential alias to the vendor/device ID. * Known offenders: * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03) * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40) */ static bool pci_ext_cfg_is_aliased(struct pci_dev *dev) { #ifdef CONFIG_PCI_QUIRKS int pos; u32 header, tmp; pci_read_config_dword(dev, PCI_VENDOR_ID, &header); for (pos = PCI_CFG_SPACE_SIZE; pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) { if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL || header != tmp) return false; } return true; #else return false; #endif } /** * pci_cfg_space_size_ext - Get the configuration space size of the PCI device * @dev: PCI device * * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices * have 4096 bytes. Even if the device is capable, that doesn't mean we can * access it. Maybe we don't have a way to generate extended config space * accesses, or the device is behind a reverse Express bridge. So we try * reading the dword at 0x100 which must either be 0 or a valid extended * capability header. */ static int pci_cfg_space_size_ext(struct pci_dev *dev) { u32 status; int pos = PCI_CFG_SPACE_SIZE; if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) return PCI_CFG_SPACE_SIZE; if (PCI_POSSIBLE_ERROR(status) || pci_ext_cfg_is_aliased(dev)) return PCI_CFG_SPACE_SIZE; return PCI_CFG_SPACE_EXP_SIZE; } int pci_cfg_space_size(struct pci_dev *dev) { int pos; u32 status; u16 class; #ifdef CONFIG_PCI_IOV /* * Per the SR-IOV specification (rev 1.1, sec 3.5), VFs are required to * implement a PCIe capability and therefore must implement extended * config space. We can skip the NO_EXTCFG test below and the * reachability/aliasing test in pci_cfg_space_size_ext() by virtue of * the fact that the SR-IOV capability on the PF resides in extended * config space and must be accessible and non-aliased to have enabled * support for this VF. This is a micro performance optimization for * systems supporting many VFs. 
*/ if (dev->is_virtfn) return PCI_CFG_SPACE_EXP_SIZE; #endif if (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_EXTCFG) return PCI_CFG_SPACE_SIZE; class = dev->class >> 8; if (class == PCI_CLASS_BRIDGE_HOST) return pci_cfg_space_size_ext(dev); if (pci_is_pcie(dev)) return pci_cfg_space_size_ext(dev); pos = pci_find_capability(dev, PCI_CAP_ID_PCIX); if (!pos) return PCI_CFG_SPACE_SIZE; pci_read_config_dword(dev, pos + PCI_X_STATUS, &status); if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)) return pci_cfg_space_size_ext(dev); return PCI_CFG_SPACE_SIZE; } static u32 pci_class(struct pci_dev *dev) { u32 class; #ifdef CONFIG_PCI_IOV if (dev->is_virtfn) return dev->physfn->sriov->class; #endif pci_read_config_dword(dev, PCI_CLASS_REVISION, &class); return class; } static void pci_subsystem_ids(struct pci_dev *dev, u16 *vendor, u16 *device) { #ifdef CONFIG_PCI_IOV if (dev->is_virtfn) { *vendor = dev->physfn->sriov->subsystem_vendor; *device = dev->physfn->sriov->subsystem_device; return; } #endif pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, vendor); pci_read_config_word(dev, PCI_SUBSYSTEM_ID, device); } static u8 pci_hdr_type(struct pci_dev *dev) { u8 hdr_type; #ifdef CONFIG_PCI_IOV if (dev->is_virtfn) return dev->physfn->sriov->hdr_type; #endif pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type); return hdr_type; } #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) /** * pci_intx_mask_broken - Test PCI_COMMAND_INTX_DISABLE writability * @dev: PCI device * * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this * at enumeration-time to avoid modifying PCI_COMMAND at run-time. */ static int pci_intx_mask_broken(struct pci_dev *dev) { u16 orig, toggle, new; pci_read_config_word(dev, PCI_COMMAND, &orig); toggle = orig ^ PCI_COMMAND_INTX_DISABLE; pci_write_config_word(dev, PCI_COMMAND, toggle); pci_read_config_word(dev, PCI_COMMAND, &new); pci_write_config_word(dev, PCI_COMMAND, orig); /* * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI * r2.3, so strictly speaking, a device is not *broken* if it's not * writable. But we'll live with the misnomer for now. */ if (new != toggle) return 1; return 0; } static void early_dump_pci_device(struct pci_dev *pdev) { u32 value[256 / 4]; int i; pci_info(pdev, "config space:\n"); for (i = 0; i < 256; i += 4) pci_read_config_dword(pdev, i, &value[i / 4]); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, value, 256, false); } /** * pci_setup_device - Fill in class and map information of a device * @dev: the device structure to fill * * Initialize the device structure with information about the device's * vendor,class,memory and IO-space addresses, IRQ lines etc. * Called at initialisation of the PCI subsystem and by CardBus services. * Returns 0 on success and negative if unknown type of device (not normal, * bridge or CardBus). */ int pci_setup_device(struct pci_dev *dev) { u32 class; u16 cmd; u8 hdr_type; int err, pos = 0; struct pci_bus_region region; struct resource *res; hdr_type = pci_hdr_type(dev); dev->sysdata = dev->bus->sysdata; dev->dev.parent = dev->bus->bridge; dev->dev.bus = &pci_bus_type; dev->hdr_type = hdr_type & 0x7f; dev->multifunction = !!(hdr_type & 0x80); dev->error_state = pci_channel_io_normal; set_pcie_port_type(dev); err = pci_set_of_node(dev); if (err) return err; pci_set_acpi_fwnode(dev); pci_dev_assign_slot(dev); /* * Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) * set this higher, assuming the system even supports it. 
*/ dev->dma_mask = 0xffffffff; dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus), dev->bus->number, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); class = pci_class(dev); dev->revision = class & 0xff; dev->class = class >> 8; /* upper 3 bytes */ if (pci_early_dump) early_dump_pci_device(dev); /* Need to have dev->class ready */ dev->cfg_size = pci_cfg_space_size(dev); /* Need to have dev->cfg_size ready */ set_pcie_thunderbolt(dev); set_pcie_untrusted(dev); /* "Unknown power state" */ dev->current_state = PCI_UNKNOWN; /* Early fixups, before probing the BARs */ pci_fixup_device(pci_fixup_early, dev); pci_set_removable(dev); pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", dev->vendor, dev->device, dev->hdr_type, dev->class); /* Device class may be changed after fixup */ class = dev->class >> 8; if (dev->non_compliant_bars && !dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &cmd); if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { pci_info(dev, "device has non-compliant BARs; disabling IO/MEM decoding\n"); cmd &= ~PCI_COMMAND_IO; cmd &= ~PCI_COMMAND_MEMORY; pci_write_config_word(dev, PCI_COMMAND, cmd); } } dev->broken_intx_masking = pci_intx_mask_broken(dev); switch (dev->hdr_type) { /* header type */ case PCI_HEADER_TYPE_NORMAL: /* standard header */ if (class == PCI_CLASS_BRIDGE_PCI) goto bad; pci_read_irq(dev); pci_read_bases(dev, 6, PCI_ROM_ADDRESS); pci_subsystem_ids(dev, &dev->subsystem_vendor, &dev->subsystem_device); /* * Do the ugly legacy mode stuff here rather than broken chip * quirk code. Legacy mode ATA controllers have fixed * addresses. These are not always echoed in BAR0-3, and * BAR0-3 in a few cases contain junk! */ if (class == PCI_CLASS_STORAGE_IDE) { u8 progif; pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); if ((progif & 1) == 0) { region.start = 0x1F0; region.end = 0x1F7; res = &dev->resource[0]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, &region); pci_info(dev, "legacy IDE quirk: reg 0x10: %pR\n", res); region.start = 0x3F6; region.end = 0x3F6; res = &dev->resource[1]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, &region); pci_info(dev, "legacy IDE quirk: reg 0x14: %pR\n", res); } if ((progif & 4) == 0) { region.start = 0x170; region.end = 0x177; res = &dev->resource[2]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, &region); pci_info(dev, "legacy IDE quirk: reg 0x18: %pR\n", res); region.start = 0x376; region.end = 0x376; res = &dev->resource[3]; res->flags = LEGACY_IO_RESOURCE; pcibios_bus_to_resource(dev->bus, res, &region); pci_info(dev, "legacy IDE quirk: reg 0x1c: %pR\n", res); } } break; case PCI_HEADER_TYPE_BRIDGE: /* bridge header */ /* * The PCI-to-PCI bridge spec requires that subtractive * decoding (i.e. transparent) bridge must have programming * interface code of 0x01. 
*/ pci_read_irq(dev); dev->transparent = ((dev->class & 0xff) == 1); pci_read_bases(dev, 2, PCI_ROM_ADDRESS1); pci_read_bridge_windows(dev); set_pcie_hotplug_bridge(dev); pos = pci_find_capability(dev, PCI_CAP_ID_SSVID); if (pos) { pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor); pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device); } break; case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */ if (class != PCI_CLASS_BRIDGE_CARDBUS) goto bad; pci_read_irq(dev); pci_read_bases(dev, 1, 0); pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor); pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device); break; default: /* unknown header */ pci_err(dev, "unknown header type %02x, ignoring device\n", dev->hdr_type); pci_release_of_node(dev); return -EIO; bad: pci_err(dev, "ignoring class %#08x (doesn't match header type %02x)\n", dev->class, dev->hdr_type); dev->class = PCI_CLASS_NOT_DEFINED << 8; } /* We found a fine healthy device, go go go... */ return 0; } static void pci_configure_mps(struct pci_dev *dev) { struct pci_dev *bridge = pci_upstream_bridge(dev); int mps, mpss, p_mps, rc; if (!pci_is_pcie(dev)) return; /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */ if (dev->is_virtfn) return; /* * For Root Complex Integrated Endpoints, program the maximum * supported value unless limited by the PCIE_BUS_PEER2PEER case. */ if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) { if (pcie_bus_config == PCIE_BUS_PEER2PEER) mps = 128; else mps = 128 << dev->pcie_mpss; rc = pcie_set_mps(dev, mps); if (rc) { pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", mps); } return; } if (!bridge || !pci_is_pcie(bridge)) return; mps = pcie_get_mps(dev); p_mps = pcie_get_mps(bridge); if (mps == p_mps) return; if (pcie_bus_config == PCIE_BUS_TUNE_OFF) { pci_warn(dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", mps, pci_name(bridge), p_mps); return; } /* * Fancier MPS configuration is done later by * pcie_bus_configure_settings() */ if (pcie_bus_config != PCIE_BUS_DEFAULT) return; mpss = 128 << dev->pcie_mpss; if (mpss < p_mps && pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) { pcie_set_mps(bridge, mpss); pci_info(dev, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n", mpss, p_mps, 128 << bridge->pcie_mpss); p_mps = pcie_get_mps(bridge); } rc = pcie_set_mps(dev, p_mps); if (rc) { pci_warn(dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n", p_mps); return; } pci_info(dev, "Max Payload Size set to %d (was %d, max %d)\n", p_mps, mps, mpss); } int pci_configure_extended_tags(struct pci_dev *dev, void *ign) { struct pci_host_bridge *host; u32 cap; u16 ctl; int ret; if (!pci_is_pcie(dev)) return 0; ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap); if (ret) return 0; if (!(cap & PCI_EXP_DEVCAP_EXT_TAG)) return 0; ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl); if (ret) return 0; host = pci_find_host_bridge(dev->bus); if (!host) return 0; /* * If some device in the hierarchy doesn't handle Extended Tags * correctly, make sure they're disabled. 
*/ if (host->no_ext_tags) { if (ctl & PCI_EXP_DEVCTL_EXT_TAG) { pci_info(dev, "disabling Extended Tags\n"); pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_EXT_TAG); } return 0; } if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) { pci_info(dev, "enabling Extended Tags\n"); pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_EXT_TAG); } return 0; } /** * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable * @dev: PCI device to query * * Returns true if the device has enabled relaxed ordering attribute. */ bool pcie_relaxed_ordering_enabled(struct pci_dev *dev) { u16 v; pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v); return !!(v & PCI_EXP_DEVCTL_RELAX_EN); } EXPORT_SYMBOL(pcie_relaxed_ordering_enabled); static void pci_configure_relaxed_ordering(struct pci_dev *dev) { struct pci_dev *root; /* PCI_EXP_DEVCTL_RELAX_EN is RsvdP in VFs */ if (dev->is_virtfn) return; if (!pcie_relaxed_ordering_enabled(dev)) return; /* * For now, we only deal with Relaxed Ordering issues with Root * Ports. Peer-to-Peer DMA is another can of worms. */ root = pcie_find_root_port(dev); if (!root) return; if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) { pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); pci_info(dev, "Relaxed Ordering disabled because the Root Port didn't support it\n"); } } static void pci_configure_ltr(struct pci_dev *dev) { #ifdef CONFIG_PCIEASPM struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); struct pci_dev *bridge; u32 cap, ctl; if (!pci_is_pcie(dev)) return; /* Read L1 PM substate capabilities */ dev->l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS); pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); if (!(cap & PCI_EXP_DEVCAP2_LTR)) return; pcie_capability_read_dword(dev, PCI_EXP_DEVCTL2, &ctl); if (ctl & PCI_EXP_DEVCTL2_LTR_EN) { if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) { dev->ltr_path = 1; return; } bridge = pci_upstream_bridge(dev); if (bridge && bridge->ltr_path) dev->ltr_path = 1; return; } if (!host->native_ltr) return; /* * Software must not enable LTR in an Endpoint unless the Root * Complex and all intermediate Switches indicate support for LTR. * PCIe r4.0, sec 6.18. */ if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) { pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_LTR_EN); dev->ltr_path = 1; return; } /* * If we're configuring a hot-added device, LTR was likely * disabled in the upstream bridge, so re-enable it before enabling * it in the new device. */ bridge = pci_upstream_bridge(dev); if (bridge && bridge->ltr_path) { pci_bridge_reconfigure_ltr(dev); pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_LTR_EN); dev->ltr_path = 1; } #endif } static void pci_configure_eetlp_prefix(struct pci_dev *dev) { #ifdef CONFIG_PCI_PASID struct pci_dev *bridge; int pcie_type; u32 cap; if (!pci_is_pcie(dev)) return; pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap); if (!(cap & PCI_EXP_DEVCAP2_EE_PREFIX)) return; pcie_type = pci_pcie_type(dev); if (pcie_type == PCI_EXP_TYPE_ROOT_PORT || pcie_type == PCI_EXP_TYPE_RC_END) dev->eetlp_prefix_path = 1; else { bridge = pci_upstream_bridge(dev); if (bridge && bridge->eetlp_prefix_path) dev->eetlp_prefix_path = 1; } #endif } static void pci_configure_serr(struct pci_dev *dev) { u16 control; if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { /* * A bridge will not forward ERR_ messages coming from an * endpoint unless SERR# forwarding is enabled. 
*/ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &control); if (!(control & PCI_BRIDGE_CTL_SERR)) { control |= PCI_BRIDGE_CTL_SERR; pci_write_config_word(dev, PCI_BRIDGE_CONTROL, control); } } } static void pci_configure_device(struct pci_dev *dev) { pci_configure_mps(dev); pci_configure_extended_tags(dev, NULL); pci_configure_relaxed_ordering(dev); pci_configure_ltr(dev); pci_configure_eetlp_prefix(dev); pci_configure_serr(dev); pci_acpi_program_hp_params(dev); } static void pci_release_capabilities(struct pci_dev *dev) { pci_aer_exit(dev); pci_rcec_exit(dev); pci_iov_release(dev); pci_free_cap_save_buffers(dev); } /** * pci_release_dev - Free a PCI device structure when all users of it are * finished * @dev: device that's been disconnected * * Will be called only by the device core when all users of this PCI device are * done. */ static void pci_release_dev(struct device *dev) { struct pci_dev *pci_dev; pci_dev = to_pci_dev(dev); pci_release_capabilities(pci_dev); pci_release_of_node(pci_dev); pcibios_release_device(pci_dev); pci_bus_put(pci_dev->bus); kfree(pci_dev->driver_override); bitmap_free(pci_dev->dma_alias_mask); dev_dbg(dev, "device released\n"); kfree(pci_dev); } struct pci_dev *pci_alloc_dev(struct pci_bus *bus) { struct pci_dev *dev; dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL); if (!dev) return NULL; INIT_LIST_HEAD(&dev->bus_list); dev->dev.type = &pci_dev_type; dev->bus = pci_bus_get(bus); dev->driver_exclusive_resource = (struct resource) { .name = "PCI Exclusive", .start = 0, .end = -1, }; spin_lock_init(&dev->pcie_cap_lock); #ifdef CONFIG_PCI_MSI raw_spin_lock_init(&dev->msi_lock); #endif return dev; } EXPORT_SYMBOL(pci_alloc_dev); static bool pci_bus_crs_vendor_id(u32 l) { return (l & 0xffff) == PCI_VENDOR_ID_PCI_SIG; } static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l, int timeout) { int delay = 1; if (!pci_bus_crs_vendor_id(*l)) return true; /* not a CRS completion */ if (!timeout) return false; /* CRS, but caller doesn't want to wait */ /* * We got the reserved Vendor ID that indicates a completion with * Configuration Request Retry Status (CRS). Retry until we get a * valid Vendor ID or we time out. 
*/ while (pci_bus_crs_vendor_id(*l)) { if (delay > timeout) { pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); return false; } if (delay >= 1000) pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); msleep(delay); delay *= 2; if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) return false; } if (delay >= 1000) pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n", pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1); return true; } bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, int timeout) { if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l)) return false; /* Some broken boards return 0 or ~0 (PCI_ERROR_RESPONSE) if a slot is empty: */ if (PCI_POSSIBLE_ERROR(*l) || *l == 0x00000000 || *l == 0x0000ffff || *l == 0xffff0000) return false; if (pci_bus_crs_vendor_id(*l)) return pci_bus_wait_crs(bus, devfn, l, timeout); return true; } bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l, int timeout) { #ifdef CONFIG_PCI_QUIRKS struct pci_dev *bridge = bus->self; /* * Certain IDT switches have an issue where they improperly trigger * ACS Source Validation errors on completions for config reads. */ if (bridge && bridge->vendor == PCI_VENDOR_ID_IDT && bridge->device == 0x80b5) return pci_idt_bus_quirk(bus, devfn, l, timeout); #endif return pci_bus_generic_read_dev_vendor_id(bus, devfn, l, timeout); } EXPORT_SYMBOL(pci_bus_read_dev_vendor_id); /* * Read the config data for a PCI device, sanity-check it, * and fill in the dev structure. */ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) { struct pci_dev *dev; u32 l; if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000)) return NULL; dev = pci_alloc_dev(bus); if (!dev) return NULL; dev->devfn = devfn; dev->vendor = l & 0xffff; dev->device = (l >> 16) & 0xffff; if (pci_setup_device(dev)) { pci_bus_put(dev->bus); kfree(dev); return NULL; } return dev; } void pcie_report_downtraining(struct pci_dev *dev) { if (!pci_is_pcie(dev)) return; /* Look from the device up to avoid downstream ports with no devices */ if ((pci_pcie_type(dev) != PCI_EXP_TYPE_ENDPOINT) && (pci_pcie_type(dev) != PCI_EXP_TYPE_LEG_END) && (pci_pcie_type(dev) != PCI_EXP_TYPE_UPSTREAM)) return; /* Multi-function PCIe devices share the same link/status */ if (PCI_FUNC(dev->devfn) != 0 || dev->is_virtfn) return; /* Print link status only if the device is constrained by the fabric */ __pcie_print_link_status(dev, false); } static void pci_init_capabilities(struct pci_dev *dev) { pci_ea_init(dev); /* Enhanced Allocation */ pci_msi_init(dev); /* Disable MSI */ pci_msix_init(dev); /* Disable MSI-X */ /* Buffers for saving PCIe and PCI-X capabilities */ pci_allocate_cap_save_buffers(dev); pci_pm_init(dev); /* Power Management */ pci_vpd_init(dev); /* Vital Product Data */ pci_configure_ari(dev); /* Alternative Routing-ID Forwarding */ pci_iov_init(dev); /* Single Root I/O Virtualization */ pci_ats_init(dev); /* Address Translation Services */ pci_pri_init(dev); /* Page Request Interface */ pci_pasid_init(dev); /* Process Address Space ID */ pci_acs_init(dev); /* Access Control Services */ pci_ptm_init(dev); /* Precision Time Measurement */ pci_aer_init(dev); /* Advanced Error Reporting */ pci_dpc_init(dev); /* Downstream Port Containment */ pci_rcec_init(dev); /* Root Complex Event Collector 
*/ pci_doe_init(dev); /* Data Object Exchange */ pcie_report_downtraining(dev); pci_init_reset_methods(dev); } /* * This is the equivalent of pci_host_bridge_msi_domain() that acts on * devices. Firmware interfaces that can select the MSI domain on a * per-device basis should be called from here. */ static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev) { struct irq_domain *d; /* * If a domain has been set through the pcibios_device_add() * callback, then this is the one (platform code knows best). */ d = dev_get_msi_domain(&dev->dev); if (d) return d; /* * Let's see if we have a firmware interface able to provide * the domain. */ d = pci_msi_get_device_domain(dev); if (d) return d; return NULL; } static void pci_set_msi_domain(struct pci_dev *dev) { struct irq_domain *d; /* * If the platform or firmware interfaces cannot supply a * device-specific MSI domain, then inherit the default domain * from the host bridge itself. */ d = pci_dev_msi_domain(dev); if (!d) d = dev_get_msi_domain(&dev->bus->dev); dev_set_msi_domain(&dev->dev, d); } void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) { int ret; pci_configure_device(dev); device_initialize(&dev->dev); dev->dev.release = pci_release_dev; set_dev_node(&dev->dev, pcibus_to_node(bus)); dev->dev.dma_mask = &dev->dma_mask; dev->dev.dma_parms = &dev->dma_parms; dev->dev.coherent_dma_mask = 0xffffffffull; dma_set_max_seg_size(&dev->dev, 65536); dma_set_seg_boundary(&dev->dev, 0xffffffff); pcie_failed_link_retrain(dev); /* Fix up broken headers */ pci_fixup_device(pci_fixup_header, dev); pci_reassigndev_resource_alignment(dev); dev->state_saved = false; pci_init_capabilities(dev); /* * Add the device to our list of discovered devices * and the bus list for fixup functions, etc. */ down_write(&pci_bus_sem); list_add_tail(&dev->bus_list, &bus->devices); up_write(&pci_bus_sem); ret = pcibios_device_add(dev); WARN_ON(ret < 0); /* Set up MSI IRQ domain */ pci_set_msi_domain(dev); /* Notifier could use PCI capabilities */ dev->match_driver = false; ret = device_add(&dev->dev); WARN_ON(ret < 0); } struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn) { struct pci_dev *dev; dev = pci_get_slot(bus, devfn); if (dev) { pci_dev_put(dev); return dev; } dev = pci_scan_device(bus, devfn); if (!dev) return NULL; pci_device_add(dev, bus); return dev; } EXPORT_SYMBOL(pci_scan_single_device); static int next_ari_fn(struct pci_bus *bus, struct pci_dev *dev, int fn) { int pos; u16 cap = 0; unsigned int next_fn; if (!dev) return -ENODEV; pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); if (!pos) return -ENODEV; pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap); next_fn = PCI_ARI_CAP_NFN(cap); if (next_fn <= fn) return -ENODEV; /* protect against malformed list */ return next_fn; } static int next_fn(struct pci_bus *bus, struct pci_dev *dev, int fn) { if (pci_ari_enabled(bus)) return next_ari_fn(bus, dev, fn); if (fn >= 7) return -ENODEV; /* only multifunction devices may have more functions */ if (dev && !dev->multifunction) return -ENODEV; return fn + 1; } static int only_one_child(struct pci_bus *bus) { struct pci_dev *bridge = bus->self; /* * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so * we scan for all possible devices, not just Device 0. */ if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS)) return 0; /* * A PCIe Downstream Port normally leads to a Link with only Device * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan * only for Device 0 in that situation. 
*/ if (bridge && pci_is_pcie(bridge) && pcie_downstream_port(bridge)) return 1; return 0; } /** * pci_scan_slot - Scan a PCI slot on a bus for devices * @bus: PCI bus to scan * @devfn: slot number to scan (must have zero function) * * Scan a PCI slot on the specified PCI bus for devices, adding * discovered devices to the @bus->devices list. New devices * will not have is_added set. * * Returns the number of new devices found. */ int pci_scan_slot(struct pci_bus *bus, int devfn) { struct pci_dev *dev; int fn = 0, nr = 0; if (only_one_child(bus) && (devfn > 0)) return 0; /* Already scanned the entire slot */ do { dev = pci_scan_single_device(bus, devfn + fn); if (dev) { if (!pci_dev_is_added(dev)) nr++; if (fn > 0) dev->multifunction = 1; } else if (fn == 0) { /* * Function 0 is required unless we are running on * a hypervisor that passes through individual PCI * functions. */ if (!hypervisor_isolated_pci_functions()) break; } fn = next_fn(bus, dev, fn); } while (fn >= 0); /* Only one slot has PCIe device */ if (bus->self && nr) pcie_aspm_init_link_state(bus->self); return nr; } EXPORT_SYMBOL(pci_scan_slot); static int pcie_find_smpss(struct pci_dev *dev, void *data) { u8 *smpss = data; if (!pci_is_pcie(dev)) return 0; /* * We don't have a way to change MPS settings on devices that have * drivers attached. A hot-added device might support only the minimum * MPS setting (MPS=128). Therefore, if the fabric contains a bridge * where devices may be hot-added, we limit the fabric MPS to 128 so * hot-added devices will work correctly. * * However, if we hot-add a device to a slot directly below a Root * Port, it's impossible for there to be other existing devices below * the port. We don't limit the MPS in this case because we can * reconfigure MPS on both the Root Port and the hot-added device, * and there are no other devices involved. * * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA. */ if (dev->is_hotplug_bridge && pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) *smpss = 0; if (*smpss > dev->pcie_mpss) *smpss = dev->pcie_mpss; return 0; } static void pcie_write_mps(struct pci_dev *dev, int mps) { int rc; if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { mps = 128 << dev->pcie_mpss; if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT && dev->bus->self) /* * For "Performance", the assumption is made that * downstream communication will never be larger than * the MRRS. So, the MPS only needs to be configured * for the upstream communication. This being the case, * walk from the top down and set the MPS of the child * to that of the parent bus. * * Configure the device MPS with the smaller of the * device MPSS or the bridge MPS (which is assumed to be * properly configured at this point to the largest * allowable MPS based on its parent bus). */ mps = min(mps, pcie_get_mps(dev->bus->self)); } rc = pcie_set_mps(dev, mps); if (rc) pci_err(dev, "Failed attempting to set the MPS\n"); } static void pcie_write_mrrs(struct pci_dev *dev) { int rc, mrrs; /* * In the "safe" case, do not configure the MRRS. There appear to be * issues with setting MRRS to 0 on a number of devices. */ if (pcie_bus_config != PCIE_BUS_PERFORMANCE) return; /* * For max performance, the MRRS must be set to the largest supported * value. However, it cannot be configured larger than the MPS the * device or the bus can support. This should already be properly * configured by a prior call to pcie_write_mps(). */ mrrs = pcie_get_mps(dev); /* * MRRS is a R/W register. 
Invalid values can be written, but a * subsequent read will verify if the value is acceptable or not. * If the MRRS value provided is not acceptable (e.g., too large), * shrink the value until it is acceptable to the HW. */ while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) { rc = pcie_set_readrq(dev, mrrs); if (!rc) break; pci_warn(dev, "Failed attempting to set the MRRS\n"); mrrs /= 2; } if (mrrs < 128) pci_err(dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n"); } static int pcie_bus_configure_set(struct pci_dev *dev, void *data) { int mps, orig_mps; if (!pci_is_pcie(dev)) return 0; if (pcie_bus_config == PCIE_BUS_TUNE_OFF || pcie_bus_config == PCIE_BUS_DEFAULT) return 0; mps = 128 << *(u8 *)data; orig_mps = pcie_get_mps(dev); pcie_write_mps(dev, mps); pcie_write_mrrs(dev); pci_info(dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss, orig_mps, pcie_get_readrq(dev)); return 0; } /* * pcie_bus_configure_settings() requires that pci_walk_bus work in a top-down, * parents then children fashion. If this changes, then this code will not * work as designed. */ void pcie_bus_configure_settings(struct pci_bus *bus) { u8 smpss = 0; if (!bus->self) return; if (!pci_is_pcie(bus->self)) return; /* * FIXME - Peer to peer DMA is possible, though the endpoint would need * to be aware of the MPS of the destination. To work around this, * simply force the MPS of the entire system to the smallest possible. */ if (pcie_bus_config == PCIE_BUS_PEER2PEER) smpss = 0; if (pcie_bus_config == PCIE_BUS_SAFE) { smpss = bus->self->pcie_mpss; pcie_find_smpss(bus->self, &smpss); pci_walk_bus(bus, pcie_find_smpss, &smpss); } pcie_bus_configure_set(bus->self, &smpss); pci_walk_bus(bus, pcie_bus_configure_set, &smpss); } EXPORT_SYMBOL_GPL(pcie_bus_configure_settings); /* * Called after each bus is probed, but before its children are examined. This * is marked as __weak because multiple architectures define it. */ void __weak pcibios_fixup_bus(struct pci_bus *bus) { /* nothing to do, expected to be removed in the future */ } /** * pci_scan_child_bus_extend() - Scan devices below a bus * @bus: Bus to scan for devices * @available_buses: Total number of buses available (%0 does not try to * extend beyond the minimal) * * Scans devices below @bus including subordinate buses. Returns new * subordinate number including all the found devices. Passing * @available_buses causes the remaining bus space to be distributed * equally between hotplug-capable bridges to allow future extension of the * hierarchy. */ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus, unsigned int available_buses) { unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0; unsigned int start = bus->busn_res.start; unsigned int devfn, cmax, max = start; struct pci_dev *dev; dev_dbg(&bus->dev, "scanning bus\n"); /* Go find them, Rover! */ for (devfn = 0; devfn < 256; devfn += 8) pci_scan_slot(bus, devfn); /* Reserve buses for SR-IOV capability */ used_buses = pci_iov_bus_range(bus); max += used_buses; /* * After performing arch-dependent fixup of the bus, look behind * all PCI-to-PCI bridges on this bus. */ if (!bus->is_added) { dev_dbg(&bus->dev, "fixups for bus\n"); pcibios_fixup_bus(bus); bus->is_added = 1; } /* * Calculate how many hotplug bridges and normal bridges there * are on this bus. We will distribute the additional available * buses between hotplug bridges. 
*/ for_each_pci_bridge(dev, bus) { if (dev->is_hotplug_bridge) hotplug_bridges++; else normal_bridges++; } /* * Scan bridges that are already configured. We don't touch them * unless they are misconfigured (which will be done in the second * scan below). */ for_each_pci_bridge(dev, bus) { cmax = max; max = pci_scan_bridge_extend(bus, dev, max, 0, 0); /* * Reserve one bus for each bridge now to avoid extending * hotplug bridges too much during the second scan below. */ used_buses++; if (max - cmax > 1) used_buses += max - cmax - 1; } /* Scan bridges that need to be reconfigured */ for_each_pci_bridge(dev, bus) { unsigned int buses = 0; if (!hotplug_bridges && normal_bridges == 1) { /* * There is only one bridge on the bus (upstream * port) so it gets all available buses which it * can then distribute to the possible hotplug * bridges below. */ buses = available_buses; } else if (dev->is_hotplug_bridge) { /* * Distribute the extra buses between hotplug * bridges if any. */ buses = available_buses / hotplug_bridges; buses = min(buses, available_buses - used_buses + 1); } cmax = max; max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1); /* One bus is already accounted so don't add it again */ if (max - cmax > 1) used_buses += max - cmax - 1; } /* * Make sure a hotplug bridge has at least the minimum requested * number of buses but allow it to grow up to the maximum available * bus number if there is room. */ if (bus->self && bus->self->is_hotplug_bridge) { used_buses = max_t(unsigned int, available_buses, pci_hotplug_bus_size - 1); if (max - start < used_buses) { max = start + used_buses; /* Do not allocate more buses than we have room left */ if (max > bus->busn_res.end) max = bus->busn_res.end; dev_dbg(&bus->dev, "%pR extended by %#02x\n", &bus->busn_res, max - start); } } /* * We've scanned the bus and so we know all about what's on * the other side of any bridges that may be on this bus plus * any devices. * * Return how far we've got finding sub-buses. */ dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max); return max; } /** * pci_scan_child_bus() - Scan devices below a bus * @bus: Bus to scan for devices * * Scans devices below @bus including subordinate buses. Returns new * subordinate number including all the found devices. */ unsigned int pci_scan_child_bus(struct pci_bus *bus) { return pci_scan_child_bus_extend(bus, 0); } EXPORT_SYMBOL_GPL(pci_scan_child_bus); /** * pcibios_root_bridge_prepare - Platform-specific host bridge setup * @bridge: Host bridge to set up * * Default empty implementation. Replace with an architecture-specific setup * routine, if necessary. 
*/ int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { return 0; } void __weak pcibios_add_bus(struct pci_bus *bus) { } void __weak pcibios_remove_bus(struct pci_bus *bus) { } struct pci_bus *pci_create_root_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata, struct list_head *resources) { int error; struct pci_host_bridge *bridge; bridge = pci_alloc_host_bridge(0); if (!bridge) return NULL; bridge->dev.parent = parent; list_splice_init(resources, &bridge->windows); bridge->sysdata = sysdata; bridge->busnr = bus; bridge->ops = ops; error = pci_register_host_bridge(bridge); if (error < 0) goto err_out; return bridge->bus; err_out: put_device(&bridge->dev); return NULL; } EXPORT_SYMBOL_GPL(pci_create_root_bus); int pci_host_probe(struct pci_host_bridge *bridge) { struct pci_bus *bus, *child; int ret; ret = pci_scan_root_bus_bridge(bridge); if (ret < 0) { dev_err(bridge->dev.parent, "Scanning root bridge failed"); return ret; } bus = bridge->bus; /* * We insert PCI resources into the iomem_resource and * ioport_resource trees in either pci_bus_claim_resources() * or pci_bus_assign_resources(). */ if (pci_has_flag(PCI_PROBE_ONLY)) { pci_bus_claim_resources(bus); } else { pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); } pci_bus_add_devices(bus); return 0; } EXPORT_SYMBOL_GPL(pci_host_probe); int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) { struct resource *res = &b->busn_res; struct resource *parent_res, *conflict; res->start = bus; res->end = bus_max; res->flags = IORESOURCE_BUS; if (!pci_is_root_bus(b)) parent_res = &b->parent->busn_res; else { parent_res = get_pci_domain_busn_res(pci_domain_nr(b)); res->flags |= IORESOURCE_PCI_FIXED; } conflict = request_resource_conflict(parent_res, res); if (conflict) dev_info(&b->dev, "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n", res, pci_is_root_bus(b) ? "domain " : "", parent_res, conflict->name, conflict); return conflict == NULL; } int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max) { struct resource *res = &b->busn_res; struct resource old_res = *res; resource_size_t size; int ret; if (res->start > bus_max) return -EINVAL; size = bus_max - res->start + 1; ret = adjust_resource(res, res->start, size); dev_info(&b->dev, "busn_res: %pR end %s updated to %02x\n", &old_res, ret ? "can not be" : "is", bus_max); if (!ret && !res->parent) pci_bus_insert_busn_res(b, res->start, res->end); return ret; } void pci_bus_release_busn_res(struct pci_bus *b) { struct resource *res = &b->busn_res; int ret; if (!res->flags || !res->parent) return; ret = release_resource(res); dev_info(&b->dev, "busn_res: %pR %s released\n", res, ret ? 
"can not be" : "is"); } int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge) { struct resource_entry *window; bool found = false; struct pci_bus *b; int max, bus, ret; if (!bridge) return -EINVAL; resource_list_for_each_entry(window, &bridge->windows) if (window->res->flags & IORESOURCE_BUS) { bridge->busnr = window->res->start; found = true; break; } ret = pci_register_host_bridge(bridge); if (ret < 0) return ret; b = bridge->bus; bus = bridge->busnr; if (!found) { dev_info(&b->dev, "No busn resource found for root bus, will use [bus %02x-ff]\n", bus); pci_bus_insert_busn_res(b, bus, 255); } max = pci_scan_child_bus(b); if (!found) pci_bus_update_busn_res_end(b, max); return 0; } EXPORT_SYMBOL(pci_scan_root_bus_bridge); struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, struct pci_ops *ops, void *sysdata, struct list_head *resources) { struct resource_entry *window; bool found = false; struct pci_bus *b; int max; resource_list_for_each_entry(window, resources) if (window->res->flags & IORESOURCE_BUS) { found = true; break; } b = pci_create_root_bus(parent, bus, ops, sysdata, resources); if (!b) return NULL; if (!found) { dev_info(&b->dev, "No busn resource found for root bus, will use [bus %02x-ff]\n", bus); pci_bus_insert_busn_res(b, bus, 255); } max = pci_scan_child_bus(b); if (!found) pci_bus_update_busn_res_end(b, max); return b; } EXPORT_SYMBOL(pci_scan_root_bus); struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata) { LIST_HEAD(resources); struct pci_bus *b; pci_add_resource(&resources, &ioport_resource); pci_add_resource(&resources, &iomem_resource); pci_add_resource(&resources, &busn_resource); b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources); if (b) { pci_scan_child_bus(b); } else { pci_free_resource_list(&resources); } return b; } EXPORT_SYMBOL(pci_scan_bus); /** * pci_rescan_bus_bridge_resize - Scan a PCI bus for devices * @bridge: PCI bridge for the bus to scan * * Scan a PCI bus and child buses for new devices, add them, * and enable them, resizing bridge mmio/io resource if necessary * and possible. The caller must ensure the child devices are already * removed for resizing to occur. * * Returns the max number of subordinate bus discovered. */ unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge) { unsigned int max; struct pci_bus *bus = bridge->subordinate; max = pci_scan_child_bus(bus); pci_assign_unassigned_bridge_resources(bridge); pci_bus_add_devices(bus); return max; } /** * pci_rescan_bus - Scan a PCI bus for devices * @bus: PCI bus to scan * * Scan a PCI bus and child buses for new devices, add them, * and enable them. * * Returns the max number of subordinate bus discovered. */ unsigned int pci_rescan_bus(struct pci_bus *bus) { unsigned int max; max = pci_scan_child_bus(bus); pci_assign_unassigned_bus_resources(bus); pci_bus_add_devices(bus); return max; } EXPORT_SYMBOL_GPL(pci_rescan_bus); /* * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal * routines should always be executed under this mutex. 
*/ static DEFINE_MUTEX(pci_rescan_remove_lock); void pci_lock_rescan_remove(void) { mutex_lock(&pci_rescan_remove_lock); } EXPORT_SYMBOL_GPL(pci_lock_rescan_remove); void pci_unlock_rescan_remove(void) { mutex_unlock(&pci_rescan_remove_lock); } EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove); static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b) { const struct pci_dev *a = to_pci_dev(d_a); const struct pci_dev *b = to_pci_dev(d_b); if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1; else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1; if (a->bus->number < b->bus->number) return -1; else if (a->bus->number > b->bus->number) return 1; if (a->devfn < b->devfn) return -1; else if (a->devfn > b->devfn) return 1; return 0; } void __init pci_sort_breadthfirst(void) { bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp); } int pci_hp_add_bridge(struct pci_dev *dev) { struct pci_bus *parent = dev->bus; int busnr, start = parent->busn_res.start; unsigned int available_buses = 0; int end = parent->busn_res.end; for (busnr = start; busnr <= end; busnr++) { if (!pci_find_bus(pci_domain_nr(parent), busnr)) break; } if (busnr-- > end) { pci_err(dev, "No bus number available for hot-added bridge\n"); return -1; } /* Scan bridges that are already configured */ busnr = pci_scan_bridge(parent, dev, busnr, 0); /* * Distribute the available bus numbers between hotplug-capable * bridges to make extending the chain later possible. */ available_buses = end - busnr; /* Scan bridges that need to be reconfigured */ pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1); if (!dev->subordinate) return -1; return 0; } EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
linux-master
drivers/pci/probe.c
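A short aside on the slot-scanning logic in the probe.c code above (pci_scan_slot() and next_fn()): each device/function pair on a bus is packed into one 8-bit devfn, and a non-ARI slot only walks functions 1-7 when function 0 reported a multifunction header. The sketch below is a minimal, user-space illustration of that encoding under those assumptions; SLOT_OF()/FUNC_OF()/DEVFN() and next_fn_plain() are names invented here (the kernel's PCI_SLOT()/PCI_FUNC() macros use the same bit layout), and the ARI and error-handling branches of the real next_fn() are deliberately omitted.

#include <stdbool.h>
#include <stdio.h>

/* Same bit layout as the kernel macros: devfn = slot[7:3] | func[2:0]. */
#define SLOT_OF(devfn)	(((devfn) >> 3) & 0x1f)
#define FUNC_OF(devfn)	((devfn) & 0x07)
#define DEVFN(slot, fn)	((((slot) & 0x1f) << 3) | ((fn) & 0x07))

/* Mirrors only the non-ARI branch of next_fn(): stop after function 7, and
 * look past function 0 only if that function advertised multifunction. */
static int next_fn_plain(bool multifunction, int fn)
{
	if (fn >= 7)
		return -1;
	if (!multifunction)
		return -1;
	return fn + 1;
}

int main(void)
{
	int devfn = DEVFN(3, 0);	/* slot 3, function 0 */
	int fn;

	printf("devfn %#04x -> slot %d func %d\n",
	       devfn, SLOT_OF(devfn), FUNC_OF(devfn));

	/* A multifunction device in slot 3 would be probed at these devfns. */
	for (fn = 0; fn >= 0; fn = next_fn_plain(true, fn))
		printf("probe devfn %#04x\n", DEVFN(3, fn));

	return 0;
}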
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2022-2023, Advanced Micro Devices, Inc. */ #include <linux/pci.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/bitfield.h> #include <linux/bits.h> #include "pci.h" #define OF_PCI_ADDRESS_CELLS 3 #define OF_PCI_SIZE_CELLS 2 #define OF_PCI_MAX_INT_PIN 4 struct of_pci_addr_pair { u32 phys_addr[OF_PCI_ADDRESS_CELLS]; u32 size[OF_PCI_SIZE_CELLS]; }; /* * Each entry in the ranges table is a tuple containing the child address, * the parent address, and the size of the region in the child address space. * Thus, for PCI, in each entry parent address is an address on the primary * side and the child address is the corresponding address on the secondary * side. */ struct of_pci_range { u32 child_addr[OF_PCI_ADDRESS_CELLS]; u32 parent_addr[OF_PCI_ADDRESS_CELLS]; u32 size[OF_PCI_SIZE_CELLS]; }; #define OF_PCI_ADDR_SPACE_IO 0x1 #define OF_PCI_ADDR_SPACE_MEM32 0x2 #define OF_PCI_ADDR_SPACE_MEM64 0x3 #define OF_PCI_ADDR_FIELD_NONRELOC BIT(31) #define OF_PCI_ADDR_FIELD_SS GENMASK(25, 24) #define OF_PCI_ADDR_FIELD_PREFETCH BIT(30) #define OF_PCI_ADDR_FIELD_BUS GENMASK(23, 16) #define OF_PCI_ADDR_FIELD_DEV GENMASK(15, 11) #define OF_PCI_ADDR_FIELD_FUNC GENMASK(10, 8) #define OF_PCI_ADDR_FIELD_REG GENMASK(7, 0) enum of_pci_prop_compatible { PROP_COMPAT_PCI_VVVV_DDDD, PROP_COMPAT_PCICLASS_CCSSPP, PROP_COMPAT_PCICLASS_CCSS, PROP_COMPAT_NUM, }; static void of_pci_set_address(struct pci_dev *pdev, u32 *prop, u64 addr, u32 reg_num, u32 flags, bool reloc) { prop[0] = FIELD_PREP(OF_PCI_ADDR_FIELD_BUS, pdev->bus->number) | FIELD_PREP(OF_PCI_ADDR_FIELD_DEV, PCI_SLOT(pdev->devfn)) | FIELD_PREP(OF_PCI_ADDR_FIELD_FUNC, PCI_FUNC(pdev->devfn)); prop[0] |= flags | reg_num; if (!reloc) { prop[0] |= OF_PCI_ADDR_FIELD_NONRELOC; prop[1] = upper_32_bits(addr); prop[2] = lower_32_bits(addr); } } static int of_pci_get_addr_flags(struct resource *res, u32 *flags) { u32 ss; if (res->flags & IORESOURCE_IO) ss = OF_PCI_ADDR_SPACE_IO; else if (res->flags & IORESOURCE_MEM_64) ss = OF_PCI_ADDR_SPACE_MEM64; else if (res->flags & IORESOURCE_MEM) ss = OF_PCI_ADDR_SPACE_MEM32; else return -EINVAL; *flags = 0; if (res->flags & IORESOURCE_PREFETCH) *flags |= OF_PCI_ADDR_FIELD_PREFETCH; *flags |= FIELD_PREP(OF_PCI_ADDR_FIELD_SS, ss); return 0; } static int of_pci_prop_bus_range(struct pci_dev *pdev, struct of_changeset *ocs, struct device_node *np) { u32 bus_range[] = { pdev->subordinate->busn_res.start, pdev->subordinate->busn_res.end }; return of_changeset_add_prop_u32_array(ocs, np, "bus-range", bus_range, ARRAY_SIZE(bus_range)); } static int of_pci_prop_ranges(struct pci_dev *pdev, struct of_changeset *ocs, struct device_node *np) { struct of_pci_range *rp; struct resource *res; int i, j, ret; u32 flags, num; u64 val64; if (pci_is_bridge(pdev)) { num = PCI_BRIDGE_RESOURCE_NUM; res = &pdev->resource[PCI_BRIDGE_RESOURCES]; } else { num = PCI_STD_NUM_BARS; res = &pdev->resource[PCI_STD_RESOURCES]; } rp = kcalloc(num, sizeof(*rp), GFP_KERNEL); if (!rp) return -ENOMEM; for (i = 0, j = 0; j < num; j++) { if (!resource_size(&res[j])) continue; if (of_pci_get_addr_flags(&res[j], &flags)) continue; val64 = res[j].start; of_pci_set_address(pdev, rp[i].parent_addr, val64, 0, flags, false); if (pci_is_bridge(pdev)) { memcpy(rp[i].child_addr, rp[i].parent_addr, sizeof(rp[i].child_addr)); } else { /* * For endpoint device, the lower 64-bits of child * address is always zero. 
*/ rp[i].child_addr[0] = j; } val64 = resource_size(&res[j]); rp[i].size[0] = upper_32_bits(val64); rp[i].size[1] = lower_32_bits(val64); i++; } ret = of_changeset_add_prop_u32_array(ocs, np, "ranges", (u32 *)rp, i * sizeof(*rp) / sizeof(u32)); kfree(rp); return ret; } static int of_pci_prop_reg(struct pci_dev *pdev, struct of_changeset *ocs, struct device_node *np) { struct of_pci_addr_pair reg = { 0 }; /* configuration space */ of_pci_set_address(pdev, reg.phys_addr, 0, 0, 0, true); return of_changeset_add_prop_u32_array(ocs, np, "reg", (u32 *)&reg, sizeof(reg) / sizeof(u32)); } static int of_pci_prop_interrupts(struct pci_dev *pdev, struct of_changeset *ocs, struct device_node *np) { int ret; u8 pin; ret = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); if (ret != 0) return ret; if (!pin) return 0; return of_changeset_add_prop_u32(ocs, np, "interrupts", (u32)pin); } static int of_pci_prop_intr_map(struct pci_dev *pdev, struct of_changeset *ocs, struct device_node *np) { struct of_phandle_args out_irq[OF_PCI_MAX_INT_PIN]; u32 i, addr_sz[OF_PCI_MAX_INT_PIN], map_sz = 0; __be32 laddr[OF_PCI_ADDRESS_CELLS] = { 0 }; u32 int_map_mask[] = { 0xffff00, 0, 0, 7 }; struct device_node *pnode; struct pci_dev *child; u32 *int_map, *mapp; int ret; u8 pin; pnode = pci_device_to_OF_node(pdev->bus->self); if (!pnode) pnode = pci_bus_to_OF_node(pdev->bus); if (!pnode) { pci_err(pdev, "failed to get parent device node"); return -EINVAL; } laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8)); for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) { i = pin - 1; out_irq[i].np = pnode; out_irq[i].args_count = 1; out_irq[i].args[0] = pin; ret = of_irq_parse_raw(laddr, &out_irq[i]); if (ret) { pci_err(pdev, "parse irq %d failed, ret %d", pin, ret); continue; } ret = of_property_read_u32(out_irq[i].np, "#address-cells", &addr_sz[i]); if (ret) addr_sz[i] = 0; } list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) { i = pci_swizzle_interrupt_pin(child, pin) - 1; map_sz += 5 + addr_sz[i] + out_irq[i].args_count; } } int_map = kcalloc(map_sz, sizeof(u32), GFP_KERNEL); mapp = int_map; list_for_each_entry(child, &pdev->subordinate->devices, bus_list) { for (pin = 1; pin <= OF_PCI_MAX_INT_PIN; pin++) { *mapp = (child->bus->number << 16) | (child->devfn << 8); mapp += OF_PCI_ADDRESS_CELLS; *mapp = pin; mapp++; i = pci_swizzle_interrupt_pin(child, pin) - 1; *mapp = out_irq[i].np->phandle; mapp++; if (addr_sz[i]) { ret = of_property_read_u32_array(out_irq[i].np, "reg", mapp, addr_sz[i]); if (ret) goto failed; } mapp += addr_sz[i]; memcpy(mapp, out_irq[i].args, out_irq[i].args_count * sizeof(u32)); mapp += out_irq[i].args_count; } } ret = of_changeset_add_prop_u32_array(ocs, np, "interrupt-map", int_map, map_sz); if (ret) goto failed; ret = of_changeset_add_prop_u32(ocs, np, "#interrupt-cells", 1); if (ret) goto failed; ret = of_changeset_add_prop_u32_array(ocs, np, "interrupt-map-mask", int_map_mask, ARRAY_SIZE(int_map_mask)); if (ret) goto failed; kfree(int_map); return 0; failed: kfree(int_map); return ret; } static int of_pci_prop_compatible(struct pci_dev *pdev, struct of_changeset *ocs, struct device_node *np) { const char *compat_strs[PROP_COMPAT_NUM] = { 0 }; int i, ret; compat_strs[PROP_COMPAT_PCI_VVVV_DDDD] = kasprintf(GFP_KERNEL, "pci%x,%x", pdev->vendor, pdev->device); compat_strs[PROP_COMPAT_PCICLASS_CCSSPP] = kasprintf(GFP_KERNEL, "pciclass,%06x", pdev->class); compat_strs[PROP_COMPAT_PCICLASS_CCSS] = kasprintf(GFP_KERNEL, 
"pciclass,%04x", pdev->class >> 8); ret = of_changeset_add_prop_string_array(ocs, np, "compatible", compat_strs, PROP_COMPAT_NUM); for (i = 0; i < PROP_COMPAT_NUM; i++) kfree(compat_strs[i]); return ret; } int of_pci_add_properties(struct pci_dev *pdev, struct of_changeset *ocs, struct device_node *np) { int ret; /* * The added properties will be released when the * changeset is destroyed. */ if (pci_is_bridge(pdev)) { ret = of_changeset_add_prop_string(ocs, np, "device_type", "pci"); if (ret) return ret; ret = of_pci_prop_bus_range(pdev, ocs, np); if (ret) return ret; ret = of_pci_prop_intr_map(pdev, ocs, np); if (ret) return ret; } ret = of_pci_prop_ranges(pdev, ocs, np); if (ret) return ret; ret = of_changeset_add_prop_u32(ocs, np, "#address-cells", OF_PCI_ADDRESS_CELLS); if (ret) return ret; ret = of_changeset_add_prop_u32(ocs, np, "#size-cells", OF_PCI_SIZE_CELLS); if (ret) return ret; ret = of_pci_prop_reg(pdev, ocs, np); if (ret) return ret; ret = of_pci_prop_compatible(pdev, ocs, np); if (ret) return ret; ret = of_pci_prop_interrupts(pdev, ocs, np); if (ret) return ret; return 0; }
linux-master
drivers/pci/of_property.c
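The of_property.c helpers above build the standard three-cell OF PCI address: of_pci_set_address() packs the bus, device and function numbers plus an address-space code into the first (phys.hi) cell using the OF_PCI_ADDR_FIELD_* masks defined at the top of the file. The fragment below is a stand-alone sketch of that packing; of_pci_phys_hi() and the ADDR_SPACE_* names are invented here, the shift values are read off the file's GENMASK()/BIT() definitions, and FIELD_PREP() is replaced with plain shifts so the example compiles outside the kernel.

#include <stdint.h>
#include <stdio.h>

/* Bit layout of the phys.hi cell, per the masks in of_property.c:
 * non-relocatable = bit 31, prefetchable = bit 30, ss = bits 25:24,
 * bus = 23:16, device = 15:11, function = 10:8, register = 7:0. */
#define ADDR_SPACE_IO		0x1u
#define ADDR_SPACE_MEM32	0x2u
#define ADDR_SPACE_MEM64	0x3u

static uint32_t of_pci_phys_hi(unsigned int bus, unsigned int dev,
			       unsigned int fn, unsigned int reg,
			       unsigned int ss, int prefetch, int nonreloc)
{
	uint32_t hi = 0;

	hi |= (uint32_t)(nonreloc ? 1 : 0) << 31;
	hi |= (uint32_t)(prefetch ? 1 : 0) << 30;
	hi |= (ss & 0x3u) << 24;
	hi |= (bus & 0xffu) << 16;
	hi |= (dev & 0x1fu) << 11;
	hi |= (fn & 0x7u) << 8;
	hi |= reg & 0xffu;
	return hi;
}

int main(void)
{
	/* e.g. bus 1, device 0, function 0, non-prefetchable 32-bit memory,
	 * marked non-relocatable; the register field stays 0, as the file
	 * does when emitting "ranges" and "reg" properties. */
	uint32_t hi = of_pci_phys_hi(1, 0, 0, 0, ADDR_SPACE_MEM32, 0, 1);

	printf("phys.hi = 0x%08x\n", hi);
	return 0;
}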
// SPDX-License-Identifier: GPL-2.0 /* * PCI Message Signaled Interrupt (MSI) * * Copyright (C) 2003-2004 Intel * Copyright (C) Tom Long Nguyen ([email protected]) * Copyright (C) 2016 Christoph Hellwig. */ #include <linux/err.h> #include <linux/export.h> #include <linux/irq.h> #include "../pci.h" #include "msi.h" int pci_msi_enable = 1; int pci_msi_ignore_mask; /** * pci_msi_supported - check whether MSI may be enabled on a device * @dev: pointer to the pci_dev data structure of MSI device function * @nvec: how many MSIs have been requested? * * Look at global flags, the device itself, and its parent buses * to determine if MSI/-X are supported for the device. If MSI/-X is * supported return 1, else return 0. **/ static int pci_msi_supported(struct pci_dev *dev, int nvec) { struct pci_bus *bus; /* MSI must be globally enabled and supported by the device */ if (!pci_msi_enable) return 0; if (!dev || dev->no_msi) return 0; /* * You can't ask to have 0 or less MSIs configured. * a) it's stupid .. * b) the list manipulation code assumes nvec >= 1. */ if (nvec < 1) return 0; /* * Any bridge which does NOT route MSI transactions from its * secondary bus to its primary bus must set NO_MSI flag on * the secondary pci_bus. * * The NO_MSI flag can either be set directly by: * - arch-specific PCI host bus controller drivers (deprecated) * - quirks for specific PCI bridges * * or indirectly by platform-specific PCI host bridge drivers by * advertising the 'msi_domain' property, which results in * the NO_MSI flag when no MSI domain is found for this bridge * at probe time. */ for (bus = dev->bus; bus; bus = bus->parent) if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI) return 0; return 1; } static void pcim_msi_release(void *pcidev) { struct pci_dev *dev = pcidev; dev->is_msi_managed = false; pci_free_irq_vectors(dev); } /* * Needs to be separate from pcim_release to prevent an ordering problem * vs. msi_device_data_release() in the MSI core code. */ static int pcim_setup_msi_release(struct pci_dev *dev) { int ret; if (!pci_is_managed(dev) || dev->is_msi_managed) return 0; ret = devm_add_action(&dev->dev, pcim_msi_release, dev); if (!ret) dev->is_msi_managed = true; return ret; } /* * Ordering vs. devres: msi device data has to be installed first so that * pcim_msi_release() is invoked before it on device release. 
*/ static int pci_setup_msi_context(struct pci_dev *dev) { int ret = msi_setup_device_data(&dev->dev); if (!ret) ret = pcim_setup_msi_release(dev); return ret; } /* * Helper functions for mask/unmask and MSI message handling */ void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set) { raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock; unsigned long flags; if (!desc->pci.msi_attrib.can_mask) return; raw_spin_lock_irqsave(lock, flags); desc->pci.msi_mask &= ~clear; desc->pci.msi_mask |= set; pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->pci.mask_pos, desc->pci.msi_mask); raw_spin_unlock_irqrestore(lock, flags); } /** * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts * @data: pointer to irqdata associated to that interrupt */ void pci_msi_mask_irq(struct irq_data *data) { struct msi_desc *desc = irq_data_get_msi_desc(data); __pci_msi_mask_desc(desc, BIT(data->irq - desc->irq)); } EXPORT_SYMBOL_GPL(pci_msi_mask_irq); /** * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts * @data: pointer to irqdata associated to that interrupt */ void pci_msi_unmask_irq(struct irq_data *data) { struct msi_desc *desc = irq_data_get_msi_desc(data); __pci_msi_unmask_desc(desc, BIT(data->irq - desc->irq)); } EXPORT_SYMBOL_GPL(pci_msi_unmask_irq); void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { struct pci_dev *dev = msi_desc_to_pci_dev(entry); BUG_ON(dev->current_state != PCI_D0); if (entry->pci.msi_attrib.is_msix) { void __iomem *base = pci_msix_desc_addr(entry); if (WARN_ON_ONCE(entry->pci.msi_attrib.is_virtual)) return; msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR); msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); msg->data = readl(base + PCI_MSIX_ENTRY_DATA); } else { int pos = dev->msi_cap; u16 data; pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, &msg->address_lo); if (entry->pci.msi_attrib.is_64) { pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, &msg->address_hi); pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data); } else { msg->address_hi = 0; pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data); } msg->data = data; } } static inline void pci_write_msg_msi(struct pci_dev *dev, struct msi_desc *desc, struct msi_msg *msg) { int pos = dev->msi_cap; u16 msgctl; pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); msgctl &= ~PCI_MSI_FLAGS_QSIZE; msgctl |= desc->pci.msi_attrib.multiple << 4; pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl); pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, msg->address_lo); if (desc->pci.msi_attrib.is_64) { pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, msg->address_hi); pci_write_config_word(dev, pos + PCI_MSI_DATA_64, msg->data); } else { pci_write_config_word(dev, pos + PCI_MSI_DATA_32, msg->data); } /* Ensure that the writes are visible in the device */ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl); } static inline void pci_write_msg_msix(struct msi_desc *desc, struct msi_msg *msg) { void __iomem *base = pci_msix_desc_addr(desc); u32 ctrl = desc->pci.msix_ctrl; bool unmasked = !(ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT); if (desc->pci.msi_attrib.is_virtual) return; /* * The specification mandates that the entry is masked * when the message is modified: * * "If software changes the Address or Data value of an * entry while the entry is unmasked, the result is * undefined." 
*/ if (unmasked) pci_msix_write_vector_ctrl(desc, ctrl | PCI_MSIX_ENTRY_CTRL_MASKBIT); writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR); writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); writel(msg->data, base + PCI_MSIX_ENTRY_DATA); if (unmasked) pci_msix_write_vector_ctrl(desc, ctrl); /* Ensure that the writes are visible in the device */ readl(base + PCI_MSIX_ENTRY_DATA); } void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) { struct pci_dev *dev = msi_desc_to_pci_dev(entry); if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) { /* Don't touch the hardware now */ } else if (entry->pci.msi_attrib.is_msix) { pci_write_msg_msix(entry, msg); } else { pci_write_msg_msi(dev, entry, msg); } entry->msg = *msg; if (entry->write_msi_msg) entry->write_msi_msg(entry, entry->write_msi_msg_data); } void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) { struct msi_desc *entry = irq_get_msi_desc(irq); __pci_write_msi_msg(entry, msg); } EXPORT_SYMBOL_GPL(pci_write_msi_msg); /* PCI/MSI specific functionality */ static void pci_intx_for_msi(struct pci_dev *dev, int enable) { if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG)) pci_intx(dev, enable); } static void pci_msi_set_enable(struct pci_dev *dev, int enable) { u16 control; pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); control &= ~PCI_MSI_FLAGS_ENABLE; if (enable) control |= PCI_MSI_FLAGS_ENABLE; pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); } static int msi_setup_msi_desc(struct pci_dev *dev, int nvec, struct irq_affinity_desc *masks) { struct msi_desc desc; u16 control; /* MSI Entry Initialization */ memset(&desc, 0, sizeof(desc)); pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); /* Lies, damned lies, and MSIs */ if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING) control |= PCI_MSI_FLAGS_MASKBIT; /* Respect XEN's mask disabling */ if (pci_msi_ignore_mask) control &= ~PCI_MSI_FLAGS_MASKBIT; desc.nvec_used = nvec; desc.pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT); desc.pci.msi_attrib.can_mask = !!(control & PCI_MSI_FLAGS_MASKBIT); desc.pci.msi_attrib.default_irq = dev->irq; desc.pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; desc.pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); desc.affinity = masks; if (control & PCI_MSI_FLAGS_64BIT) desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64; else desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32; /* Save the initial mask status */ if (desc.pci.msi_attrib.can_mask) pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask); return msi_insert_msi_desc(&dev->dev, &desc); } static int msi_verify_entries(struct pci_dev *dev) { struct msi_desc *entry; if (!dev->no_64bit_msi) return 0; msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { if (entry->msg.address_hi) { pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n", entry->msg.address_hi, entry->msg.address_lo); break; } } return !entry ? 0 : -EIO; } /** * msi_capability_init - configure device's MSI capability structure * @dev: pointer to the pci_dev data structure of MSI device function * @nvec: number of interrupts to allocate * @affd: description of automatic IRQ affinity assignments (may be %NULL) * * Setup the MSI capability structure of the device with the requested * number of interrupts. A return value of zero indicates the successful * setup of an entry with the new MSI IRQ. 
A negative return value indicates * an error, and a positive return value indicates the number of interrupts * which could have been allocated. */ static int msi_capability_init(struct pci_dev *dev, int nvec, struct irq_affinity *affd) { struct irq_affinity_desc *masks = NULL; struct msi_desc *entry; int ret; /* Reject multi-MSI early on irq domain enabled architectures */ if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY)) return 1; /* * Disable MSI during setup in the hardware, but mark it enabled * so that setup code can evaluate it. */ pci_msi_set_enable(dev, 0); dev->msi_enabled = 1; if (affd) masks = irq_create_affinity_masks(nvec, affd); msi_lock_descs(&dev->dev); ret = msi_setup_msi_desc(dev, nvec, masks); if (ret) goto fail; /* All MSIs are unmasked by default; mask them all */ entry = msi_first_desc(&dev->dev, MSI_DESC_ALL); pci_msi_mask(entry, msi_multi_mask(entry)); /* Configure MSI capability structure */ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); if (ret) goto err; ret = msi_verify_entries(dev); if (ret) goto err; /* Set MSI enabled bits */ pci_intx_for_msi(dev, 0); pci_msi_set_enable(dev, 1); pcibios_free_irq(dev); dev->irq = entry->irq; goto unlock; err: pci_msi_unmask(entry, msi_multi_mask(entry)); pci_free_msi_irqs(dev); fail: dev->msi_enabled = 0; unlock: msi_unlock_descs(&dev->dev); kfree(masks); return ret; } int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, struct irq_affinity *affd) { int nvec; int rc; if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0) return -EINVAL; /* Check whether driver already requested MSI-X IRQs */ if (dev->msix_enabled) { pci_info(dev, "can't enable MSI (MSI-X already enabled)\n"); return -EINVAL; } if (maxvec < minvec) return -ERANGE; if (WARN_ON_ONCE(dev->msi_enabled)) return -EINVAL; nvec = pci_msi_vec_count(dev); if (nvec < 0) return nvec; if (nvec < minvec) return -ENOSPC; if (nvec > maxvec) nvec = maxvec; rc = pci_setup_msi_context(dev); if (rc) return rc; if (!pci_setup_msi_device_domain(dev)) return -ENODEV; for (;;) { if (affd) { nvec = irq_calc_affinity_vectors(minvec, nvec, affd); if (nvec < minvec) return -ENOSPC; } rc = msi_capability_init(dev, nvec, affd); if (rc == 0) return nvec; if (rc < 0) return rc; if (rc < minvec) return -ENOSPC; nvec = rc; } } /** * pci_msi_vec_count - Return the number of MSI vectors a device can send * @dev: device to report about * * This function returns the number of MSI vectors a device requested via * Multiple Message Capable register. It returns a negative errno if the * device is not capable sending MSI interrupts. Otherwise, the call succeeds * and returns a power of two, up to a maximum of 2^5 (32), according to the * MSI specification. **/ int pci_msi_vec_count(struct pci_dev *dev) { int ret; u16 msgctl; if (!dev->msi_cap) return -EINVAL; pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1); return ret; } EXPORT_SYMBOL(pci_msi_vec_count); /* * Architecture override returns true when the PCI MSI message should be * written by the generic restore function. 
*/ bool __weak arch_restore_msi_irqs(struct pci_dev *dev) { return true; } void __pci_restore_msi_state(struct pci_dev *dev) { struct msi_desc *entry; u16 control; if (!dev->msi_enabled) return; entry = irq_get_msi_desc(dev->irq); pci_intx_for_msi(dev, 0); pci_msi_set_enable(dev, 0); if (arch_restore_msi_irqs(dev)) __pci_write_msi_msg(entry, &entry->msg); pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); pci_msi_update_mask(entry, 0, 0); control &= ~PCI_MSI_FLAGS_QSIZE; control |= (entry->pci.msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE; pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); } void pci_msi_shutdown(struct pci_dev *dev) { struct msi_desc *desc; if (!pci_msi_enable || !dev || !dev->msi_enabled) return; pci_msi_set_enable(dev, 0); pci_intx_for_msi(dev, 1); dev->msi_enabled = 0; /* Return the device with MSI unmasked as initial states */ desc = msi_first_desc(&dev->dev, MSI_DESC_ALL); if (!WARN_ON_ONCE(!desc)) pci_msi_unmask(desc, msi_multi_mask(desc)); /* Restore dev->irq to its default pin-assertion IRQ */ dev->irq = desc->pci.msi_attrib.default_irq; pcibios_alloc_irq(dev); } /* PCI/MSI-X specific functionality */ static void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) { u16 ctrl; pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); ctrl &= ~clear; ctrl |= set; pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); } static void __iomem *msix_map_region(struct pci_dev *dev, unsigned int nr_entries) { resource_size_t phys_addr; u32 table_offset; unsigned long flags; u8 bir; pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, &table_offset); bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); flags = pci_resource_flags(dev, bir); if (!flags || (flags & IORESOURCE_UNSET)) return NULL; table_offset &= PCI_MSIX_TABLE_OFFSET; phys_addr = pci_resource_start(dev, bir) + table_offset; return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE); } /** * msix_prepare_msi_desc - Prepare a half initialized MSI descriptor for operation * @dev: The PCI device for which the descriptor is prepared * @desc: The MSI descriptor for preparation * * This is separate from msix_setup_msi_descs() below to handle dynamic * allocations for MSI-X after initial enablement. * * Ideally the whole MSI-X setup would work that way, but there is no way to * support this for the legacy arch_setup_msi_irqs() mechanism and for the * fake irq domains like the x86 XEN one. Sigh... * * The descriptor is zeroed and only @desc::msi_index and @desc::affinity * are set. When called from msix_setup_msi_descs() then the is_virtual * attribute is initialized as well. * * Fill in the rest. */ void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc) { desc->nvec_used = 1; desc->pci.msi_attrib.is_msix = 1; desc->pci.msi_attrib.is_64 = 1; desc->pci.msi_attrib.default_irq = dev->irq; desc->pci.mask_base = dev->msix_base; desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask && !desc->pci.msi_attrib.is_virtual; if (desc->pci.msi_attrib.can_mask) { void __iomem *addr = pci_msix_desc_addr(desc); desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL); } } static int msix_setup_msi_descs(struct pci_dev *dev, struct msix_entry *entries, int nvec, struct irq_affinity_desc *masks) { int ret = 0, i, vec_count = pci_msix_vec_count(dev); struct irq_affinity_desc *curmsk; struct msi_desc desc; memset(&desc, 0, sizeof(desc)); for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) { desc.msi_index = entries ? 
entries[i].entry : i; desc.affinity = masks ? curmsk : NULL; desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count; msix_prepare_msi_desc(dev, &desc); ret = msi_insert_msi_desc(&dev->dev, &desc); if (ret) break; } return ret; } static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries) { struct msi_desc *desc; if (entries) { msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) { entries->vector = desc->irq; entries++; } } } static void msix_mask_all(void __iomem *base, int tsize) { u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT; int i; if (pci_msi_ignore_mask) return; for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE) writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL); } static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries, int nvec, struct irq_affinity *affd) { struct irq_affinity_desc *masks = NULL; int ret; if (affd) masks = irq_create_affinity_masks(nvec, affd); msi_lock_descs(&dev->dev); ret = msix_setup_msi_descs(dev, entries, nvec, masks); if (ret) goto out_free; ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); if (ret) goto out_free; /* Check if all MSI entries honor device restrictions */ ret = msi_verify_entries(dev); if (ret) goto out_free; msix_update_entries(dev, entries); goto out_unlock; out_free: pci_free_msi_irqs(dev); out_unlock: msi_unlock_descs(&dev->dev); kfree(masks); return ret; } /** * msix_capability_init - configure device's MSI-X capability * @dev: pointer to the pci_dev data structure of MSI-X device function * @entries: pointer to an array of struct msix_entry entries * @nvec: number of @entries * @affd: Optional pointer to enable automatic affinity assignment * * Setup the MSI-X capability structure of device function with a * single MSI-X IRQ. A return of zero indicates the successful setup of * requested MSI-X entries with allocated IRQs or non-zero for otherwise. **/ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, int nvec, struct irq_affinity *affd) { int ret, tsize; u16 control; /* * Some devices require MSI-X to be enabled before the MSI-X * registers can be accessed. Mask all the vectors to prevent * interrupts coming in before they're fully set up. */ pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); /* Mark it enabled so setup functions can query it */ dev->msix_enabled = 1; pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); /* Request & Map MSI-X table region */ tsize = msix_table_size(control); dev->msix_base = msix_map_region(dev, tsize); if (!dev->msix_base) { ret = -ENOMEM; goto out_disable; } ret = msix_setup_interrupts(dev, entries, nvec, affd); if (ret) goto out_disable; /* Disable INTX */ pci_intx_for_msi(dev, 0); /* * Ensure that all table entries are masked to prevent * stale entries from firing in a crash kernel. * * Done late to deal with a broken Marvell NVME device * which takes the MSI-X mask bits into account even * when MSI-X is disabled, which prevents MSI delivery. 
*/ msix_mask_all(dev->msix_base, tsize); pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); pcibios_free_irq(dev); return 0; out_disable: dev->msix_enabled = 0; pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0); return ret; } static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *entries, int nvec) { bool nogap; int i, j; if (!entries) return true; nogap = pci_msi_domain_supports(dev, MSI_FLAG_MSIX_CONTIGUOUS, DENY_LEGACY); for (i = 0; i < nvec; i++) { /* Check for duplicate entries */ for (j = i + 1; j < nvec; j++) { if (entries[i].entry == entries[j].entry) return false; } /* Check for unsupported gaps */ if (nogap && entries[i].entry != i) return false; } return true; } int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec, struct irq_affinity *affd, int flags) { int hwsize, rc, nvec = maxvec; if (maxvec < minvec) return -ERANGE; if (dev->msi_enabled) { pci_info(dev, "can't enable MSI-X (MSI already enabled)\n"); return -EINVAL; } if (WARN_ON_ONCE(dev->msix_enabled)) return -EINVAL; /* Check MSI-X early on irq domain enabled architectures */ if (!pci_msi_domain_supports(dev, MSI_FLAG_PCI_MSIX, ALLOW_LEGACY)) return -ENOTSUPP; if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0) return -EINVAL; hwsize = pci_msix_vec_count(dev); if (hwsize < 0) return hwsize; if (!pci_msix_validate_entries(dev, entries, nvec)) return -EINVAL; if (hwsize < nvec) { /* Keep the IRQ virtual hackery working */ if (flags & PCI_IRQ_VIRTUAL) hwsize = nvec; else nvec = hwsize; } if (nvec < minvec) return -ENOSPC; rc = pci_setup_msi_context(dev); if (rc) return rc; if (!pci_setup_msix_device_domain(dev, hwsize)) return -ENODEV; for (;;) { if (affd) { nvec = irq_calc_affinity_vectors(minvec, nvec, affd); if (nvec < minvec) return -ENOSPC; } rc = msix_capability_init(dev, entries, nvec, affd); if (rc == 0) return nvec; if (rc < 0) return rc; if (rc < minvec) return -ENOSPC; nvec = rc; } } void __pci_restore_msix_state(struct pci_dev *dev) { struct msi_desc *entry; bool write_msg; if (!dev->msix_enabled) return; /* route the table */ pci_intx_for_msi(dev, 0); pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); write_msg = arch_restore_msi_irqs(dev); msi_lock_descs(&dev->dev); msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { if (write_msg) __pci_write_msi_msg(entry, &entry->msg); pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl); } msi_unlock_descs(&dev->dev); pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); } void pci_msix_shutdown(struct pci_dev *dev) { struct msi_desc *desc; if (!pci_msi_enable || !dev || !dev->msix_enabled) return; if (pci_dev_is_disconnected(dev)) { dev->msix_enabled = 0; return; } /* Return the device with MSI-X masked as initial states */ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) pci_msix_mask(desc); pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); pci_intx_for_msi(dev, 1); dev->msix_enabled = 0; pcibios_alloc_irq(dev); } /* Common interfaces */ void pci_free_msi_irqs(struct pci_dev *dev) { pci_msi_teardown_msi_irqs(dev); if (dev->msix_base) { iounmap(dev->msix_base); dev->msix_base = NULL; } } /* Misc. infrastructure */ struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) { return to_pci_dev(desc->dev); } EXPORT_SYMBOL(msi_desc_to_pci_dev); void pci_no_msi(void) { pci_msi_enable = 0; }
linux-master
drivers/pci/msi/msi.c
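As a hedged illustration of the pci_msi_vec_count() decoding shown in msi.c above: the Multiple Message Capable field stores log2 of the requested vector count in bits 3:1 of the Message Control word. The helper name below is invented for this sketch; the 0x000e mask mirrors the kernel's PCI_MSI_FLAGS_QMASK.

#include <stdio.h>

#define MSI_FLAGS_QMASK	0x000e	/* Multiple Message Capable, bits 3:1 */

/*
 * Decode the Message Control word into the number of vectors the device
 * requests: the field stores log2(nvec), so for spec-conformant values the
 * result is a power of two between 1 and 32, exactly as pci_msi_vec_count()
 * computes it.
 */
static int msi_vec_count(unsigned short msgctl)
{
	return 1 << ((msgctl & MSI_FLAGS_QMASK) >> 1);
}

int main(void)
{
	printf("%d\n", msi_vec_count(0x0086));	/* MMC = 0b011 -> prints 8 */
	return 0;
}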
// SPDX-License-Identifier: GPL-2.0 /* * PCI MSI/MSI-X — Exported APIs for device drivers * * Copyright (C) 2003-2004 Intel * Copyright (C) Tom Long Nguyen ([email protected]) * Copyright (C) 2016 Christoph Hellwig. * Copyright (C) 2022 Linutronix GmbH */ #include <linux/export.h> #include <linux/irq.h> #include "msi.h" /** * pci_enable_msi() - Enable MSI interrupt mode on device * @dev: the PCI device to operate on * * Legacy device driver API to enable MSI interrupts mode on device and * allocate a single interrupt vector. On success, the allocated vector * Linux IRQ will be saved at @dev->irq. The driver must invoke * pci_disable_msi() on cleanup. * * NOTE: The newer pci_alloc_irq_vectors() / pci_free_irq_vectors() API * pair should, in general, be used instead. * * Return: 0 on success, errno otherwise */ int pci_enable_msi(struct pci_dev *dev) { int rc = __pci_enable_msi_range(dev, 1, 1, NULL); if (rc < 0) return rc; return 0; } EXPORT_SYMBOL(pci_enable_msi); /** * pci_disable_msi() - Disable MSI interrupt mode on device * @dev: the PCI device to operate on * * Legacy device driver API to disable MSI interrupt mode on device, * free earlier allocated interrupt vectors, and restore INTx emulation. * The PCI device Linux IRQ (@dev->irq) is restored to its default * pin-assertion IRQ. This is the cleanup pair of pci_enable_msi(). * * NOTE: The newer pci_alloc_irq_vectors() / pci_free_irq_vectors() API * pair should, in general, be used instead. */ void pci_disable_msi(struct pci_dev *dev) { if (!pci_msi_enabled() || !dev || !dev->msi_enabled) return; msi_lock_descs(&dev->dev); pci_msi_shutdown(dev); pci_free_msi_irqs(dev); msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msi); /** * pci_msix_vec_count() - Get number of MSI-X interrupt vectors on device * @dev: the PCI device to operate on * * Return: number of MSI-X interrupt vectors available on this device * (i.e., the device's MSI-X capability structure "table size"), -EINVAL * if the device is not MSI-X capable, other errnos otherwise. */ int pci_msix_vec_count(struct pci_dev *dev) { u16 control; if (!dev->msix_cap) return -EINVAL; pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); return msix_table_size(control); } EXPORT_SYMBOL(pci_msix_vec_count); /** * pci_enable_msix_range() - Enable MSI-X interrupt mode on device * @dev: the PCI device to operate on * @entries: input/output parameter, array of MSI-X configuration entries * @minvec: minimum required number of MSI-X vectors * @maxvec: maximum desired number of MSI-X vectors * * Legacy device driver API to enable MSI-X interrupt mode on device and * configure its MSI-X capability structure as appropriate. The passed * @entries array must have each of its members "entry" field set to a * desired (valid) MSI-X vector number, where the range of valid MSI-X * vector numbers can be queried through pci_msix_vec_count(). If * successful, the driver must invoke pci_disable_msix() on cleanup. * * NOTE: The newer pci_alloc_irq_vectors() / pci_free_irq_vectors() API * pair should, in general, be used instead. * * Return: number of MSI-X vectors allocated (which might be smaller * than @maxvecs), where Linux IRQ numbers for such allocated vectors * are saved back in the @entries array elements' "vector" field. Return * -ENOSPC if less than @minvecs interrupt vectors are available. * Return -EINVAL if one of the passed @entries members "entry" field * was invalid or a duplicate, or if plain MSI interrupts mode was * earlier enabled on device. 
Return other errnos otherwise. */ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec, int maxvec) { return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL, 0); } EXPORT_SYMBOL(pci_enable_msix_range); /** * pci_msix_can_alloc_dyn - Query whether dynamic allocation after enabling * MSI-X is supported * * @dev: PCI device to operate on * * Return: True if supported, false otherwise */ bool pci_msix_can_alloc_dyn(struct pci_dev *dev) { if (!dev->msix_cap) return false; return pci_msi_domain_supports(dev, MSI_FLAG_PCI_MSIX_ALLOC_DYN, DENY_LEGACY); } EXPORT_SYMBOL_GPL(pci_msix_can_alloc_dyn); /** * pci_msix_alloc_irq_at - Allocate an MSI-X interrupt after enabling MSI-X * at a given MSI-X vector index or any free vector index * * @dev: PCI device to operate on * @index: Index to allocate. If @index == MSI_ANY_INDEX this allocates * the next free index in the MSI-X table * @affdesc: Optional pointer to an affinity descriptor structure. NULL otherwise * * Return: A struct msi_map * * On success msi_map::index contains the allocated index (>= 0) and * msi_map::virq contains the allocated Linux interrupt number (> 0). * * On fail msi_map::index contains the error code and msi_map::virq * is set to 0. */ struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index, const struct irq_affinity_desc *affdesc) { struct msi_map map = { .index = -ENOTSUPP }; if (!dev->msix_enabled) return map; if (!pci_msix_can_alloc_dyn(dev)) return map; return msi_domain_alloc_irq_at(&dev->dev, MSI_DEFAULT_DOMAIN, index, affdesc, NULL); } EXPORT_SYMBOL_GPL(pci_msix_alloc_irq_at); /** * pci_msix_free_irq - Free an interrupt on a PCI/MSIX interrupt domain * * @dev: The PCI device to operate on * @map: A struct msi_map describing the interrupt to free * * Undo an interrupt vector allocation. Does not disable MSI-X. */ void pci_msix_free_irq(struct pci_dev *dev, struct msi_map map) { if (WARN_ON_ONCE(map.index < 0 || map.virq <= 0)) return; if (WARN_ON_ONCE(!pci_msix_can_alloc_dyn(dev))) return; msi_domain_free_irqs_range(&dev->dev, MSI_DEFAULT_DOMAIN, map.index, map.index); } EXPORT_SYMBOL_GPL(pci_msix_free_irq); /** * pci_disable_msix() - Disable MSI-X interrupt mode on device * @dev: the PCI device to operate on * * Legacy device driver API to disable MSI-X interrupt mode on device, * free earlier-allocated interrupt vectors, and restore INTx. * The PCI device Linux IRQ (@dev->irq) is restored to its default pin * assertion IRQ. This is the cleanup pair of pci_enable_msix_range(). * * NOTE: The newer pci_alloc_irq_vectors() / pci_free_irq_vectors() API * pair should, in general, be used instead. */ void pci_disable_msix(struct pci_dev *dev) { if (!pci_msi_enabled() || !dev || !dev->msix_enabled) return; msi_lock_descs(&dev->dev); pci_msix_shutdown(dev); pci_free_msi_irqs(dev); msi_unlock_descs(&dev->dev); } EXPORT_SYMBOL(pci_disable_msix); /** * pci_alloc_irq_vectors() - Allocate multiple device interrupt vectors * @dev: the PCI device to operate on * @min_vecs: minimum required number of vectors (must be >= 1) * @max_vecs: maximum desired number of vectors * @flags: One or more of: * * * %PCI_IRQ_MSIX Allow trying MSI-X vector allocations * * %PCI_IRQ_MSI Allow trying MSI vector allocations * * * %PCI_IRQ_LEGACY Allow trying legacy INTx interrupts, if * and only if @min_vecs == 1 * * * %PCI_IRQ_AFFINITY Auto-manage IRQs affinity by spreading * the vectors around available CPUs * * Allocate up to @max_vecs interrupt vectors on device. 
MSI-X irq * vector allocation has a higher precedence over plain MSI, which has a * higher precedence over legacy INTx emulation. * * Upon a successful allocation, the caller should use pci_irq_vector() * to get the Linux IRQ number to be passed to request_threaded_irq(). * The driver must call pci_free_irq_vectors() on cleanup. * * Return: number of allocated vectors (which might be smaller than * @max_vecs), -ENOSPC if less than @min_vecs interrupt vectors are * available, other errnos otherwise. */ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags) { return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs, flags, NULL); } EXPORT_SYMBOL(pci_alloc_irq_vectors); /** * pci_alloc_irq_vectors_affinity() - Allocate multiple device interrupt * vectors with affinity requirements * @dev: the PCI device to operate on * @min_vecs: minimum required number of vectors (must be >= 1) * @max_vecs: maximum desired number of vectors * @flags: allocation flags, as in pci_alloc_irq_vectors() * @affd: affinity requirements (can be %NULL). * * Same as pci_alloc_irq_vectors(), but with the extra @affd parameter. * Check that function docs, and &struct irq_affinity, for more details. */ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, unsigned int max_vecs, unsigned int flags, struct irq_affinity *affd) { struct irq_affinity msi_default_affd = {0}; int nvecs = -ENOSPC; if (flags & PCI_IRQ_AFFINITY) { if (!affd) affd = &msi_default_affd; } else { if (WARN_ON(affd)) affd = NULL; } if (flags & PCI_IRQ_MSIX) { nvecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, affd, flags); if (nvecs > 0) return nvecs; } if (flags & PCI_IRQ_MSI) { nvecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); if (nvecs > 0) return nvecs; } /* use legacy IRQ if allowed */ if (flags & PCI_IRQ_LEGACY) { if (min_vecs == 1 && dev->irq) { /* * Invoke the affinity spreading logic to ensure that * the device driver can adjust queue configuration * for the single interrupt case. */ if (affd) irq_create_affinity_masks(1, affd); pci_intx(dev, 1); return 1; } } return nvecs; } EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); /** * pci_irq_vector() - Get Linux IRQ number of a device interrupt vector * @dev: the PCI device to operate on * @nr: device-relative interrupt vector index (0-based); has different * meanings, depending on interrupt mode: * * * MSI-X the index in the MSI-X vector table * * MSI the index of the enabled MSI vectors * * INTx must be 0 * * Return: the Linux IRQ number, or -EINVAL if @nr is out of range */ int pci_irq_vector(struct pci_dev *dev, unsigned int nr) { unsigned int irq; if (!dev->msi_enabled && !dev->msix_enabled) return !nr ? dev->irq : -EINVAL; irq = msi_get_virq(&dev->dev, nr); return irq ? irq : -EINVAL; } EXPORT_SYMBOL(pci_irq_vector); /** * pci_irq_get_affinity() - Get a device interrupt vector affinity * @dev: the PCI device to operate on * @nr: device-relative interrupt vector index (0-based); has different * meanings, depending on interrupt mode: * * * MSI-X the index in the MSI-X vector table * * MSI the index of the enabled MSI vectors * * INTx must be 0 * * Return: MSI/MSI-X vector affinity, NULL if @nr is out of range or if * the MSI(-X) vector was allocated without explicit affinity * requirements (e.g., by pci_enable_msi(), pci_enable_msix_range(), or * pci_alloc_irq_vectors() without the %PCI_IRQ_AFFINITY flag). 
Return a * generic set of CPU IDs representing all possible CPUs available * during system boot if the device is in legacy INTx mode. */ const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr) { int idx, irq = pci_irq_vector(dev, nr); struct msi_desc *desc; if (WARN_ON_ONCE(irq <= 0)) return NULL; desc = irq_get_msi_desc(irq); /* Non-MSI does not have the information handy */ if (!desc) return cpu_possible_mask; /* MSI[X] interrupts can be allocated without affinity descriptor */ if (!desc->affinity) return NULL; /* * MSI has a mask array in the descriptor. * MSI-X has a single mask. */ idx = dev->msi_enabled ? nr : 0; return &desc->affinity[idx].mask; } EXPORT_SYMBOL(pci_irq_get_affinity); /** * pci_ims_alloc_irq - Allocate an interrupt on a PCI/IMS interrupt domain * @dev: The PCI device to operate on * @icookie: Pointer to an IMS implementation specific cookie for this * IMS instance (PASID, queue ID, pointer...). * The cookie content is copied into the MSI descriptor for the * interrupt chip callbacks or domain specific setup functions. * @affdesc: Optional pointer to an interrupt affinity descriptor * * There is no index for IMS allocations as IMS is an implementation * specific storage and does not have any direct associations between * index, which might be a pure software construct, and device * functionality. This association is established by the driver either via * the index - if there is a hardware table - or in case of purely software * managed IMS implementation the association happens via the * irq_write_msi_msg() callback of the implementation specific interrupt * chip, which utilizes the provided @icookie to store the MSI message in * the appropriate place. * * Return: A struct msi_map * * On success msi_map::index contains the allocated index (>= 0) and * msi_map::virq the allocated Linux interrupt number (> 0). * * On fail msi_map::index contains the error code and msi_map::virq * is set to 0. */ struct msi_map pci_ims_alloc_irq(struct pci_dev *dev, union msi_instance_cookie *icookie, const struct irq_affinity_desc *affdesc) { return msi_domain_alloc_irq_at(&dev->dev, MSI_SECONDARY_DOMAIN, MSI_ANY_INDEX, affdesc, icookie); } EXPORT_SYMBOL_GPL(pci_ims_alloc_irq); /** * pci_ims_free_irq - Allocate an interrupt on a PCI/IMS interrupt domain * which was allocated via pci_ims_alloc_irq() * @dev: The PCI device to operate on * @map: A struct msi_map describing the interrupt to free as * returned from pci_ims_alloc_irq() */ void pci_ims_free_irq(struct pci_dev *dev, struct msi_map map) { if (WARN_ON_ONCE(map.index < 0 || map.virq <= 0)) return; msi_domain_free_irqs_range(&dev->dev, MSI_SECONDARY_DOMAIN, map.index, map.index); } EXPORT_SYMBOL_GPL(pci_ims_free_irq); /** * pci_free_irq_vectors() - Free previously allocated IRQs for a device * @dev: the PCI device to operate on * * Undo the interrupt vector allocations and possible device MSI/MSI-X * enablement earlier done through pci_alloc_irq_vectors_affinity() or * pci_alloc_irq_vectors(). */ void pci_free_irq_vectors(struct pci_dev *dev) { pci_disable_msix(dev); pci_disable_msi(dev); } EXPORT_SYMBOL(pci_free_irq_vectors); /** * pci_restore_msi_state() - Restore cached MSI(-X) state on device * @dev: the PCI device to operate on * * Write the Linux-cached MSI(-X) state back on device. This is * typically useful upon system resume, or after an error-recovery PCI * adapter reset. 
*/ void pci_restore_msi_state(struct pci_dev *dev) { __pci_restore_msi_state(dev); __pci_restore_msix_state(dev); } EXPORT_SYMBOL_GPL(pci_restore_msi_state); /** * pci_msi_enabled() - Are MSI(-X) interrupts enabled system-wide? * * Return: true if MSI has not been globally disabled through ACPI FADT, * PCI bridge quirks, or the "pci=nomsi" kernel command-line option. */ int pci_msi_enabled(void) { return pci_msi_enable; } EXPORT_SYMBOL(pci_msi_enabled);
linux-master
drivers/pci/msi/api.c
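The exported helpers in api.c above are meant to be used together. A minimal sketch (hypothetical driver, invented demo_* names) of a setup path that allocates vectors, maps indices to Linux IRQ numbers, requests handlers, and cleans up on failure, using only the APIs documented above plus request_irq()/free_irq():

#include <linux/pci.h>
#include <linux/interrupt.h>

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	/* A real handler would inspect hardware state here */
	return IRQ_HANDLED;
}

static int demo_setup_irqs(struct pci_dev *pdev)
{
	int i, irq, nvecs;

	/* Prefer MSI-X, fall back to MSI, then INTx; accept 1..4 vectors */
	nvecs = pci_alloc_irq_vectors(pdev, 1, 4,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvecs < 0)
		return nvecs;

	for (i = 0; i < nvecs; i++) {
		irq = pci_irq_vector(pdev, i);
		if (irq < 0 || request_irq(irq, demo_irq_handler, 0,
					   "demo", pdev)) {
			/* Release the vectors already requested */
			while (--i >= 0)
				free_irq(pci_irq_vector(pdev, i), pdev);
			pci_free_irq_vectors(pdev);
			return -ENODEV;
		}
	}
	return 0;
}

On teardown the driver would free_irq() each vector and then call pci_free_irq_vectors(), which disables MSI-X and MSI as shown in pci_free_irq_vectors() above.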
// SPDX-License-Identifier: GPL-2.0 /* * PCI Message Signaled Interrupt (MSI) - irqdomain support */ #include <linux/acpi_iort.h> #include <linux/irqdomain.h> #include <linux/of_irq.h> #include "msi.h" int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { struct irq_domain *domain; domain = dev_get_msi_domain(&dev->dev); if (domain && irq_domain_is_hierarchy(domain)) return msi_domain_alloc_irqs_all_locked(&dev->dev, MSI_DEFAULT_DOMAIN, nvec); return pci_msi_legacy_setup_msi_irqs(dev, nvec, type); } void pci_msi_teardown_msi_irqs(struct pci_dev *dev) { struct irq_domain *domain; domain = dev_get_msi_domain(&dev->dev); if (domain && irq_domain_is_hierarchy(domain)) { msi_domain_free_irqs_all_locked(&dev->dev, MSI_DEFAULT_DOMAIN); } else { pci_msi_legacy_teardown_msi_irqs(dev); msi_free_msi_descs(&dev->dev); } } /** * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space * @irq_data: Pointer to interrupt data of the MSI interrupt * @msg: Pointer to the message */ static void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg) { struct msi_desc *desc = irq_data_get_msi_desc(irq_data); /* * For MSI-X desc->irq is always equal to irq_data->irq. For * MSI only the first interrupt of MULTI MSI passes the test. */ if (desc->irq == irq_data->irq) __pci_write_msi_msg(desc, msg); } /** * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source * @desc: Pointer to the MSI descriptor * * The ID number is only used within the irqdomain. */ static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc) { struct pci_dev *dev = msi_desc_to_pci_dev(desc); return (irq_hw_number_t)desc->msi_index | pci_dev_id(dev) << 11 | (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; } static void pci_msi_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) { arg->desc = desc; arg->hwirq = pci_msi_domain_calc_hwirq(desc); } static struct msi_domain_ops pci_msi_domain_ops_default = { .set_desc = pci_msi_domain_set_desc, }; static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info) { struct msi_domain_ops *ops = info->ops; if (ops == NULL) { info->ops = &pci_msi_domain_ops_default; } else { if (ops->set_desc == NULL) ops->set_desc = pci_msi_domain_set_desc; } } static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) { struct irq_chip *chip = info->chip; BUG_ON(!chip); if (!chip->irq_write_msi_msg) chip->irq_write_msi_msg = pci_msi_domain_write_msg; if (!chip->irq_mask) chip->irq_mask = pci_msi_mask_irq; if (!chip->irq_unmask) chip->irq_unmask = pci_msi_unmask_irq; } /** * pci_msi_create_irq_domain - Create a MSI interrupt domain * @fwnode: Optional fwnode of the interrupt controller * @info: MSI domain info * @parent: Parent irq domain * * Updates the domain and chip ops and creates a MSI interrupt domain. * * Returns: * A domain pointer or NULL in case of failure. 
*/ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, struct msi_domain_info *info, struct irq_domain *parent) { if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE)) info->flags &= ~MSI_FLAG_LEVEL_CAPABLE; if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) pci_msi_domain_update_dom_ops(info); if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) pci_msi_domain_update_chip_ops(info); /* Let the core code free MSI descriptors when freeing interrupts */ info->flags |= MSI_FLAG_FREE_MSI_DESCS; info->flags |= MSI_FLAG_ACTIVATE_EARLY | MSI_FLAG_DEV_SYSFS; if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE)) info->flags |= MSI_FLAG_MUST_REACTIVATE; /* PCI-MSI is oneshot-safe */ info->chip->flags |= IRQCHIP_ONESHOT_SAFE; /* Let the core update the bus token */ info->bus_token = DOMAIN_BUS_PCI_MSI; return msi_create_irq_domain(fwnode, info, parent); } EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain); /* * Per device MSI[-X] domain functionality */ static void pci_device_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) { arg->desc = desc; arg->hwirq = desc->msi_index; } static void pci_irq_mask_msi(struct irq_data *data) { struct msi_desc *desc = irq_data_get_msi_desc(data); pci_msi_mask(desc, BIT(data->irq - desc->irq)); } static void pci_irq_unmask_msi(struct irq_data *data) { struct msi_desc *desc = irq_data_get_msi_desc(data); pci_msi_unmask(desc, BIT(data->irq - desc->irq)); } #ifdef CONFIG_GENERIC_IRQ_RESERVATION_MODE # define MSI_REACTIVATE MSI_FLAG_MUST_REACTIVATE #else # define MSI_REACTIVATE 0 #endif #define MSI_COMMON_FLAGS (MSI_FLAG_FREE_MSI_DESCS | \ MSI_FLAG_ACTIVATE_EARLY | \ MSI_FLAG_DEV_SYSFS | \ MSI_REACTIVATE) static const struct msi_domain_template pci_msi_template = { .chip = { .name = "PCI-MSI", .irq_mask = pci_irq_mask_msi, .irq_unmask = pci_irq_unmask_msi, .irq_write_msi_msg = pci_msi_domain_write_msg, .flags = IRQCHIP_ONESHOT_SAFE, }, .ops = { .set_desc = pci_device_domain_set_desc, }, .info = { .flags = MSI_COMMON_FLAGS | MSI_FLAG_MULTI_PCI_MSI, .bus_token = DOMAIN_BUS_PCI_DEVICE_MSI, }, }; static void pci_irq_mask_msix(struct irq_data *data) { pci_msix_mask(irq_data_get_msi_desc(data)); } static void pci_irq_unmask_msix(struct irq_data *data) { pci_msix_unmask(irq_data_get_msi_desc(data)); } static void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg, struct msi_desc *desc) { /* Don't fiddle with preallocated MSI descriptors */ if (!desc->pci.mask_base) msix_prepare_msi_desc(to_pci_dev(desc->dev), desc); } static const struct msi_domain_template pci_msix_template = { .chip = { .name = "PCI-MSIX", .irq_mask = pci_irq_mask_msix, .irq_unmask = pci_irq_unmask_msix, .irq_write_msi_msg = pci_msi_domain_write_msg, .flags = IRQCHIP_ONESHOT_SAFE, }, .ops = { .prepare_desc = pci_msix_prepare_desc, .set_desc = pci_device_domain_set_desc, }, .info = { .flags = MSI_COMMON_FLAGS | MSI_FLAG_PCI_MSIX | MSI_FLAG_PCI_MSIX_ALLOC_DYN, .bus_token = DOMAIN_BUS_PCI_DEVICE_MSIX, }, }; static bool pci_match_device_domain(struct pci_dev *pdev, enum irq_domain_bus_token bus_token) { return msi_match_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN, bus_token); } static bool pci_create_device_domain(struct pci_dev *pdev, const struct msi_domain_template *tmpl, unsigned int hwsize) { struct irq_domain *domain = dev_get_msi_domain(&pdev->dev); if (!domain || !irq_domain_is_msi_parent(domain)) return true; return msi_create_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN, tmpl, hwsize, NULL, NULL); } /** * pci_setup_msi_device_domain - Setup a device MSI interrupt 
domain * @pdev: The PCI device to create the domain on * * Return: * True when: * - The device does not have a MSI parent irq domain associated, * which keeps the legacy architecture specific and the global * PCI/MSI domain models working * - The MSI domain exists already * - The MSI domain was successfully allocated * False when: * - MSI-X is enabled * - The domain creation fails. * * The created MSI domain is preserved until: * - The device is removed * - MSI is disabled and a MSI-X domain is created */ bool pci_setup_msi_device_domain(struct pci_dev *pdev) { if (WARN_ON_ONCE(pdev->msix_enabled)) return false; if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSI)) return true; if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSIX)) msi_remove_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN); return pci_create_device_domain(pdev, &pci_msi_template, 1); } /** * pci_setup_msix_device_domain - Setup a device MSI-X interrupt domain * @pdev: The PCI device to create the domain on * @hwsize: The size of the MSI-X vector table * * Return: * True when: * - The device does not have a MSI parent irq domain associated, * which keeps the legacy architecture specific and the global * PCI/MSI domain models working * - The MSI-X domain exists already * - The MSI-X domain was successfully allocated * False when: * - MSI is enabled * - The domain creation fails. * * The created MSI-X domain is preserved until: * - The device is removed * - MSI-X is disabled and a MSI domain is created */ bool pci_setup_msix_device_domain(struct pci_dev *pdev, unsigned int hwsize) { if (WARN_ON_ONCE(pdev->msi_enabled)) return false; if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSIX)) return true; if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSI)) msi_remove_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN); return pci_create_device_domain(pdev, &pci_msix_template, hwsize); } /** * pci_msi_domain_supports - Check for support of a particular feature flag * @pdev: The PCI device to operate on * @feature_mask: The feature mask to check for (full match) * @mode: If ALLOW_LEGACY this grants the feature when there is no irq domain * associated to the device. If DENY_LEGACY the lack of an irq domain * makes the feature unsupported */ bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask, enum support_mode mode) { struct msi_domain_info *info; struct irq_domain *domain; unsigned int supported; domain = dev_get_msi_domain(&pdev->dev); if (!domain || !irq_domain_is_hierarchy(domain)) return mode == ALLOW_LEGACY; if (!irq_domain_is_msi_parent(domain)) { /* * For "global" PCI/MSI interrupt domains the associated * msi_domain_info::flags is the authoritative source of * information. */ info = domain->host_data; supported = info->flags; } else { /* * For MSI parent domains the supported feature set * is available in the parent ops. This makes checks * possible before actually instantiating the * per device domain because the parent is never * expanding the PCI/MSI functionality. 
*/ supported = domain->msi_parent_ops->supported_flags; } return (supported & feature_mask) == feature_mask; } /** * pci_create_ims_domain - Create a secondary IMS domain for a PCI device * @pdev: The PCI device to operate on * @template: The MSI info template which describes the domain * @hwsize: The size of the hardware entry table or 0 if the domain * is purely software managed * @data: Optional pointer to domain specific data to be stored * in msi_domain_info::data * * Return: True on success, false otherwise * * An IMS domain is expected to have the following constraints: * - The index space is managed by the core code * * - There is no requirement for consecutive index ranges * * - The interrupt chip must provide the following callbacks: * - irq_mask() * - irq_unmask() * - irq_write_msi_msg() * * - The interrupt chip must provide the following optional callbacks * when the irq_mask(), irq_unmask() and irq_write_msi_msg() callbacks * cannot operate directly on hardware, e.g. in the case that the * interrupt message store is in queue memory: * - irq_bus_lock() * - irq_bus_unlock() * * These callbacks are invoked from preemptible task context and are * allowed to sleep. In this case the mandatory callbacks above just * store the information. The irq_bus_unlock() callback is supposed * to make the change effective before returning. * * - Interrupt affinity setting is handled by the underlying parent * interrupt domain and communicated to the IMS domain via * irq_write_msi_msg(). * * The domain is automatically destroyed when the PCI device is removed. */ bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template, unsigned int hwsize, void *data) { struct irq_domain *domain = dev_get_msi_domain(&pdev->dev); if (!domain || !irq_domain_is_msi_parent(domain)) return false; if (template->info.bus_token != DOMAIN_BUS_PCI_DEVICE_IMS || !(template->info.flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS) || !(template->info.flags & MSI_FLAG_FREE_MSI_DESCS) || !template->chip.irq_mask || !template->chip.irq_unmask || !template->chip.irq_write_msi_msg || template->chip.irq_set_affinity) return false; return msi_create_device_irq_domain(&pdev->dev, MSI_SECONDARY_DOMAIN, template, hwsize, data, NULL); } EXPORT_SYMBOL_GPL(pci_create_ims_domain); /* * Users of the generic MSI infrastructure expect a device to have a single ID, * so with DMA aliases we have to pick the least-worst compromise. Devices with * DMA phantom functions tend to still emit MSIs from the real function number, * so we ignore those and only consider topological aliases where either the * alias device or RID appears on a different bus number. We also make the * reasonable assumption that bridges are walked in an upstream direction (so * the last one seen wins), and the much braver assumption that the most likely * case is that of PCI->PCIe so we should always use the alias RID. This echoes * the logic from intel_irq_remapping's set_msi_sid(), which presumably works * well enough in practice; in the face of the horrible PCIe<->PCI-X conditions * for taking ownership all we can really do is close our eyes and hope... */ static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data) { u32 *pa = data; u8 bus = PCI_BUS_NUM(*pa); if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus) *pa = alias; return 0; } /** * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID) * @domain: The interrupt domain * @pdev: The PCI device. 
* * The RID for a device is formed from the alias, with a firmware * supplied mapping applied * * Returns: The RID. */ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) { struct device_node *of_node; u32 rid = pci_dev_id(pdev); pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); of_node = irq_domain_get_of_node(domain); rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) : iort_msi_map_id(&pdev->dev, rid); return rid; } /** * pci_msi_get_device_domain - Get the MSI domain for a given PCI device * @pdev: The PCI device * * Use the firmware data to find a device-specific MSI domain * (i.e. not one that is set as a default). * * Returns: The corresponding MSI domain or NULL if none has been found. */ struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) { struct irq_domain *dom; u32 rid = pci_dev_id(pdev); pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI); if (!dom) dom = iort_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI); return dom; }
linux-master
drivers/pci/msi/irqdomain.c
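A standalone, userspace-style illustration (hypothetical helper, not kernel code) of the hwirq packing performed by pci_msi_domain_calc_hwirq() above: the MSI index occupies the low bits, the 16-bit RID produced by pci_dev_id() (bus << 8 | devfn) is shifted by 11, and the PCI domain number is shifted by 27.

#include <stdint.h>
#include <stdio.h>

static uint64_t calc_hwirq(uint32_t msi_index, uint16_t rid, uint32_t domain_nr)
{
	/* Same composition as pci_msi_domain_calc_hwirq() */
	return (uint64_t)msi_index |
	       ((uint64_t)rid << 11) |
	       ((uint64_t)(domain_nr & 0xFFFFFFFF) << 27);
}

int main(void)
{
	/* e.g. vector 2 of device 0000:03:00.0 (RID 0x0300) in domain 0 */
	printf("0x%llx\n", (unsigned long long)calc_hwirq(2, 0x0300, 0));
	return 0;
}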
// SPDX-License-Identifier: GPL-2.0 /* * PCI Message Signaled Interrupt (MSI). * * Legacy architecture specific setup and teardown mechanism. */ #include "msi.h" /* Arch hooks */ int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) { return -EINVAL; } void __weak arch_teardown_msi_irq(unsigned int irq) { } int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { struct msi_desc *desc; int ret; /* * If an architecture wants to support multiple MSI, it needs to * override arch_setup_msi_irqs() */ if (type == PCI_CAP_ID_MSI && nvec > 1) return 1; msi_for_each_desc(desc, &dev->dev, MSI_DESC_NOTASSOCIATED) { ret = arch_setup_msi_irq(dev, desc); if (ret) return ret < 0 ? ret : -ENOSPC; } return 0; } void __weak arch_teardown_msi_irqs(struct pci_dev *dev) { struct msi_desc *desc; int i; msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED) { for (i = 0; i < desc->nvec_used; i++) arch_teardown_msi_irq(desc->irq + i); } } static int pci_msi_setup_check_result(struct pci_dev *dev, int type, int ret) { struct msi_desc *desc; int avail = 0; if (type != PCI_CAP_ID_MSIX || ret >= 0) return ret; /* Scan the MSI descriptors for successfully allocated ones. */ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED) avail++; return avail ? avail : ret; } int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) { int ret = arch_setup_msi_irqs(dev, nvec, type); ret = pci_msi_setup_check_result(dev, type, ret); if (!ret) ret = msi_device_populate_sysfs(&dev->dev); return ret; } void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev) { msi_device_destroy_sysfs(&dev->dev); arch_teardown_msi_irqs(dev); }
linux-master
drivers/pci/msi/legacy.c
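The legacy path above reports partial success as a positive vector count: arch_setup_msi_irqs() returns 1 when multi-MSI is unsupported, and pci_msi_setup_check_result() converts a failed MSI-X bulk setup into the number of descriptors that did get allocated. The range-allocation loops in msi.c then retry with that count. Below is a compact, userspace-style sketch of that retry contract; try_alloc() is a hypothetical callback standing in for msi_capability_init()/msix_capability_init().

#include <errno.h>
#include <stdio.h>

/*
 * try_alloc() follows the contract documented in msi.c: 0 on success,
 * a negative errno on hard failure, or a positive count of vectors that
 * could have been allocated instead of the requested amount.
 */
static int alloc_irq_range(int minvec, int nvec, int (*try_alloc)(int nvec))
{
	for (;;) {
		int rc = try_alloc(nvec);

		if (rc == 0)
			return nvec;	/* success with the current request */
		if (rc < 0)
			return rc;	/* hard error */
		if (rc < minvec)
			return -ENOSPC;	/* below the caller's minimum */
		nvec = rc;		/* retry with the reported count */
	}
}

/* A device that can only provide four vectors, whatever is requested. */
static int try_alloc_four(int nvec)
{
	return nvec <= 4 ? 0 : 4;
}

int main(void)
{
	printf("%d\n", alloc_irq_range(2, 32, try_alloc_four));	/* prints 4 */
	return 0;
}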
// SPDX-License-Identifier: GPL-2.0
/*
 * MSI[X] related functions which are available unconditionally.
 */
#include "../pci.h"

/*
 * Disable the MSI[X] hardware to avoid screaming interrupts during boot.
 * This is the power on reset default so usually this should be a noop.
 */
void pci_msi_init(struct pci_dev *dev)
{
	u16 ctrl;

	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (!dev->msi_cap)
		return;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
	if (ctrl & PCI_MSI_FLAGS_ENABLE) {
		pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS,
				      ctrl & ~PCI_MSI_FLAGS_ENABLE);
	}

	if (!(ctrl & PCI_MSI_FLAGS_64BIT))
		dev->no_64bit_msi = 1;
}

void pci_msix_init(struct pci_dev *dev)
{
	u16 ctrl;

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (!dev->msix_cap)
		return;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	if (ctrl & PCI_MSIX_FLAGS_ENABLE) {
		pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS,
				      ctrl & ~PCI_MSIX_FLAGS_ENABLE);
	}
}
linux-master
drivers/pci/msi/pcidev_msi.c
// SPDX-License-Identifier: GPL-2.0+ /* * PCI Hot Plug Controller Driver for System z * * Copyright 2012 IBM Corp. * * Author(s): * Jan Glauber <[email protected]> */ #define KMSG_COMPONENT "zpci" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <asm/pci_debug.h> #include <asm/sclp.h> #define SLOT_NAME_SIZE 10 static int enable_slot(struct hotplug_slot *hotplug_slot) { struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, hotplug_slot); int rc; if (zdev->state != ZPCI_FN_STATE_STANDBY) return -EIO; rc = sclp_pci_configure(zdev->fid); zpci_dbg(3, "conf fid:%x, rc:%d\n", zdev->fid, rc); if (rc) return rc; zdev->state = ZPCI_FN_STATE_CONFIGURED; return zpci_scan_configured_device(zdev, zdev->fh); } static int disable_slot(struct hotplug_slot *hotplug_slot) { struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, hotplug_slot); struct pci_dev *pdev; if (zdev->state != ZPCI_FN_STATE_CONFIGURED) return -EIO; pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn); if (pdev && pci_num_vf(pdev)) { pci_dev_put(pdev); return -EBUSY; } pci_dev_put(pdev); return zpci_deconfigure_device(zdev); } static int reset_slot(struct hotplug_slot *hotplug_slot, bool probe) { struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, hotplug_slot); if (zdev->state != ZPCI_FN_STATE_CONFIGURED) return -EIO; /* * We can't take the zdev->lock as reset_slot may be called during * probing and/or device removal which already happens under the * zdev->lock. Instead the user should use the higher level * pci_reset_function() or pci_bus_reset() which hold the PCI device * lock preventing concurrent removal. If not using these functions * holding the PCI device lock is required. */ /* As long as the function is configured we can reset */ if (probe) return 0; return zpci_hot_reset_device(zdev); } static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct zpci_dev *zdev = container_of(hotplug_slot, struct zpci_dev, hotplug_slot); *value = zpci_is_device_configured(zdev) ? 1 : 0; return 0; } static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { /* if the slot exits it always contains a function */ *value = 1; return 0; } static const struct hotplug_slot_ops s390_hotplug_slot_ops = { .enable_slot = enable_slot, .disable_slot = disable_slot, .reset_slot = reset_slot, .get_power_status = get_power_status, .get_adapter_status = get_adapter_status, }; int zpci_init_slot(struct zpci_dev *zdev) { char name[SLOT_NAME_SIZE]; struct zpci_bus *zbus = zdev->zbus; zdev->hotplug_slot.ops = &s390_hotplug_slot_ops; snprintf(name, SLOT_NAME_SIZE, "%08x", zdev->fid); return pci_hp_register(&zdev->hotplug_slot, zbus->bus, zdev->devfn, name); } void zpci_exit_slot(struct zpci_dev *zdev) { pci_hp_deregister(&zdev->hotplug_slot); }
linux-master
drivers/pci/hotplug/s390_pci_hpc.c
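The s390 driver above embeds a struct hotplug_slot inside its own per-function data and recovers it with container_of() in each callback. A minimal sketch of that same pattern for a hypothetical "demo" driver (all demo_* names invented), using only pci_hp_register() and hotplug_slot_ops as shown above:

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>

struct demo_slot {
	struct hotplug_slot hotplug_slot;	/* embedded, not allocated separately */
	bool powered;
};

static int demo_enable_slot(struct hotplug_slot *hs)
{
	struct demo_slot *slot = container_of(hs, struct demo_slot, hotplug_slot);

	slot->powered = true;	/* a real driver would power up hardware here */
	return 0;
}

static int demo_disable_slot(struct hotplug_slot *hs)
{
	struct demo_slot *slot = container_of(hs, struct demo_slot, hotplug_slot);

	slot->powered = false;
	return 0;
}

static const struct hotplug_slot_ops demo_slot_ops = {
	.enable_slot	= demo_enable_slot,
	.disable_slot	= demo_disable_slot,
};

static int demo_register_slot(struct demo_slot *slot, struct pci_bus *bus,
			      int devnr)
{
	slot->hotplug_slot.ops = &demo_slot_ops;
	return pci_hp_register(&slot->hotplug_slot, bus, devnr, "demo-slot");
}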
// SPDX-License-Identifier: GPL-2.0+ /* * CompactPCI Hot Plug Driver PCI functions * * Copyright (C) 2002,2005 by SOMA Networks, Inc. * * All rights reserved. * * Send feedback to <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/proc_fs.h> #include "../pci.h" #include "cpci_hotplug.h" #define MY_NAME "cpci_hotplug" #define dbg(format, arg...) \ do { \ if (cpci_debug) \ printk(KERN_DEBUG "%s: " format "\n", \ MY_NAME, ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME, ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME, ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME, ## arg) u8 cpci_get_attention_status(struct slot *slot) { int hs_cap; u16 hs_csr; hs_cap = pci_bus_find_capability(slot->bus, slot->devfn, PCI_CAP_ID_CHSWP); if (!hs_cap) return 0; if (pci_bus_read_config_word(slot->bus, slot->devfn, hs_cap + 2, &hs_csr)) return 0; return hs_csr & 0x0008 ? 1 : 0; } int cpci_set_attention_status(struct slot *slot, int status) { int hs_cap; u16 hs_csr; hs_cap = pci_bus_find_capability(slot->bus, slot->devfn, PCI_CAP_ID_CHSWP); if (!hs_cap) return 0; if (pci_bus_read_config_word(slot->bus, slot->devfn, hs_cap + 2, &hs_csr)) return 0; if (status) hs_csr |= HS_CSR_LOO; else hs_csr &= ~HS_CSR_LOO; if (pci_bus_write_config_word(slot->bus, slot->devfn, hs_cap + 2, hs_csr)) return 0; return 1; } u16 cpci_get_hs_csr(struct slot *slot) { int hs_cap; u16 hs_csr; hs_cap = pci_bus_find_capability(slot->bus, slot->devfn, PCI_CAP_ID_CHSWP); if (!hs_cap) return 0xFFFF; if (pci_bus_read_config_word(slot->bus, slot->devfn, hs_cap + 2, &hs_csr)) return 0xFFFF; return hs_csr; } int cpci_check_and_clear_ins(struct slot *slot) { int hs_cap; u16 hs_csr; int ins = 0; hs_cap = pci_bus_find_capability(slot->bus, slot->devfn, PCI_CAP_ID_CHSWP); if (!hs_cap) return 0; if (pci_bus_read_config_word(slot->bus, slot->devfn, hs_cap + 2, &hs_csr)) return 0; if (hs_csr & HS_CSR_INS) { /* Clear INS (by setting it) */ if (pci_bus_write_config_word(slot->bus, slot->devfn, hs_cap + 2, hs_csr)) ins = 0; else ins = 1; } return ins; } int cpci_check_ext(struct slot *slot) { int hs_cap; u16 hs_csr; int ext = 0; hs_cap = pci_bus_find_capability(slot->bus, slot->devfn, PCI_CAP_ID_CHSWP); if (!hs_cap) return 0; if (pci_bus_read_config_word(slot->bus, slot->devfn, hs_cap + 2, &hs_csr)) return 0; if (hs_csr & HS_CSR_EXT) ext = 1; return ext; } int cpci_clear_ext(struct slot *slot) { int hs_cap; u16 hs_csr; hs_cap = pci_bus_find_capability(slot->bus, slot->devfn, PCI_CAP_ID_CHSWP); if (!hs_cap) return -ENODEV; if (pci_bus_read_config_word(slot->bus, slot->devfn, hs_cap + 2, &hs_csr)) return -ENODEV; if (hs_csr & HS_CSR_EXT) { /* Clear EXT (by setting it) */ if (pci_bus_write_config_word(slot->bus, slot->devfn, hs_cap + 2, hs_csr)) return -ENODEV; } return 0; } int cpci_led_on(struct slot *slot) { int hs_cap; u16 hs_csr; hs_cap = pci_bus_find_capability(slot->bus, slot->devfn, PCI_CAP_ID_CHSWP); if (!hs_cap) return -ENODEV; if (pci_bus_read_config_word(slot->bus, slot->devfn, hs_cap + 2, &hs_csr)) return -ENODEV; if ((hs_csr & HS_CSR_LOO) != HS_CSR_LOO) { hs_csr |= HS_CSR_LOO; if (pci_bus_write_config_word(slot->bus, slot->devfn, hs_cap + 2, hs_csr)) { err("Could not set LOO for slot %s", slot_name(slot)); return -ENODEV; } } return 0; } int cpci_led_off(struct slot *slot) { int hs_cap; u16 hs_csr; hs_cap = 
pci_bus_find_capability(slot->bus, slot->devfn, PCI_CAP_ID_CHSWP); if (!hs_cap) return -ENODEV; if (pci_bus_read_config_word(slot->bus, slot->devfn, hs_cap + 2, &hs_csr)) return -ENODEV; if (hs_csr & HS_CSR_LOO) { hs_csr &= ~HS_CSR_LOO; if (pci_bus_write_config_word(slot->bus, slot->devfn, hs_cap + 2, hs_csr)) { err("Could not clear LOO for slot %s", slot_name(slot)); return -ENODEV; } } return 0; } /* * Device configuration functions */ int cpci_configure_slot(struct slot *slot) { struct pci_dev *dev; struct pci_bus *parent; int ret = 0; dbg("%s - enter", __func__); pci_lock_rescan_remove(); if (slot->dev == NULL) { dbg("pci_dev null, finding %02x:%02x:%x", slot->bus->number, PCI_SLOT(slot->devfn), PCI_FUNC(slot->devfn)); slot->dev = pci_get_slot(slot->bus, slot->devfn); } /* Still NULL? Well then scan for it! */ if (slot->dev == NULL) { int n; dbg("pci_dev still null"); /* * This will generate pci_dev structures for all functions, but * we will only call this case when lookup fails. */ n = pci_scan_slot(slot->bus, slot->devfn); dbg("%s: pci_scan_slot returned %d", __func__, n); slot->dev = pci_get_slot(slot->bus, slot->devfn); if (slot->dev == NULL) { err("Could not find PCI device for slot %02x", slot->number); ret = -ENODEV; goto out; } } parent = slot->dev->bus; for_each_pci_bridge(dev, parent) { if (PCI_SLOT(dev->devfn) == PCI_SLOT(slot->devfn)) pci_hp_add_bridge(dev); } pci_assign_unassigned_bridge_resources(parent->self); pci_bus_add_devices(parent); out: pci_unlock_rescan_remove(); dbg("%s - exit", __func__); return ret; } int cpci_unconfigure_slot(struct slot *slot) { struct pci_dev *dev, *temp; dbg("%s - enter", __func__); if (!slot->dev) { err("No device for slot %02x\n", slot->number); return -ENODEV; } pci_lock_rescan_remove(); list_for_each_entry_safe(dev, temp, &slot->bus->devices, bus_list) { if (PCI_SLOT(dev->devfn) != PCI_SLOT(slot->devfn)) continue; pci_dev_get(dev); pci_stop_and_remove_bus_device(dev); pci_dev_put(dev); } pci_dev_put(slot->dev); slot->dev = NULL; pci_unlock_rescan_remove(); dbg("%s - exit", __func__); return 0; }
linux-master
drivers/pci/hotplug/cpci_hotplug_pci.c
// SPDX-License-Identifier: GPL-2.0+ /* * Standard Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * Send feedback to <[email protected]>, <[email protected]> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include "../pci.h" #include "shpchp.h" int shpchp_configure_device(struct slot *p_slot) { struct pci_dev *dev; struct controller *ctrl = p_slot->ctrl; struct pci_dev *bridge = ctrl->pci_dev; struct pci_bus *parent = bridge->subordinate; int num, ret = 0; pci_lock_rescan_remove(); dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); if (dev) { ctrl_err(ctrl, "Device %s already exists at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev), pci_domain_nr(parent), p_slot->bus, p_slot->device); pci_dev_put(dev); ret = -EINVAL; goto out; } num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); if (num == 0) { ctrl_err(ctrl, "No new device found\n"); ret = -ENODEV; goto out; } for_each_pci_bridge(dev, parent) { if (PCI_SLOT(dev->devfn) == p_slot->device) pci_hp_add_bridge(dev); } pci_assign_unassigned_bridge_resources(bridge); pcie_bus_configure_settings(parent); pci_bus_add_devices(parent); out: pci_unlock_rescan_remove(); return ret; } void shpchp_unconfigure_device(struct slot *p_slot) { struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; struct pci_dev *dev, *temp; struct controller *ctrl = p_slot->ctrl; ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n", __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device); pci_lock_rescan_remove(); list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) { if (PCI_SLOT(dev->devfn) != p_slot->device) continue; pci_dev_get(dev); pci_stop_and_remove_bus_device(dev); pci_dev_put(dev); } pci_unlock_rescan_remove(); }
linux-master
drivers/pci/hotplug/shpchp_pci.c
// SPDX-License-Identifier: GPL-2.0+ /* * Common ACPI functions for hot plug platforms * * Copyright (C) 2006 Intel Corporation * * All rights reserved. * * Send feedback to <[email protected]> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/acpi.h> #include <linux/pci-acpi.h> #include <linux/slab.h> #define MY_NAME "acpi_pcihp" #define dbg(fmt, arg...) do { if (debug_acpi) printk(KERN_DEBUG "%s: %s: " fmt, MY_NAME, __func__, ## arg); } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format, MY_NAME, ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME, ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME, ## arg) #define METHOD_NAME__SUN "_SUN" #define METHOD_NAME_OSHP "OSHP" static bool debug_acpi; /* acpi_run_oshp - get control of hotplug from the firmware * * @handle - the handle of the hotplug controller. */ static acpi_status acpi_run_oshp(acpi_handle handle) { acpi_status status; struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); /* run OSHP */ status = acpi_evaluate_object(handle, METHOD_NAME_OSHP, NULL, NULL); if (ACPI_FAILURE(status)) if (status != AE_NOT_FOUND) printk(KERN_ERR "%s:%s OSHP fails=0x%x\n", __func__, (char *)string.pointer, status); else dbg("%s:%s OSHP not found\n", __func__, (char *)string.pointer); else pr_debug("%s:%s OSHP passes\n", __func__, (char *)string.pointer); kfree(string.pointer); return status; } /** * acpi_get_hp_hw_control_from_firmware * @pdev: the pci_dev of the bridge that has a hotplug controller * * Attempt to take hotplug control from firmware. */ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev) { const struct pci_host_bridge *host; const struct acpi_pci_root *root; acpi_status status; acpi_handle chandle, handle; struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; /* * If there's no ACPI host bridge (i.e., ACPI support is compiled * into the kernel but the hardware platform doesn't support ACPI), * there's nothing to do here. */ host = pci_find_host_bridge(pdev->bus); root = acpi_pci_find_root(ACPI_HANDLE(&host->dev)); if (!root) return 0; /* * If _OSC exists, it determines whether we're allowed to manage * the SHPC. We executed it while enumerating the host bridge. */ if (root->osc_support_set) { if (host->native_shpc_hotplug) return 0; return -ENODEV; } /* * In the absence of _OSC, we're always allowed to manage the SHPC. * However, if an OSHP method is present, we must execute it so the * firmware can transfer control to the OS, e.g., direct interrupts * to the OS instead of to the firmware. * * N.B. The PCI Firmware Spec (r3.2, sec 4.8) does not endorse * searching up the ACPI hierarchy, so the loops below are suspect. */ handle = ACPI_HANDLE(&pdev->dev); if (!handle) { /* * This hotplug controller was not listed in the ACPI name * space at all. Try to get ACPI handle of parent PCI bus. 
*/ struct pci_bus *pbus; for (pbus = pdev->bus; pbus; pbus = pbus->parent) { handle = acpi_pci_get_bridge_handle(pbus); if (handle) break; } } while (handle) { acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); pci_info(pdev, "Requesting control of SHPC hotplug via OSHP (%s)\n", (char *)string.pointer); status = acpi_run_oshp(handle); if (ACPI_SUCCESS(status)) goto got_one; if (acpi_is_root_bridge(handle)) break; chandle = handle; status = acpi_get_parent(chandle, &handle); if (ACPI_FAILURE(status)) break; } pci_info(pdev, "Cannot get control of SHPC hotplug\n"); kfree(string.pointer); return -ENODEV; got_one: pci_info(pdev, "Gained control of SHPC hotplug (%s)\n", (char *)string.pointer); kfree(string.pointer); return 0; } EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); static int pcihp_is_ejectable(acpi_handle handle) { acpi_status status; unsigned long long removable; if (!acpi_has_method(handle, "_ADR")) return 0; if (acpi_has_method(handle, "_EJ0")) return 1; status = acpi_evaluate_integer(handle, "_RMV", NULL, &removable); if (ACPI_SUCCESS(status) && removable) return 1; return 0; } /** * acpi_pci_check_ejectable - check if handle is ejectable ACPI PCI slot * @pbus: the PCI bus of the PCI slot corresponding to 'handle' * @handle: ACPI handle to check * * Return 1 if handle is ejectable PCI slot, 0 otherwise. */ int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle) { acpi_handle bridge_handle, parent_handle; bridge_handle = acpi_pci_get_bridge_handle(pbus); if (!bridge_handle) return 0; if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle)))) return 0; if (bridge_handle != parent_handle) return 0; return pcihp_is_ejectable(handle); } EXPORT_SYMBOL_GPL(acpi_pci_check_ejectable); static acpi_status check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv) { int *found = (int *)context; if (pcihp_is_ejectable(handle)) { *found = 1; return AE_CTRL_TERMINATE; } return AE_OK; } /** * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots * @handle: handle of the PCI bus to scan * * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. */ int acpi_pci_detect_ejectable(acpi_handle handle) { int found = 0; if (!handle) return found; acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, check_hotplug, NULL, (void *)&found, NULL); return found; } EXPORT_SYMBOL_GPL(acpi_pci_detect_ejectable); module_param(debug_acpi, bool, 0644); MODULE_PARM_DESC(debug_acpi, "Debugging mode for ACPI enabled or not");
linux-master
drivers/pci/hotplug/acpi_pcihp.c
// SPDX-License-Identifier: GPL-2.0+ /* * CompactPCI Hot Plug Driver * * Copyright (C) 2002,2005 SOMA Networks, Inc. * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/atomic.h> #include <linux/delay.h> #include <linux/kthread.h> #include "cpci_hotplug.h" #define DRIVER_AUTHOR "Scott Murray <[email protected]>" #define DRIVER_DESC "CompactPCI Hot Plug Core" #define MY_NAME "cpci_hotplug" #define dbg(format, arg...) \ do { \ if (cpci_debug) \ printk(KERN_DEBUG "%s: " format "\n", \ MY_NAME, ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME, ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME, ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME, ## arg) /* local variables */ static DECLARE_RWSEM(list_rwsem); static LIST_HEAD(slot_list); static int slots; static atomic_t extracting; int cpci_debug; static struct cpci_hp_controller *controller; static struct task_struct *cpci_thread; static int thread_finished; static int enable_slot(struct hotplug_slot *slot); static int disable_slot(struct hotplug_slot *slot); static int set_attention_status(struct hotplug_slot *slot, u8 value); static int get_power_status(struct hotplug_slot *slot, u8 *value); static int get_attention_status(struct hotplug_slot *slot, u8 *value); static int get_adapter_status(struct hotplug_slot *slot, u8 *value); static int get_latch_status(struct hotplug_slot *slot, u8 *value); static const struct hotplug_slot_ops cpci_hotplug_slot_ops = { .enable_slot = enable_slot, .disable_slot = disable_slot, .set_attention_status = set_attention_status, .get_power_status = get_power_status, .get_attention_status = get_attention_status, .get_adapter_status = get_adapter_status, .get_latch_status = get_latch_status, }; static int enable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = to_slot(hotplug_slot); int retval = 0; dbg("%s - physical_slot = %s", __func__, slot_name(slot)); if (controller->ops->set_power) retval = controller->ops->set_power(slot, 1); return retval; } static int disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = to_slot(hotplug_slot); int retval = 0; dbg("%s - physical_slot = %s", __func__, slot_name(slot)); down_write(&list_rwsem); /* Unconfigure device */ dbg("%s - unconfiguring slot %s", __func__, slot_name(slot)); retval = cpci_unconfigure_slot(slot); if (retval) { err("%s - could not unconfigure slot %s", __func__, slot_name(slot)); goto disable_error; } dbg("%s - finished unconfiguring slot %s", __func__, slot_name(slot)); /* Clear EXT (by setting it) */ if (cpci_clear_ext(slot)) { err("%s - could not clear EXT for slot %s", __func__, slot_name(slot)); retval = -ENODEV; goto disable_error; } cpci_led_on(slot); if (controller->ops->set_power) { retval = controller->ops->set_power(slot, 0); if (retval) goto disable_error; } slot->adapter_status = 0; if (slot->extracting) { slot->extracting = 0; atomic_dec(&extracting); } disable_error: up_write(&list_rwsem); return retval; } static u8 cpci_get_power_status(struct slot *slot) { u8 power = 1; if (controller->ops->get_power) power = controller->ops->get_power(slot); return power; } static 
int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); *value = cpci_get_power_status(slot); return 0; } static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); *value = cpci_get_attention_status(slot); return 0; } static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { return cpci_set_attention_status(to_slot(hotplug_slot), status); } static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); *value = slot->adapter_status; return 0; } static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); *value = slot->latch_status; return 0; } static void release_slot(struct slot *slot) { pci_dev_put(slot->dev); kfree(slot); } #define SLOT_NAME_SIZE 6 int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) { struct slot *slot; char name[SLOT_NAME_SIZE]; int status; int i; if (!(controller && bus)) return -ENODEV; /* * Create a structure for each slot, and register that slot * with the pci_hotplug subsystem. */ for (i = first; i <= last; ++i) { slot = kzalloc(sizeof(struct slot), GFP_KERNEL); if (!slot) { status = -ENOMEM; goto error; } slot->bus = bus; slot->number = i; slot->devfn = PCI_DEVFN(i, 0); snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i); slot->hotplug_slot.ops = &cpci_hotplug_slot_ops; dbg("registering slot %s", name); status = pci_hp_register(&slot->hotplug_slot, bus, i, name); if (status) { err("pci_hp_register failed with error %d", status); goto error_slot; } dbg("slot registered with name: %s", slot_name(slot)); /* Add slot to our internal list */ down_write(&list_rwsem); list_add(&slot->slot_list, &slot_list); slots++; up_write(&list_rwsem); } return 0; error_slot: kfree(slot); error: return status; } EXPORT_SYMBOL_GPL(cpci_hp_register_bus); int cpci_hp_unregister_bus(struct pci_bus *bus) { struct slot *slot; struct slot *tmp; int status = 0; down_write(&list_rwsem); if (!slots) { up_write(&list_rwsem); return -1; } list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) { if (slot->bus == bus) { list_del(&slot->slot_list); slots--; dbg("deregistering slot %s", slot_name(slot)); pci_hp_deregister(&slot->hotplug_slot); release_slot(slot); } } up_write(&list_rwsem); return status; } EXPORT_SYMBOL_GPL(cpci_hp_unregister_bus); /* This is the interrupt mode interrupt handler */ static irqreturn_t cpci_hp_intr(int irq, void *data) { dbg("entered cpci_hp_intr"); /* Check to see if it was our interrupt */ if ((controller->irq_flags & IRQF_SHARED) && !controller->ops->check_irq(controller->dev_id)) { dbg("exited cpci_hp_intr, not our interrupt"); return IRQ_NONE; } /* Disable ENUM interrupt */ controller->ops->disable_irq(); /* Trigger processing by the event thread */ wake_up_process(cpci_thread); return IRQ_HANDLED; } /* * According to PICMG 2.1 R2.0, section 6.3.2, upon * initialization, the system driver shall clear the * INS bits of the cold-inserted devices. 
*/ static int init_slots(int clear_ins) { struct slot *slot; struct pci_dev *dev; dbg("%s - enter", __func__); down_read(&list_rwsem); if (!slots) { up_read(&list_rwsem); return -1; } list_for_each_entry(slot, &slot_list, slot_list) { dbg("%s - looking at slot %s", __func__, slot_name(slot)); if (clear_ins && cpci_check_and_clear_ins(slot)) dbg("%s - cleared INS for slot %s", __func__, slot_name(slot)); dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0)); if (dev) { slot->adapter_status = 1; slot->latch_status = 1; slot->dev = dev; } } up_read(&list_rwsem); dbg("%s - exit", __func__); return 0; } static int check_slots(void) { struct slot *slot; int extracted; int inserted; u16 hs_csr; down_read(&list_rwsem); if (!slots) { up_read(&list_rwsem); err("no slots registered, shutting down"); return -1; } extracted = inserted = 0; list_for_each_entry(slot, &slot_list, slot_list) { dbg("%s - looking at slot %s", __func__, slot_name(slot)); if (cpci_check_and_clear_ins(slot)) { /* * Some broken hardware (e.g. PLX 9054AB) asserts * ENUM# twice... */ if (slot->dev) { warn("slot %s already inserted", slot_name(slot)); inserted++; continue; } /* Process insertion */ dbg("%s - slot %s inserted", __func__, slot_name(slot)); /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR (1) = %04x", __func__, slot_name(slot), hs_csr); /* Configure device */ dbg("%s - configuring slot %s", __func__, slot_name(slot)); if (cpci_configure_slot(slot)) { err("%s - could not configure slot %s", __func__, slot_name(slot)); continue; } dbg("%s - finished configuring slot %s", __func__, slot_name(slot)); /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR (2) = %04x", __func__, slot_name(slot), hs_csr); slot->latch_status = 1; slot->adapter_status = 1; cpci_led_off(slot); /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR (3) = %04x", __func__, slot_name(slot), hs_csr); inserted++; } else if (cpci_check_ext(slot)) { /* Process extraction request */ dbg("%s - slot %s extracted", __func__, slot_name(slot)); /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR = %04x", __func__, slot_name(slot), hs_csr); if (!slot->extracting) { slot->latch_status = 0; slot->extracting = 1; atomic_inc(&extracting); } extracted++; } else if (slot->extracting) { hs_csr = cpci_get_hs_csr(slot); if (hs_csr == 0xffff) { /* * Hmmm, we're likely hosed at this point, should we * bother trying to tell the driver or not? 
*/ err("card in slot %s was improperly removed", slot_name(slot)); slot->adapter_status = 0; slot->extracting = 0; atomic_dec(&extracting); } } } up_read(&list_rwsem); dbg("inserted=%d, extracted=%d, extracting=%d", inserted, extracted, atomic_read(&extracting)); if (inserted || extracted) return extracted; else if (!atomic_read(&extracting)) { err("cannot find ENUM# source, shutting down"); return -1; } return 0; } /* This is the interrupt mode worker thread body */ static int event_thread(void *data) { int rc; dbg("%s - event thread started", __func__); while (1) { dbg("event thread sleeping"); set_current_state(TASK_INTERRUPTIBLE); schedule(); if (kthread_should_stop()) break; do { rc = check_slots(); if (rc > 0) { /* Give userspace a chance to handle extraction */ msleep(500); } else if (rc < 0) { dbg("%s - error checking slots", __func__); thread_finished = 1; goto out; } } while (atomic_read(&extracting) && !kthread_should_stop()); if (kthread_should_stop()) break; /* Re-enable ENUM# interrupt */ dbg("%s - re-enabling irq", __func__); controller->ops->enable_irq(); } out: return 0; } /* This is the polling mode worker thread body */ static int poll_thread(void *data) { int rc; while (1) { if (kthread_should_stop() || signal_pending(current)) break; if (controller->ops->query_enum()) { do { rc = check_slots(); if (rc > 0) { /* Give userspace a chance to handle extraction */ msleep(500); } else if (rc < 0) { dbg("%s - error checking slots", __func__); thread_finished = 1; goto out; } } while (atomic_read(&extracting) && !kthread_should_stop()); } msleep(100); } out: return 0; } static int cpci_start_thread(void) { if (controller->irq) cpci_thread = kthread_run(event_thread, NULL, "cpci_hp_eventd"); else cpci_thread = kthread_run(poll_thread, NULL, "cpci_hp_polld"); if (IS_ERR(cpci_thread)) { err("Can't start up our thread"); return PTR_ERR(cpci_thread); } thread_finished = 0; return 0; } static void cpci_stop_thread(void) { kthread_stop(cpci_thread); thread_finished = 1; } int cpci_hp_register_controller(struct cpci_hp_controller *new_controller) { int status = 0; if (controller) return -1; if (!(new_controller && new_controller->ops)) return -EINVAL; if (new_controller->irq) { if (!(new_controller->ops->enable_irq && new_controller->ops->disable_irq)) status = -EINVAL; if (request_irq(new_controller->irq, cpci_hp_intr, new_controller->irq_flags, MY_NAME, new_controller->dev_id)) { err("Can't get irq %d for the hotplug cPCI controller", new_controller->irq); status = -ENODEV; } dbg("%s - acquired controller irq %d", __func__, new_controller->irq); } if (!status) controller = new_controller; return status; } EXPORT_SYMBOL_GPL(cpci_hp_register_controller); static void cleanup_slots(void) { struct slot *slot; struct slot *tmp; /* * Unregister all of our slots with the pci_hotplug subsystem, * and free up all memory that we had allocated. 
*/ down_write(&list_rwsem); if (!slots) goto cleanup_null; list_for_each_entry_safe(slot, tmp, &slot_list, slot_list) { list_del(&slot->slot_list); pci_hp_deregister(&slot->hotplug_slot); release_slot(slot); } cleanup_null: up_write(&list_rwsem); } int cpci_hp_unregister_controller(struct cpci_hp_controller *old_controller) { int status = 0; if (controller) { if (!thread_finished) cpci_stop_thread(); if (controller->irq) free_irq(controller->irq, controller->dev_id); controller = NULL; cleanup_slots(); } else status = -ENODEV; return status; } EXPORT_SYMBOL_GPL(cpci_hp_unregister_controller); int cpci_hp_start(void) { static int first = 1; int status; dbg("%s - enter", __func__); if (!controller) return -ENODEV; down_read(&list_rwsem); if (list_empty(&slot_list)) { up_read(&list_rwsem); return -ENODEV; } up_read(&list_rwsem); status = init_slots(first); if (first) first = 0; if (status) return status; status = cpci_start_thread(); if (status) return status; dbg("%s - thread started", __func__); if (controller->irq) { /* Start enum interrupt processing */ dbg("%s - enabling irq", __func__); controller->ops->enable_irq(); } dbg("%s - exit", __func__); return 0; } EXPORT_SYMBOL_GPL(cpci_hp_start); int cpci_hp_stop(void) { if (!controller) return -ENODEV; if (controller->irq) { /* Stop enum interrupt processing */ dbg("%s - disabling irq", __func__); controller->ops->disable_irq(); } cpci_stop_thread(); return 0; } EXPORT_SYMBOL_GPL(cpci_hp_stop); int __init cpci_hotplug_init(int debug) { cpci_debug = debug; return 0; }
linux-master
drivers/pci/hotplug/cpci_hotplug_core.c
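A minimal, hypothetical usage sketch for the cPCI hotplug core above, modeled on the cpcihp_generic driver later in this listing: a board-specific driver supplies a query_enum hook for polled ENUM# sensing, then registers its controller, hotplug bus, and slot range. The bus number, slot range, and all board_* names are placeholders, not real kernel code.

#include <linux/module.h>
#include <linux/pci.h>
#include "cpci_hotplug.h"

static struct cpci_hp_controller_ops board_hpc_ops;
static struct cpci_hp_controller board_hpc;

/* Poll-mode ENUM# sense; a real driver would read a board register here. */
static int board_query_enum(void)
{
	return 0;
}

static int __init board_hp_init(void)
{
	struct pci_bus *bus = pci_find_bus(0, 1);	/* hotplug bus number: placeholder */
	int status;

	if (!bus)
		return -ENODEV;

	board_hpc_ops.query_enum = board_query_enum;
	board_hpc.irq = 0;	/* no IRQ wired up: the core falls back to poll_thread */
	board_hpc.ops = &board_hpc_ops;

	status = cpci_hp_register_controller(&board_hpc);
	if (status)
		return status;
	status = cpci_hp_register_bus(bus, 2, 6);	/* slots 2..6: placeholder range */
	if (status)
		goto err_controller;
	status = cpci_hp_start();
	if (status)
		goto err_bus;
	return 0;

err_bus:
	cpci_hp_unregister_bus(bus);
err_controller:
	cpci_hp_unregister_controller(&board_hpc);
	return status;
}

static void __exit board_hp_exit(void)
{
	cpci_hp_stop();
	cpci_hp_unregister_bus(pci_find_bus(0, 1));
	cpci_hp_unregister_controller(&board_hpc);
}

module_init(board_hp_init);
module_exit(board_hp_exit);
MODULE_LICENSE("GPL");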
// SPDX-License-Identifier: GPL-2.0+ /* * PCI Express Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * Send feedback to <[email protected]>, <[email protected]> * */ #define dev_fmt(fmt) "pciehp: " fmt #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include "../pci.h" #include "pciehp.h" /** * pciehp_configure_device() - enumerate PCI devices below a hotplug bridge * @ctrl: PCIe hotplug controller * * Enumerate PCI devices below a hotplug bridge and add them to the system. * Return 0 on success, %-EEXIST if the devices are already enumerated or * %-ENODEV if enumeration failed. */ int pciehp_configure_device(struct controller *ctrl) { struct pci_dev *dev; struct pci_dev *bridge = ctrl->pcie->port; struct pci_bus *parent = bridge->subordinate; int num, ret = 0; pci_lock_rescan_remove(); dev = pci_get_slot(parent, PCI_DEVFN(0, 0)); if (dev) { /* * The device is already there. Either configured by the * boot firmware or a previous hotplug event. */ ctrl_dbg(ctrl, "Device %s already exists at %04x:%02x:00, skipping hot-add\n", pci_name(dev), pci_domain_nr(parent), parent->number); pci_dev_put(dev); ret = -EEXIST; goto out; } num = pci_scan_slot(parent, PCI_DEVFN(0, 0)); if (num == 0) { ctrl_err(ctrl, "No new device found\n"); ret = -ENODEV; goto out; } for_each_pci_bridge(dev, parent) pci_hp_add_bridge(dev); pci_assign_unassigned_bridge_resources(bridge); pcie_bus_configure_settings(parent); /* * Release reset_lock during driver binding * to avoid AB-BA deadlock with device_lock. */ up_read(&ctrl->reset_lock); pci_bus_add_devices(parent); down_read_nested(&ctrl->reset_lock, ctrl->depth); out: pci_unlock_rescan_remove(); return ret; } /** * pciehp_unconfigure_device() - remove PCI devices below a hotplug bridge * @ctrl: PCIe hotplug controller * @presence: whether the card is still present in the slot; * true for safe removal via sysfs or an Attention Button press, * false for surprise removal * * Unbind PCI devices below a hotplug bridge from their drivers and remove * them from the system. Safely removed devices are quiesced. Surprise * removed devices are marked as such to prevent further accesses. */ void pciehp_unconfigure_device(struct controller *ctrl, bool presence) { struct pci_dev *dev, *temp; struct pci_bus *parent = ctrl->pcie->port->subordinate; u16 command; ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:00\n", __func__, pci_domain_nr(parent), parent->number); if (!presence) pci_walk_bus(parent, pci_dev_set_disconnected, NULL); pci_lock_rescan_remove(); /* * Stopping an SR-IOV PF device removes all the associated VFs, * which will update the bus->devices list and confuse the * iterator. Therefore, iterate in reverse so we remove the VFs * first, then the PF. We do the same in pci_stop_bus_device(). */ list_for_each_entry_safe_reverse(dev, temp, &parent->devices, bus_list) { pci_dev_get(dev); /* * Release reset_lock during driver unbinding * to avoid AB-BA deadlock with device_lock. */ up_read(&ctrl->reset_lock); pci_stop_and_remove_bus_device(dev); down_read_nested(&ctrl->reset_lock, ctrl->depth); /* * Ensure that no new Requests will be generated from * the device. 
*/ if (presence) { pci_read_config_word(dev, PCI_COMMAND, &command); command &= ~(PCI_COMMAND_MASTER | PCI_COMMAND_SERR); command |= PCI_COMMAND_INTX_DISABLE; pci_write_config_word(dev, PCI_COMMAND, command); } pci_dev_put(dev); } pci_unlock_rescan_remove(); }
linux-master
drivers/pci/hotplug/pciehp_pci.c
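Illustrative caller sketch (not from the kernel tree): the return codes documented for pciehp_configure_device() let event-handling code treat "already enumerated" (-EEXIST) differently from a hard failure. The function name is hypothetical; struct controller and ctrl_err() come from pciehp.h as used above.

static void example_handle_presence(struct controller *ctrl)
{
	int ret = pciehp_configure_device(ctrl);

	if (ret == -EEXIST)
		return;		/* firmware or a previous event already enumerated the slot */
	if (ret)
		ctrl_err(ctrl, "example: enumeration failed (%d)\n", ret);
}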
// SPDX-License-Identifier: GPL-2.0+ /* * IBM Hot Plug Controller Driver * * Written By: Chuck Cole, Jyoti Shah, Tong Yu, Irene Zubarev, IBM Corporation * * Copyright (C) 2001,2003 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001-2003 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/wait.h> #include "../pci.h" #include <asm/pci_x86.h> /* for struct irq_routing_table */ #include <asm/io_apic.h> #include "ibmphp.h" #define attn_on(sl) ibmphp_hpc_writeslot(sl, HPC_SLOT_ATTNON) #define attn_off(sl) ibmphp_hpc_writeslot(sl, HPC_SLOT_ATTNOFF) #define attn_LED_blink(sl) ibmphp_hpc_writeslot(sl, HPC_SLOT_BLINKLED) #define get_ctrl_revision(sl, rev) ibmphp_hpc_readslot(sl, READ_REVLEVEL, rev) #define get_hpc_options(sl, opt) ibmphp_hpc_readslot(sl, READ_HPCOPTIONS, opt) #define DRIVER_VERSION "0.6" #define DRIVER_DESC "IBM Hot Plug PCI Controller Driver" int ibmphp_debug; static bool debug; module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC); struct pci_bus *ibmphp_pci_bus; static int max_slots; static int irqs[16]; /* PIC mode IRQs we're using so far (in case MPS * tables don't provide default info for empty slots */ static int init_flag; static inline int get_cur_bus_info(struct slot **sl) { int rc = 1; struct slot *slot_cur = *sl; debug("options = %x\n", slot_cur->ctrl->options); debug("revision = %x\n", slot_cur->ctrl->revision); if (READ_BUS_STATUS(slot_cur->ctrl)) rc = ibmphp_hpc_readslot(slot_cur, READ_BUSSTATUS, NULL); if (rc) return rc; slot_cur->bus_on->current_speed = CURRENT_BUS_SPEED(slot_cur->busstatus); if (READ_BUS_MODE(slot_cur->ctrl)) slot_cur->bus_on->current_bus_mode = CURRENT_BUS_MODE(slot_cur->busstatus); else slot_cur->bus_on->current_bus_mode = 0xFF; debug("busstatus = %x, bus_speed = %x, bus_mode = %x\n", slot_cur->busstatus, slot_cur->bus_on->current_speed, slot_cur->bus_on->current_bus_mode); *sl = slot_cur; return 0; } static inline int slot_update(struct slot **sl) { int rc; rc = ibmphp_hpc_readslot(*sl, READ_ALLSTAT, NULL); if (rc) return rc; if (!init_flag) rc = get_cur_bus_info(sl); return rc; } static int __init get_max_slots(void) { struct slot *slot_cur; u8 slot_count = 0; list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) { /* sometimes the hot-pluggable slots start with 4 (not always from 1) */ slot_count = max(slot_count, slot_cur->number); } return slot_count; } /* This routine will put the correct slot->device information per slot. It's * called from initialization of the slot structures. It will also assign * interrupt numbers per each slot. 
* Parameters: struct slot * Returns 0 or errors */ int ibmphp_init_devno(struct slot **cur_slot) { struct irq_routing_table *rtable; int len; int loop; int i; rtable = pcibios_get_irq_routing_table(); if (!rtable) { err("no BIOS routing table...\n"); return -ENOMEM; } len = (rtable->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); if (!len) { kfree(rtable); return -1; } for (loop = 0; loop < len; loop++) { if ((*cur_slot)->number == rtable->slots[loop].slot && (*cur_slot)->bus == rtable->slots[loop].bus) { (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn); for (i = 0; i < 4; i++) (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus, (int) (*cur_slot)->device, i); debug("(*cur_slot)->irq[0] = %x\n", (*cur_slot)->irq[0]); debug("(*cur_slot)->irq[1] = %x\n", (*cur_slot)->irq[1]); debug("(*cur_slot)->irq[2] = %x\n", (*cur_slot)->irq[2]); debug("(*cur_slot)->irq[3] = %x\n", (*cur_slot)->irq[3]); debug("rtable->exclusive_irqs = %x\n", rtable->exclusive_irqs); debug("rtable->slots[loop].irq[0].bitmap = %x\n", rtable->slots[loop].irq[0].bitmap); debug("rtable->slots[loop].irq[1].bitmap = %x\n", rtable->slots[loop].irq[1].bitmap); debug("rtable->slots[loop].irq[2].bitmap = %x\n", rtable->slots[loop].irq[2].bitmap); debug("rtable->slots[loop].irq[3].bitmap = %x\n", rtable->slots[loop].irq[3].bitmap); debug("rtable->slots[loop].irq[0].link = %x\n", rtable->slots[loop].irq[0].link); debug("rtable->slots[loop].irq[1].link = %x\n", rtable->slots[loop].irq[1].link); debug("rtable->slots[loop].irq[2].link = %x\n", rtable->slots[loop].irq[2].link); debug("rtable->slots[loop].irq[3].link = %x\n", rtable->slots[loop].irq[3].link); debug("end of init_devno\n"); kfree(rtable); return 0; } } kfree(rtable); return -1; } static inline int power_on(struct slot *slot_cur) { u8 cmd = HPC_SLOT_ON; int retval; retval = ibmphp_hpc_writeslot(slot_cur, cmd); if (retval) { err("power on failed\n"); return retval; } if (CTLR_RESULT(slot_cur->ctrl->status)) { err("command not completed successfully in power_on\n"); return -EIO; } msleep(3000); /* For ServeRAID cards, and some 66 PCI */ return 0; } static inline int power_off(struct slot *slot_cur) { u8 cmd = HPC_SLOT_OFF; int retval; retval = ibmphp_hpc_writeslot(slot_cur, cmd); if (retval) { err("power off failed\n"); return retval; } if (CTLR_RESULT(slot_cur->ctrl->status)) { err("command not completed successfully in power_off\n"); retval = -EIO; } return retval; } static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value) { int rc = 0; struct slot *pslot; u8 cmd = 0x00; /* avoid compiler warning */ debug("set_attention_status - Entry hotplug_slot[%lx] value[%x]\n", (ulong) hotplug_slot, value); ibmphp_lock_operations(); if (hotplug_slot) { switch (value) { case HPC_SLOT_ATTN_OFF: cmd = HPC_SLOT_ATTNOFF; break; case HPC_SLOT_ATTN_ON: cmd = HPC_SLOT_ATTNON; break; case HPC_SLOT_ATTN_BLINK: cmd = HPC_SLOT_BLINKLED; break; default: rc = -ENODEV; err("set_attention_status - Error : invalid input [%x]\n", value); break; } if (rc == 0) { pslot = to_slot(hotplug_slot); rc = ibmphp_hpc_writeslot(pslot, cmd); } } else rc = -ENODEV; ibmphp_unlock_operations(); debug("set_attention_status - Exit rc[%d]\n", rc); return rc; } static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) { int rc = -ENODEV; struct slot *pslot; struct slot myslot; debug("get_attention_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = 
to_slot(hotplug_slot); memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &myslot.status); if (!rc) rc = ibmphp_hpc_readslot(pslot, READ_EXTSLOTSTATUS, &myslot.ext_status); if (!rc) *value = SLOT_ATTN(myslot.status, myslot.ext_status); } ibmphp_unlock_operations(); debug("get_attention_status - Exit rc[%d] value[%x]\n", rc, *value); return rc; } static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) { int rc = -ENODEV; struct slot *pslot; struct slot myslot; debug("get_latch_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = to_slot(hotplug_slot); memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &myslot.status); if (!rc) *value = SLOT_LATCH(myslot.status); } ibmphp_unlock_operations(); debug("get_latch_status - Exit rc[%d] rc[%x] value[%x]\n", rc, rc, *value); return rc; } static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { int rc = -ENODEV; struct slot *pslot; struct slot myslot; debug("get_power_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = to_slot(hotplug_slot); memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &myslot.status); if (!rc) *value = SLOT_PWRGD(myslot.status); } ibmphp_unlock_operations(); debug("get_power_status - Exit rc[%d] rc[%x] value[%x]\n", rc, rc, *value); return rc; } static int get_adapter_present(struct hotplug_slot *hotplug_slot, u8 *value) { int rc = -ENODEV; struct slot *pslot; u8 present; struct slot myslot; debug("get_adapter_status - Entry hotplug_slot[%lx] pvalue[%lx]\n", (ulong) hotplug_slot, (ulong) value); ibmphp_lock_operations(); if (hotplug_slot) { pslot = to_slot(hotplug_slot); memcpy(&myslot, pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &myslot.status); if (!rc) { present = SLOT_PRESENT(myslot.status); if (present == HPC_SLOT_EMPTY) *value = 0; else *value = 1; } } ibmphp_unlock_operations(); debug("get_adapter_present - Exit rc[%d] value[%x]\n", rc, *value); return rc; } static int get_max_bus_speed(struct slot *slot) { int rc = 0; u8 mode = 0; enum pci_bus_speed speed; struct pci_bus *bus = slot->hotplug_slot.pci_slot->bus; debug("%s - Entry slot[%p]\n", __func__, slot); ibmphp_lock_operations(); mode = slot->supported_bus_mode; speed = slot->supported_speed; ibmphp_unlock_operations(); switch (speed) { case BUS_SPEED_33: break; case BUS_SPEED_66: if (mode == BUS_MODE_PCIX) speed += 0x01; break; case BUS_SPEED_100: case BUS_SPEED_133: speed += 0x01; break; default: /* Note (will need to change): there would be soon 256, 512 also */ rc = -ENODEV; } if (!rc) bus->max_bus_speed = speed; debug("%s - Exit rc[%d] speed[%x]\n", __func__, rc, speed); return rc; } /**************************************************************************** * This routine will initialize the ops data structure used in the validate * function. 
It will also power off empty slots that are powered on since BIOS * leaves those on, albeit disconnected ****************************************************************************/ static int __init init_ops(void) { struct slot *slot_cur; int retval; int rc; list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) { debug("BEFORE GETTING SLOT STATUS, slot # %x\n", slot_cur->number); if (slot_cur->ctrl->revision == 0xFF) if (get_ctrl_revision(slot_cur, &slot_cur->ctrl->revision)) return -1; if (slot_cur->bus_on->current_speed == 0xFF) if (get_cur_bus_info(&slot_cur)) return -1; get_max_bus_speed(slot_cur); if (slot_cur->ctrl->options == 0xFF) if (get_hpc_options(slot_cur, &slot_cur->ctrl->options)) return -1; retval = slot_update(&slot_cur); if (retval) return retval; debug("status = %x\n", slot_cur->status); debug("ext_status = %x\n", slot_cur->ext_status); debug("SLOT_POWER = %x\n", SLOT_POWER(slot_cur->status)); debug("SLOT_PRESENT = %x\n", SLOT_PRESENT(slot_cur->status)); debug("SLOT_LATCH = %x\n", SLOT_LATCH(slot_cur->status)); if ((SLOT_PWRGD(slot_cur->status)) && !(SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) { debug("BEFORE POWER OFF COMMAND\n"); rc = power_off(slot_cur); if (rc) return rc; /* retval = slot_update(&slot_cur); * if (retval) * return retval; * ibmphp_update_slot_info(slot_cur); */ } } init_flag = 0; return 0; } /* This operation will check whether the slot is within the bounds and * the operation is valid to perform on that slot * Parameters: slot, operation * Returns: 0 or error codes */ static int validate(struct slot *slot_cur, int opn) { int number; int retval; if (!slot_cur) return -ENODEV; number = slot_cur->number; if ((number > max_slots) || (number < 0)) return -EBADSLT; debug("slot_number in validate is %d\n", slot_cur->number); retval = slot_update(&slot_cur); if (retval) return retval; switch (opn) { case ENABLE: if (!(SLOT_PWRGD(slot_cur->status)) && (SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) return 0; break; case DISABLE: if ((SLOT_PWRGD(slot_cur->status)) && (SLOT_PRESENT(slot_cur->status)) && !(SLOT_LATCH(slot_cur->status))) return 0; break; default: break; } err("validate failed....\n"); return -EINVAL; } /**************************************************************************** * This routine is for updating the data structures in the hotplug core * Parameters: struct slot * Returns: 0 or error ****************************************************************************/ int ibmphp_update_slot_info(struct slot *slot_cur) { struct pci_bus *bus = slot_cur->hotplug_slot.pci_slot->bus; u8 bus_speed; u8 mode; bus_speed = slot_cur->bus_on->current_speed; mode = slot_cur->bus_on->current_bus_mode; switch (bus_speed) { case BUS_SPEED_33: break; case BUS_SPEED_66: if (mode == BUS_MODE_PCIX) bus_speed += 0x01; else if (mode == BUS_MODE_PCI) ; else bus_speed = PCI_SPEED_UNKNOWN; break; case BUS_SPEED_100: case BUS_SPEED_133: bus_speed += 0x01; break; default: bus_speed = PCI_SPEED_UNKNOWN; } bus->cur_bus_speed = bus_speed; // To do: bus_names return 0; } /****************************************************************************** * This function will return the pci_func, given bus and devfunc, or NULL. 
It * is called from visit routines ******************************************************************************/ static struct pci_func *ibm_slot_find(u8 busno, u8 device, u8 function) { struct pci_func *func_cur; struct slot *slot_cur; list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) { if (slot_cur->func) { func_cur = slot_cur->func; while (func_cur) { if ((func_cur->busno == busno) && (func_cur->device == device) && (func_cur->function == function)) return func_cur; func_cur = func_cur->next; } } } return NULL; } /************************************************************* * This routine frees up memory used by struct slot, including * the pointers to pci_func, bus, hotplug_slot, controller, * and deregistering from the hotplug core *************************************************************/ static void free_slots(void) { struct slot *slot_cur, *next; debug("%s -- enter\n", __func__); list_for_each_entry_safe(slot_cur, next, &ibmphp_slot_head, ibm_slot_list) { pci_hp_del(&slot_cur->hotplug_slot); slot_cur->ctrl = NULL; slot_cur->bus_on = NULL; /* * We don't want to actually remove the resources, * since ibmphp_free_resources() will do just that. */ ibmphp_unconfigure_card(&slot_cur, -1); pci_hp_destroy(&slot_cur->hotplug_slot); kfree(slot_cur); } debug("%s -- exit\n", __func__); } static void ibm_unconfigure_device(struct pci_func *func) { struct pci_dev *temp; u8 j; debug("inside %s\n", __func__); debug("func->device = %x, func->function = %x\n", func->device, func->function); debug("func->device << 3 | 0x0 = %x\n", func->device << 3 | 0x0); pci_lock_rescan_remove(); for (j = 0; j < 0x08; j++) { temp = pci_get_domain_bus_and_slot(0, func->busno, (func->device << 3) | j); if (temp) { pci_stop_and_remove_bus_device(temp); pci_dev_put(temp); } } pci_dev_put(func->dev); pci_unlock_rescan_remove(); } /* * The following function is to fix kernel bug regarding * getting bus entries, here we manually add those primary * bus entries to kernel bus structure whenever apply */ static u8 bus_structure_fixup(u8 busno) { struct pci_bus *bus, *b; struct pci_dev *dev; u16 l; if (pci_find_bus(0, busno) || !(ibmphp_find_same_bus_num(busno))) return 1; bus = kmalloc(sizeof(*bus), GFP_KERNEL); if (!bus) return 1; dev = kmalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { kfree(bus); return 1; } bus->number = busno; bus->ops = ibmphp_pci_bus->ops; dev->bus = bus; for (dev->devfn = 0; dev->devfn < 256; dev->devfn += 8) { if (!pci_read_config_word(dev, PCI_VENDOR_ID, &l) && (l != 0x0000) && (l != 0xffff)) { debug("%s - Inside bus_structure_fixup()\n", __func__); b = pci_scan_bus(busno, ibmphp_pci_bus->ops, NULL); if (!b) continue; pci_bus_add_devices(b); break; } } kfree(dev); kfree(bus); return 0; } static int ibm_configure_device(struct pci_func *func) { struct pci_bus *child; int num; int flag = 0; /* this is to make sure we don't double scan the bus, for bridged devices primarily */ pci_lock_rescan_remove(); if (!(bus_structure_fixup(func->busno))) flag = 1; if (func->dev == NULL) func->dev = pci_get_domain_bus_and_slot(0, func->busno, PCI_DEVFN(func->device, func->function)); if (func->dev == NULL) { struct pci_bus *bus = pci_find_bus(0, func->busno); if (!bus) goto out; num = pci_scan_slot(bus, PCI_DEVFN(func->device, func->function)); if (num) pci_bus_add_devices(bus); func->dev = pci_get_domain_bus_and_slot(0, func->busno, PCI_DEVFN(func->device, func->function)); if (func->dev == NULL) { err("ERROR... 
: pci_dev still NULL\n"); goto out; } } if (!(flag) && (func->dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)) { pci_hp_add_bridge(func->dev); child = func->dev->subordinate; if (child) pci_bus_add_devices(child); } out: pci_unlock_rescan_remove(); return 0; } /******************************************************* * Returns whether the bus is empty or not *******************************************************/ static int is_bus_empty(struct slot *slot_cur) { int rc; struct slot *tmp_slot; u8 i = slot_cur->bus_on->slot_min; while (i <= slot_cur->bus_on->slot_max) { if (i == slot_cur->number) { i++; continue; } tmp_slot = ibmphp_get_slot_from_physical_num(i); if (!tmp_slot) return 0; rc = slot_update(&tmp_slot); if (rc) return 0; if (SLOT_PRESENT(tmp_slot->status) && SLOT_PWRGD(tmp_slot->status)) return 0; i++; } return 1; } /*********************************************************** * If the HPC permits and the bus currently empty, tries to set the * bus speed and mode at the maximum card and bus capability * Parameters: slot * Returns: bus is set (0) or error code ***********************************************************/ static int set_bus(struct slot *slot_cur) { int rc; u8 speed; u8 cmd = 0x0; int retval; static const struct pci_device_id ciobx[] = { { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, 0x0101) }, { }, }; debug("%s - entry slot # %d\n", __func__, slot_cur->number); if (SET_BUS_STATUS(slot_cur->ctrl) && is_bus_empty(slot_cur)) { rc = slot_update(&slot_cur); if (rc) return rc; speed = SLOT_SPEED(slot_cur->ext_status); debug("ext_status = %x, speed = %x\n", slot_cur->ext_status, speed); switch (speed) { case HPC_SLOT_SPEED_33: cmd = HPC_BUS_33CONVMODE; break; case HPC_SLOT_SPEED_66: if (SLOT_PCIX(slot_cur->ext_status)) { if ((slot_cur->supported_speed >= BUS_SPEED_66) && (slot_cur->supported_bus_mode == BUS_MODE_PCIX)) cmd = HPC_BUS_66PCIXMODE; else if (!SLOT_BUS_MODE(slot_cur->ext_status)) /* if max slot/bus capability is 66 pci and there's no bus mode mismatch, then the adapter supports 66 pci */ cmd = HPC_BUS_66CONVMODE; else cmd = HPC_BUS_33CONVMODE; } else { if (slot_cur->supported_speed >= BUS_SPEED_66) cmd = HPC_BUS_66CONVMODE; else cmd = HPC_BUS_33CONVMODE; } break; case HPC_SLOT_SPEED_133: switch (slot_cur->supported_speed) { case BUS_SPEED_33: cmd = HPC_BUS_33CONVMODE; break; case BUS_SPEED_66: if (slot_cur->supported_bus_mode == BUS_MODE_PCIX) cmd = HPC_BUS_66PCIXMODE; else cmd = HPC_BUS_66CONVMODE; break; case BUS_SPEED_100: cmd = HPC_BUS_100PCIXMODE; break; case BUS_SPEED_133: /* This is to take care of the bug in CIOBX chip */ if (pci_dev_present(ciobx)) ibmphp_hpc_writeslot(slot_cur, HPC_BUS_100PCIXMODE); cmd = HPC_BUS_133PCIXMODE; break; default: err("Wrong bus speed\n"); return -ENODEV; } break; default: err("wrong slot speed\n"); return -ENODEV; } debug("setting bus speed for slot %d, cmd %x\n", slot_cur->number, cmd); retval = ibmphp_hpc_writeslot(slot_cur, cmd); if (retval) { err("setting bus speed failed\n"); return retval; } if (CTLR_RESULT(slot_cur->ctrl->status)) { err("command not completed successfully in set_bus\n"); return -EIO; } } /* This is for x440, once Brandon fixes the firmware, will not need this delay */ msleep(1000); debug("%s -Exit\n", __func__); return 0; } /* This routine checks the bus limitations that the slot is on from the BIOS. * This is used in deciding whether or not to power up the slot. * (electrical/spec limitations. 
For example, >1 133 MHz or >2 66 PCI cards on * same bus) * Parameters: slot * Returns: 0 = no limitations, -EINVAL = exceeded limitations on the bus */ static int check_limitations(struct slot *slot_cur) { u8 i; struct slot *tmp_slot; u8 count = 0; u8 limitation = 0; for (i = slot_cur->bus_on->slot_min; i <= slot_cur->bus_on->slot_max; i++) { tmp_slot = ibmphp_get_slot_from_physical_num(i); if (!tmp_slot) return -ENODEV; if ((SLOT_PWRGD(tmp_slot->status)) && !(SLOT_CONNECT(tmp_slot->status))) count++; } get_cur_bus_info(&slot_cur); switch (slot_cur->bus_on->current_speed) { case BUS_SPEED_33: limitation = slot_cur->bus_on->slots_at_33_conv; break; case BUS_SPEED_66: if (slot_cur->bus_on->current_bus_mode == BUS_MODE_PCIX) limitation = slot_cur->bus_on->slots_at_66_pcix; else limitation = slot_cur->bus_on->slots_at_66_conv; break; case BUS_SPEED_100: limitation = slot_cur->bus_on->slots_at_100_pcix; break; case BUS_SPEED_133: limitation = slot_cur->bus_on->slots_at_133_pcix; break; } if ((count + 1) > limitation) return -EINVAL; return 0; } static inline void print_card_capability(struct slot *slot_cur) { info("capability of the card is "); if ((slot_cur->ext_status & CARD_INFO) == PCIX133) info(" 133 MHz PCI-X\n"); else if ((slot_cur->ext_status & CARD_INFO) == PCIX66) info(" 66 MHz PCI-X\n"); else if ((slot_cur->ext_status & CARD_INFO) == PCI66) info(" 66 MHz PCI\n"); else info(" 33 MHz PCI\n"); } /* This routine will power on the slot, configure the device(s) and find the * drivers for them. * Parameters: hotplug_slot * Returns: 0 or failure codes */ static int enable_slot(struct hotplug_slot *hs) { int rc, i, rcpr; struct slot *slot_cur; u8 function; struct pci_func *tmp_func; ibmphp_lock_operations(); debug("ENABLING SLOT........\n"); slot_cur = to_slot(hs); rc = validate(slot_cur, ENABLE); if (rc) { err("validate function failed\n"); goto error_nopower; } attn_LED_blink(slot_cur); rc = set_bus(slot_cur); if (rc) { err("was not able to set the bus\n"); goto error_nopower; } /*-----------------debugging------------------------------*/ get_cur_bus_info(&slot_cur); debug("the current bus speed right after set_bus = %x\n", slot_cur->bus_on->current_speed); /*----------------------------------------------------------*/ rc = check_limitations(slot_cur); if (rc) { err("Adding this card exceeds the limitations of this bus.\n"); err("(i.e., >1 133MHz cards running on same bus, or >2 66 PCI cards running on same bus.\n"); err("Try hot-adding into another bus\n"); rc = -EINVAL; goto error_nopower; } rc = power_on(slot_cur); if (rc) { err("something wrong when powering up... please see below for details\n"); /* need to turn off before on, otherwise, blinking overwrites */ attn_off(slot_cur); attn_on(slot_cur); if (slot_update(&slot_cur)) { attn_off(slot_cur); attn_on(slot_cur); rc = -ENODEV; goto exit; } /* Check to see the error of why it failed */ if ((SLOT_POWER(slot_cur->status)) && !(SLOT_PWRGD(slot_cur->status))) err("power fault occurred trying to power up\n"); else if (SLOT_BUS_SPEED(slot_cur->status)) { err("bus speed mismatch occurred. please check current bus speed and card capability\n"); print_card_capability(slot_cur); } else if (SLOT_BUS_MODE(slot_cur->ext_status)) { err("bus mode mismatch occurred. 
please check current bus mode and card capability\n"); print_card_capability(slot_cur); } ibmphp_update_slot_info(slot_cur); goto exit; } debug("after power_on\n"); /*-----------------------debugging---------------------------*/ get_cur_bus_info(&slot_cur); debug("the current bus speed right after power_on = %x\n", slot_cur->bus_on->current_speed); /*----------------------------------------------------------*/ rc = slot_update(&slot_cur); if (rc) goto error_power; rc = -EINVAL; if (SLOT_POWER(slot_cur->status) && !(SLOT_PWRGD(slot_cur->status))) { err("power fault occurred trying to power up...\n"); goto error_power; } if (SLOT_POWER(slot_cur->status) && (SLOT_BUS_SPEED(slot_cur->status))) { err("bus speed mismatch occurred. please check current bus speed and card capability\n"); print_card_capability(slot_cur); goto error_power; } /* Don't think this case will happen after above checks... * but just in case, for paranoia sake */ if (!(SLOT_POWER(slot_cur->status))) { err("power on failed...\n"); goto error_power; } slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); if (!slot_cur->func) { /* do update_slot_info here? */ rc = -ENOMEM; goto error_power; } slot_cur->func->busno = slot_cur->bus; slot_cur->func->device = slot_cur->device; for (i = 0; i < 4; i++) slot_cur->func->irq[i] = slot_cur->irq[i]; debug("b4 configure_card, slot_cur->bus = %x, slot_cur->device = %x\n", slot_cur->bus, slot_cur->device); if (ibmphp_configure_card(slot_cur->func, slot_cur->number)) { err("configure_card was unsuccessful...\n"); /* true because don't need to actually deallocate resources, * just remove references */ ibmphp_unconfigure_card(&slot_cur, 1); debug("after unconfigure_card\n"); slot_cur->func = NULL; rc = -ENOMEM; goto error_power; } function = 0x00; do { tmp_func = ibm_slot_find(slot_cur->bus, slot_cur->func->device, function++); if (tmp_func && !(tmp_func->dev)) ibm_configure_device(tmp_func); } while (tmp_func); attn_off(slot_cur); if (slot_update(&slot_cur)) { rc = -EFAULT; goto exit; } ibmphp_print_test(); rc = ibmphp_update_slot_info(slot_cur); exit: ibmphp_unlock_operations(); return rc; error_nopower: attn_off(slot_cur); /* need to turn off if was blinking b4 */ attn_on(slot_cur); error_cont: rcpr = slot_update(&slot_cur); if (rcpr) { rc = rcpr; goto exit; } ibmphp_update_slot_info(slot_cur); goto exit; error_power: attn_off(slot_cur); /* need to turn off if was blinking b4 */ attn_on(slot_cur); rcpr = power_off(slot_cur); if (rcpr) { rc = rcpr; goto exit; } goto error_cont; } /************************************************************** * HOT REMOVING ADAPTER CARD * * INPUT: POINTER TO THE HOTPLUG SLOT STRUCTURE * * OUTPUT: SUCCESS 0 ; FAILURE: UNCONFIGURE , VALIDATE * * DISABLE POWER , * **************************************************************/ static int ibmphp_disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = to_slot(hotplug_slot); int rc; ibmphp_lock_operations(); rc = ibmphp_do_disable_slot(slot); ibmphp_unlock_operations(); return rc; } int ibmphp_do_disable_slot(struct slot *slot_cur) { int rc; u8 flag; debug("DISABLING SLOT...\n"); if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) return -ENODEV; flag = slot_cur->flag; slot_cur->flag = 1; if (flag == 1) { rc = validate(slot_cur, DISABLE); /* checking if powered off already & valid slot # */ if (rc) goto error; } attn_LED_blink(slot_cur); if (slot_cur->func == NULL) { /* We need this for functions that were there on bootup */ slot_cur->func = kzalloc(sizeof(struct pci_func), GFP_KERNEL); if 
(!slot_cur->func) { rc = -ENOMEM; goto error; } slot_cur->func->busno = slot_cur->bus; slot_cur->func->device = slot_cur->device; } ibm_unconfigure_device(slot_cur->func); /* * If we got here from latch suddenly opening on operating card or * a power fault, there's no power to the card, so cannot * read from it to determine what resources it occupied. This operation * is forbidden anyhow. The best we can do is remove it from kernel * lists at least */ if (!flag) { attn_off(slot_cur); return 0; } rc = ibmphp_unconfigure_card(&slot_cur, 0); slot_cur->func = NULL; debug("in disable_slot. after unconfigure_card\n"); if (rc) { err("could not unconfigure card.\n"); goto error; } rc = ibmphp_hpc_writeslot(slot_cur, HPC_SLOT_OFF); if (rc) goto error; attn_off(slot_cur); rc = slot_update(&slot_cur); if (rc) goto exit; rc = ibmphp_update_slot_info(slot_cur); ibmphp_print_test(); exit: return rc; error: /* Need to turn off if was blinking b4 */ attn_off(slot_cur); attn_on(slot_cur); if (slot_update(&slot_cur)) { rc = -EFAULT; goto exit; } if (flag) ibmphp_update_slot_info(slot_cur); goto exit; } const struct hotplug_slot_ops ibmphp_hotplug_slot_ops = { .set_attention_status = set_attention_status, .enable_slot = enable_slot, .disable_slot = ibmphp_disable_slot, .hardware_test = NULL, .get_power_status = get_power_status, .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_present, }; static void ibmphp_unload(void) { free_slots(); debug("after slots\n"); ibmphp_free_resources(); debug("after resources\n"); ibmphp_free_bus_info_queue(); debug("after bus info\n"); ibmphp_free_ebda_hpc_queue(); debug("after ebda hpc\n"); ibmphp_free_ebda_pci_rsrc_queue(); debug("after ebda pci rsrc\n"); kfree(ibmphp_pci_bus); } static int __init ibmphp_init(void) { struct pci_bus *bus; int i = 0; int rc = 0; init_flag = 1; info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); ibmphp_pci_bus = kmalloc(sizeof(*ibmphp_pci_bus), GFP_KERNEL); if (!ibmphp_pci_bus) { rc = -ENOMEM; goto exit; } bus = pci_find_bus(0, 0); if (!bus) { err("Can't find the root pci bus, can not continue\n"); rc = -ENODEV; goto error; } memcpy(ibmphp_pci_bus, bus, sizeof(*ibmphp_pci_bus)); ibmphp_debug = debug; for (i = 0; i < 16; i++) irqs[i] = 0; rc = ibmphp_access_ebda(); if (rc) goto error; debug("after ibmphp_access_ebda()\n"); rc = ibmphp_rsrc_init(); if (rc) goto error; debug("AFTER Resource & EBDA INITIALIZATIONS\n"); max_slots = get_max_slots(); rc = ibmphp_register_pci(); if (rc) goto error; if (init_ops()) { rc = -ENODEV; goto error; } ibmphp_print_test(); rc = ibmphp_hpc_start_poll_thread(); if (rc) goto error; exit: return rc; error: ibmphp_unload(); goto exit; } static void __exit ibmphp_exit(void) { ibmphp_hpc_stop_poll_thread(); debug("after polling\n"); ibmphp_unload(); debug("done\n"); } module_init(ibmphp_init); module_exit(ibmphp_exit);
linux-master
drivers/pci/hotplug/ibmphp_core.c
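A short x86-only sketch, assuming <asm/pci_x86.h>, of the same $PIR routing-table walk that ibmphp_init_devno() performs above, just printing each entry instead of filling a struct slot; the function name is illustrative only.

#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <asm/pci_x86.h>

static void example_dump_irq_routing(void)
{
	struct irq_routing_table *rtable = pcibios_get_irq_routing_table();
	int len, i;

	if (!rtable)
		return;
	/* Same entry-count calculation as ibmphp_init_devno() above. */
	len = (rtable->size - sizeof(struct irq_routing_table)) /
	      sizeof(struct irq_info);
	for (i = 0; i < len; i++)
		pr_info("entry %d: bus %02x devfn %02x slot %u\n", i,
			rtable->slots[i].bus, rtable->slots[i].devfn,
			rtable->slots[i].slot);
	kfree(rtable);	/* the table is a kmalloc'd copy owned by the caller */
}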
// SPDX-License-Identifier: GPL-2.0+ /* * Compaq Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/uaccess.h> #include "cpqphp.h" #include "cpqphp_nvram.h" #define ROM_INT15_PHY_ADDR 0x0FF859 #define READ_EV 0xD8A4 #define WRITE_EV 0xD8A5 struct register_foo { union { unsigned long lword; /* eax */ unsigned short word; /* ax */ struct { unsigned char low; /* al */ unsigned char high; /* ah */ } byte; } data; unsigned char opcode; /* see below */ unsigned long length; /* if the reg. is a pointer, how much data */ } __attribute__ ((packed)); struct all_reg { struct register_foo eax_reg; struct register_foo ebx_reg; struct register_foo ecx_reg; struct register_foo edx_reg; struct register_foo edi_reg; struct register_foo esi_reg; struct register_foo eflags_reg; } __attribute__ ((packed)); struct ev_hrt_header { u8 Version; u8 num_of_ctrl; u8 next; }; struct ev_hrt_ctrl { u8 bus; u8 device; u8 function; u8 mem_avail; u8 p_mem_avail; u8 io_avail; u8 bus_avail; u8 next; }; static u8 evbuffer_init; static u8 evbuffer_length; static u8 evbuffer[1024]; static void __iomem *compaq_int15_entry_point; /* lock for ordering int15_bios_call() */ static DEFINE_SPINLOCK(int15_lock); /* This is a series of function that deals with * setting & getting the hotplug resource table in some environment variable. */ /* * We really shouldn't be doing this unless there is a _very_ good reason to!!! 
* greg k-h */ static u32 add_byte(u32 **p_buffer, u8 value, u32 *used, u32 *avail) { u8 **tByte; if ((*used + 1) > *avail) return(1); *((u8 *)*p_buffer) = value; tByte = (u8 **)p_buffer; (*tByte)++; *used += 1; return(0); } static u32 add_dword(u32 **p_buffer, u32 value, u32 *used, u32 *avail) { if ((*used + 4) > *avail) return(1); **p_buffer = value; (*p_buffer)++; *used += 4; return(0); } /* * check_for_compaq_ROM * * this routine verifies that the ROM OEM string is 'COMPAQ' * * returns 0 for non-Compaq ROM, 1 for Compaq ROM */ static int check_for_compaq_ROM(void __iomem *rom_start) { u8 temp1, temp2, temp3, temp4, temp5, temp6; int result = 0; temp1 = readb(rom_start + 0xffea + 0); temp2 = readb(rom_start + 0xffea + 1); temp3 = readb(rom_start + 0xffea + 2); temp4 = readb(rom_start + 0xffea + 3); temp5 = readb(rom_start + 0xffea + 4); temp6 = readb(rom_start + 0xffea + 5); if ((temp1 == 'C') && (temp2 == 'O') && (temp3 == 'M') && (temp4 == 'P') && (temp5 == 'A') && (temp6 == 'Q')) { result = 1; } dbg("%s - returned %d\n", __func__, result); return result; } static u32 access_EV(u16 operation, u8 *ev_name, u8 *buffer, u32 *buf_size) { unsigned long flags; int op = operation; int ret_val; if (!compaq_int15_entry_point) return -ENODEV; spin_lock_irqsave(&int15_lock, flags); __asm__ ( "xorl %%ebx,%%ebx\n" \ "xorl %%edx,%%edx\n" \ "pushf\n" \ "push %%cs\n" \ "cli\n" \ "call *%6\n" : "=c" (*buf_size), "=a" (ret_val) : "a" (op), "c" (*buf_size), "S" (ev_name), "D" (buffer), "m" (compaq_int15_entry_point) : "%ebx", "%edx"); spin_unlock_irqrestore(&int15_lock, flags); return((ret_val & 0xFF00) >> 8); } /* * load_HRT * * Read the hot plug Resource Table from NVRAM */ static int load_HRT(void __iomem *rom_start) { u32 available; u32 temp_dword; u8 temp_byte = 0xFF; u32 rc; if (!check_for_compaq_ROM(rom_start)) return -ENODEV; available = 1024; /* Now load the EV */ temp_dword = available; rc = access_EV(READ_EV, "CQTHPS", evbuffer, &temp_dword); evbuffer_length = temp_dword; /* We're maintaining the resource lists so write FF to invalidate old * info */ temp_dword = 1; rc = access_EV(WRITE_EV, "CQTHPS", &temp_byte, &temp_dword); return rc; } /* * store_HRT * * Save the hot plug Resource Table in NVRAM */ static u32 store_HRT(void __iomem *rom_start) { u32 *buffer; u32 *pFill; u32 usedbytes; u32 available; u32 temp_dword; u32 rc; u8 loop; u8 numCtrl = 0; struct controller *ctrl; struct pci_resource *resNode; struct ev_hrt_header *p_EV_header; struct ev_hrt_ctrl *p_ev_ctrl; available = 1024; if (!check_for_compaq_ROM(rom_start)) return(1); buffer = (u32 *) evbuffer; if (!buffer) return(1); pFill = buffer; usedbytes = 0; p_EV_header = (struct ev_hrt_header *) pFill; ctrl = cpqhp_ctrl_list; /* The revision of this structure */ rc = add_byte(&pFill, 1 + ctrl->push_flag, &usedbytes, &available); if (rc) return(rc); /* The number of controllers */ rc = add_byte(&pFill, 1, &usedbytes, &available); if (rc) return(rc); while (ctrl) { p_ev_ctrl = (struct ev_hrt_ctrl *) pFill; numCtrl++; /* The bus number */ rc = add_byte(&pFill, ctrl->bus, &usedbytes, &available); if (rc) return(rc); /* The device Number */ rc = add_byte(&pFill, PCI_SLOT(ctrl->pci_dev->devfn), &usedbytes, &available); if (rc) return(rc); /* The function Number */ rc = add_byte(&pFill, PCI_FUNC(ctrl->pci_dev->devfn), &usedbytes, &available); if (rc) return(rc); /* Skip the number of available entries */ rc = add_dword(&pFill, 0, &usedbytes, &available); if (rc) return(rc); /* Figure out memory Available */ resNode = ctrl->mem_head; loop = 0; 
while (resNode) { loop++; /* base */ rc = add_dword(&pFill, resNode->base, &usedbytes, &available); if (rc) return(rc); /* length */ rc = add_dword(&pFill, resNode->length, &usedbytes, &available); if (rc) return(rc); resNode = resNode->next; } /* Fill in the number of entries */ p_ev_ctrl->mem_avail = loop; /* Figure out prefetchable memory Available */ resNode = ctrl->p_mem_head; loop = 0; while (resNode) { loop++; /* base */ rc = add_dword(&pFill, resNode->base, &usedbytes, &available); if (rc) return(rc); /* length */ rc = add_dword(&pFill, resNode->length, &usedbytes, &available); if (rc) return(rc); resNode = resNode->next; } /* Fill in the number of entries */ p_ev_ctrl->p_mem_avail = loop; /* Figure out IO Available */ resNode = ctrl->io_head; loop = 0; while (resNode) { loop++; /* base */ rc = add_dword(&pFill, resNode->base, &usedbytes, &available); if (rc) return(rc); /* length */ rc = add_dword(&pFill, resNode->length, &usedbytes, &available); if (rc) return(rc); resNode = resNode->next; } /* Fill in the number of entries */ p_ev_ctrl->io_avail = loop; /* Figure out bus Available */ resNode = ctrl->bus_head; loop = 0; while (resNode) { loop++; /* base */ rc = add_dword(&pFill, resNode->base, &usedbytes, &available); if (rc) return(rc); /* length */ rc = add_dword(&pFill, resNode->length, &usedbytes, &available); if (rc) return(rc); resNode = resNode->next; } /* Fill in the number of entries */ p_ev_ctrl->bus_avail = loop; ctrl = ctrl->next; } p_EV_header->num_of_ctrl = numCtrl; /* Now store the EV */ temp_dword = usedbytes; rc = access_EV(WRITE_EV, "CQTHPS", (u8 *) buffer, &temp_dword); dbg("usedbytes = 0x%x, length = 0x%x\n", usedbytes, temp_dword); evbuffer_length = temp_dword; if (rc) { err(msg_unable_to_save); return(1); } return(0); } void compaq_nvram_init(void __iomem *rom_start) { if (rom_start) compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); dbg("int15 entry = %p\n", compaq_int15_entry_point); } int compaq_nvram_load(void __iomem *rom_start, struct controller *ctrl) { u8 bus, device, function; u8 nummem, numpmem, numio, numbus; u32 rc; u8 *p_byte; struct pci_resource *mem_node; struct pci_resource *p_mem_node; struct pci_resource *io_node; struct pci_resource *bus_node; struct ev_hrt_ctrl *p_ev_ctrl; struct ev_hrt_header *p_EV_header; if (!evbuffer_init) { /* Read the resource list information in from NVRAM */ if (load_HRT(rom_start)) memset(evbuffer, 0, 1024); evbuffer_init = 1; } /* If we saved information in NVRAM, use it now */ p_EV_header = (struct ev_hrt_header *) evbuffer; /* The following code is for systems where version 1.0 of this * driver has been loaded, but doesn't support the hardware. * In that case, the driver would incorrectly store something * in NVRAM. 
*/ if ((p_EV_header->Version == 2) || ((p_EV_header->Version == 1) && !ctrl->push_flag)) { p_byte = &(p_EV_header->next); p_ev_ctrl = (struct ev_hrt_ctrl *) &(p_EV_header->next); p_byte += 3; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) return 2; bus = p_ev_ctrl->bus; device = p_ev_ctrl->device; function = p_ev_ctrl->function; while ((bus != ctrl->bus) || (device != PCI_SLOT(ctrl->pci_dev->devfn)) || (function != PCI_FUNC(ctrl->pci_dev->devfn))) { nummem = p_ev_ctrl->mem_avail; numpmem = p_ev_ctrl->p_mem_avail; numio = p_ev_ctrl->io_avail; numbus = p_ev_ctrl->bus_avail; p_byte += 4; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) return 2; /* Skip forward to the next entry */ p_byte += (nummem + numpmem + numio + numbus) * 8; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) return 2; p_ev_ctrl = (struct ev_hrt_ctrl *) p_byte; p_byte += 3; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) return 2; bus = p_ev_ctrl->bus; device = p_ev_ctrl->device; function = p_ev_ctrl->function; } nummem = p_ev_ctrl->mem_avail; numpmem = p_ev_ctrl->p_mem_avail; numio = p_ev_ctrl->io_avail; numbus = p_ev_ctrl->bus_avail; p_byte += 4; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) return 2; while (nummem--) { mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL); if (!mem_node) break; mem_node->base = *(u32 *)p_byte; dbg("mem base = %8.8x\n", mem_node->base); p_byte += 4; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) { kfree(mem_node); return 2; } mem_node->length = *(u32 *)p_byte; dbg("mem length = %8.8x\n", mem_node->length); p_byte += 4; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) { kfree(mem_node); return 2; } mem_node->next = ctrl->mem_head; ctrl->mem_head = mem_node; } while (numpmem--) { p_mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL); if (!p_mem_node) break; p_mem_node->base = *(u32 *)p_byte; dbg("pre-mem base = %8.8x\n", p_mem_node->base); p_byte += 4; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) { kfree(p_mem_node); return 2; } p_mem_node->length = *(u32 *)p_byte; dbg("pre-mem length = %8.8x\n", p_mem_node->length); p_byte += 4; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) { kfree(p_mem_node); return 2; } p_mem_node->next = ctrl->p_mem_head; ctrl->p_mem_head = p_mem_node; } while (numio--) { io_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL); if (!io_node) break; io_node->base = *(u32 *)p_byte; dbg("io base = %8.8x\n", io_node->base); p_byte += 4; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) { kfree(io_node); return 2; } io_node->length = *(u32 *)p_byte; dbg("io length = %8.8x\n", io_node->length); p_byte += 4; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) { kfree(io_node); return 2; } io_node->next = ctrl->io_head; ctrl->io_head = io_node; } while (numbus--) { bus_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL); if (!bus_node) break; bus_node->base = *(u32 *)p_byte; p_byte += 4; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) { kfree(bus_node); return 2; } bus_node->length = *(u32 *)p_byte; p_byte += 4; if (p_byte > ((u8 *)p_EV_header + evbuffer_length)) { kfree(bus_node); return 2; } bus_node->next = ctrl->bus_head; ctrl->bus_head = bus_node; } /* If all of the following fail, we don't have any resources for * hot plug add */ rc = 1; rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); rc &= cpqhp_resource_sort_and_combine(&(ctrl->io_head)); rc &= cpqhp_resource_sort_and_combine(&(ctrl->bus_head)); if (rc) return(rc); } else { if 
((evbuffer[0] != 0) && (!ctrl->push_flag)) return 1; } return 0; } int compaq_nvram_store(void __iomem *rom_start) { int rc = 1; if (rom_start == NULL) return -ENODEV; if (evbuffer_init) { rc = store_HRT(rom_start); if (rc) err(msg_unable_to_save); } return rc; }
linux-master
drivers/pci/hotplug/cpqphp_nvram.c
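Illustrative sketch of the bounded-append pattern used by the static add_byte()/add_dword() helpers in cpqphp_nvram.c above (it would have to live in that file to see them): the cursor advances through a fixed buffer and each helper fails once the write would exceed the available space. The function name and field layout are hypothetical.

static u32 example_pack_range(u32 *buffer, u32 avail, u8 busno,
			      u32 base, u32 length)
{
	u32 *fill = buffer;
	u32 used = 0;

	/* Returns 0 on success, 1 if the entry would not fit (same convention as above). */
	if (add_byte(&fill, busno, &used, &avail))
		return 1;
	if (add_dword(&fill, base, &used, &avail))
		return 1;
	if (add_dword(&fill, length, &used, &avail))
		return 1;
	return 0;
}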
// SPDX-License-Identifier: GPL-2.0+ /* * Compaq Hot Plug Controller Driver * * Copyright (c) 1995,2001 Compaq Computer Corporation * Copyright (c) 2001,2003 Greg Kroah-Hartman ([email protected]) * Copyright (c) 2001 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/pci.h> #include "shpchp.h" /* A few routines that create sysfs entries for the hot plug controller */ static ssize_t show_ctrl(struct device *dev, struct device_attribute *attr, char *buf) { struct pci_dev *pdev; struct resource *res; struct pci_bus *bus; size_t len = 0; int busnr; pdev = to_pci_dev(dev); bus = pdev->subordinate; len += sysfs_emit_at(buf, len, "Free resources: memory\n"); pci_bus_for_each_resource(bus, res) { if (res && (res->flags & IORESOURCE_MEM) && !(res->flags & IORESOURCE_PREFETCH)) { len += sysfs_emit_at(buf, len, "start = %8.8llx, length = %8.8llx\n", (unsigned long long)res->start, (unsigned long long)resource_size(res)); } } len += sysfs_emit_at(buf, len, "Free resources: prefetchable memory\n"); pci_bus_for_each_resource(bus, res) { if (res && (res->flags & IORESOURCE_MEM) && (res->flags & IORESOURCE_PREFETCH)) { len += sysfs_emit_at(buf, len, "start = %8.8llx, length = %8.8llx\n", (unsigned long long)res->start, (unsigned long long)resource_size(res)); } } len += sysfs_emit_at(buf, len, "Free resources: IO\n"); pci_bus_for_each_resource(bus, res) { if (res && (res->flags & IORESOURCE_IO)) { len += sysfs_emit_at(buf, len, "start = %8.8llx, length = %8.8llx\n", (unsigned long long)res->start, (unsigned long long)resource_size(res)); } } len += sysfs_emit_at(buf, len, "Free resources: bus numbers\n"); for (busnr = bus->busn_res.start; busnr <= bus->busn_res.end; busnr++) { if (!pci_find_bus(pci_domain_nr(bus), busnr)) break; } if (busnr < bus->busn_res.end) len += sysfs_emit_at(buf, len, "start = %8.8x, length = %8.8x\n", busnr, (int)(bus->busn_res.end - busnr)); return len; } static DEVICE_ATTR(ctrl, S_IRUGO, show_ctrl, NULL); int shpchp_create_ctrl_files(struct controller *ctrl) { return device_create_file(&ctrl->pci_dev->dev, &dev_attr_ctrl); } void shpchp_remove_ctrl_files(struct controller *ctrl) { device_remove_file(&ctrl->pci_dev->dev, &dev_attr_ctrl); }
linux-master
drivers/pci/hotplug/shpchp_sysfs.c
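show_ctrl() above builds its multi-line report by repeatedly appending at a running offset with sysfs_emit_at() and adding each return value to len. Below is a small userspace sketch of that append-at-offset pattern; emit_at() is a hypothetical stand-in for the kernel helper and simply drops anything that would overflow the fixed page, which is a simplification of the real API.

#include <stdio.h>
#include <stdarg.h>

#define PAGE_SIZE 4096

/* format into the remainder of a fixed-size "page" and report bytes added */
static int emit_at(char *buf, int at, const char *fmt, ...)
{
	va_list ap;
	int n;

	if (at < 0 || at >= PAGE_SIZE)
		return 0;
	va_start(ap, fmt);
	n = vsnprintf(buf + at, PAGE_SIZE - at, fmt, ap);
	va_end(ap);
	if (n < 0 || n >= PAGE_SIZE - at)
		return 0;            /* would not fit: add nothing (simplified policy) */
	return n;
}

int main(void)
{
	char page[PAGE_SIZE];
	int len = 0;

	len += emit_at(page, len, "Free resources: memory\n");
	len += emit_at(page, len, "start = %8.8llx, length = %8.8llx\n",
		       0xf0000000ULL, 0x100000ULL);
	fwrite(page, 1, len, stdout);
	return 0;
}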
// SPDX-License-Identifier: GPL-2.0+ /* * cpcihp_generic.c * * Generic port I/O CompactPCI driver * * Copyright 2002 SOMA Networks, Inc. * Copyright 2001 Intel San Luis Obispo * Copyright 2000,2001 MontaVista Software Inc. * * This generic CompactPCI hotplug driver should allow using the PCI hotplug * mechanism on any CompactPCI board that exposes the #ENUM signal as a bit * in a system register that can be read through standard port I/O. * * Send feedback to <[email protected]> */ #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/string.h> #include "cpci_hotplug.h" #define DRIVER_VERSION "0.1" #define DRIVER_AUTHOR "Scott Murray <[email protected]>" #define DRIVER_DESC "Generic port I/O CompactPCI Hot Plug Driver" #if !defined(MODULE) #define MY_NAME "cpcihp_generic" #else #define MY_NAME THIS_MODULE->name #endif #define dbg(format, arg...) \ do { \ if (debug) \ printk(KERN_DEBUG "%s: " format "\n", \ MY_NAME, ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME, ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME, ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME, ## arg) /* local variables */ static bool debug; static char *bridge; static u8 bridge_busnr; static u8 bridge_slot; static struct pci_bus *bus; static u8 first_slot; static u8 last_slot; static u16 port; static unsigned int enum_bit; static u8 enum_mask; static struct cpci_hp_controller_ops generic_hpc_ops; static struct cpci_hp_controller generic_hpc; static int __init validate_parameters(void) { char *str; char *p; unsigned long tmp; if (!bridge) { info("not configured, disabling."); return -EINVAL; } str = bridge; if (!*str) return -EINVAL; tmp = simple_strtoul(str, &p, 16); if (p == str || tmp > 0xff) { err("Invalid hotplug bus bridge device bus number"); return -EINVAL; } bridge_busnr = (u8) tmp; dbg("bridge_busnr = 0x%02x", bridge_busnr); if (*p != ':') { err("Invalid hotplug bus bridge device"); return -EINVAL; } str = p + 1; tmp = simple_strtoul(str, &p, 16); if (p == str || tmp > 0x1f) { err("Invalid hotplug bus bridge device slot number"); return -EINVAL; } bridge_slot = (u8) tmp; dbg("bridge_slot = 0x%02x", bridge_slot); dbg("first_slot = 0x%02x", first_slot); dbg("last_slot = 0x%02x", last_slot); if (!(first_slot && last_slot)) { err("Need to specify first_slot and last_slot"); return -EINVAL; } if (last_slot < first_slot) { err("first_slot must be less than last_slot"); return -EINVAL; } dbg("port = 0x%04x", port); dbg("enum_bit = 0x%02x", enum_bit); if (enum_bit > 7) { err("Invalid #ENUM bit"); return -EINVAL; } enum_mask = 1 << enum_bit; return 0; } static int query_enum(void) { u8 value; value = inb_p(port); return ((value & enum_mask) == enum_mask); } static int __init cpcihp_generic_init(void) { int status; struct resource *r; struct pci_dev *dev; info(DRIVER_DESC " version: " DRIVER_VERSION); status = validate_parameters(); if (status) return status; r = request_region(port, 1, "#ENUM hotswap signal register"); if (!r) return -EBUSY; dev = pci_get_domain_bus_and_slot(0, bridge_busnr, PCI_DEVFN(bridge_slot, 0)); if (!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) { err("Invalid bridge device %s", bridge); pci_dev_put(dev); return -EINVAL; } bus = dev->subordinate; pci_dev_put(dev); memset(&generic_hpc, 0, sizeof(struct cpci_hp_controller)); generic_hpc_ops.query_enum = query_enum; generic_hpc.ops = &generic_hpc_ops; status = 
cpci_hp_register_controller(&generic_hpc); if (status != 0) { err("Could not register cPCI hotplug controller"); return -ENODEV; } dbg("registered controller"); status = cpci_hp_register_bus(bus, first_slot, last_slot); if (status != 0) { err("Could not register cPCI hotplug bus"); goto init_bus_register_error; } dbg("registered bus"); status = cpci_hp_start(); if (status != 0) { err("Could not start cPCI hotplug system"); goto init_start_error; } dbg("started cpci hp system"); return 0; init_start_error: cpci_hp_unregister_bus(bus); init_bus_register_error: cpci_hp_unregister_controller(&generic_hpc); err("status = %d", status); return status; } static void __exit cpcihp_generic_exit(void) { cpci_hp_stop(); cpci_hp_unregister_bus(bus); cpci_hp_unregister_controller(&generic_hpc); release_region(port, 1); } module_init(cpcihp_generic_init); module_exit(cpcihp_generic_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); module_param(bridge, charp, 0); MODULE_PARM_DESC(bridge, "Hotswap bus bridge device, <bus>:<slot> (bus and slot are in hexadecimal)"); module_param(first_slot, byte, 0); MODULE_PARM_DESC(first_slot, "Hotswap bus first slot number"); module_param(last_slot, byte, 0); MODULE_PARM_DESC(last_slot, "Hotswap bus last slot number"); module_param_hw(port, ushort, ioport, 0); MODULE_PARM_DESC(port, "#ENUM signal I/O port"); module_param(enum_bit, uint, 0); MODULE_PARM_DESC(enum_bit, "#ENUM signal bit (0-7)");
linux-master
drivers/pci/hotplug/cpcihp_generic.c
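validate_parameters() in cpcihp_generic.c above parses the bridge module parameter as "<bus>:<slot>" in hexadecimal, rejecting bus numbers above 0xff and slot numbers above 0x1f. Here is a minimal userspace sketch of that parsing, using strtoul() in place of the kernel's simple_strtoul(); parse_bridge() is a hypothetical helper added only for illustration.

#include <stdio.h>
#include <stdlib.h>

static int parse_bridge(const char *arg, unsigned char *busnr, unsigned char *slot)
{
	char *end;
	unsigned long v;

	v = strtoul(arg, &end, 16);          /* hex bus number, 0..0xff */
	if (end == arg || v > 0xff || *end != ':')
		return -1;
	*busnr = (unsigned char)v;

	arg = end + 1;
	v = strtoul(arg, &end, 16);          /* hex slot (device) number, 5 bits on PCI */
	if (end == arg || v > 0x1f || *end != '\0')
		return -1;
	*slot = (unsigned char)v;
	return 0;
}

int main(void)
{
	unsigned char bus, slot;

	if (parse_bridge("3:0a", &bus, &slot) == 0)
		printf("bridge at bus 0x%02x, slot 0x%02x\n", bus, slot);
	return 0;
}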
// SPDX-License-Identifier: GPL-2.0+ /* * IBM Hot Plug Controller Driver * * Written By: Jyoti Shah, IBM Corporation * * Copyright (C) 2001-2003 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * <[email protected]> * */ #include <linux/wait.h> #include <linux/time.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/sched.h> #include <linux/kthread.h> #include "ibmphp.h" static int to_debug = 0; #define debug_polling(fmt, arg...) do { if (to_debug) debug(fmt, arg); } while (0) //---------------------------------------------------------------------------- // timeout values //---------------------------------------------------------------------------- #define CMD_COMPLETE_TOUT_SEC 60 // give HPC 60 sec to finish cmd #define HPC_CTLR_WORKING_TOUT 60 // give HPC 60 sec to finish cmd #define HPC_GETACCESS_TIMEOUT 60 // seconds #define POLL_INTERVAL_SEC 2 // poll HPC every 2 seconds #define POLL_LATCH_CNT 5 // poll latch 5 times, then poll slots //---------------------------------------------------------------------------- // Winnipeg Architected Register Offsets //---------------------------------------------------------------------------- #define WPG_I2CMBUFL_OFFSET 0x08 // I2C Message Buffer Low #define WPG_I2CMOSUP_OFFSET 0x10 // I2C Master Operation Setup Reg #define WPG_I2CMCNTL_OFFSET 0x20 // I2C Master Control Register #define WPG_I2CPARM_OFFSET 0x40 // I2C Parameter Register #define WPG_I2CSTAT_OFFSET 0x70 // I2C Status Register //---------------------------------------------------------------------------- // Winnipeg Store Type commands (Add this commands to the register offset) //---------------------------------------------------------------------------- #define WPG_I2C_AND 0x1000 // I2C AND operation #define WPG_I2C_OR 0x2000 // I2C OR operation //---------------------------------------------------------------------------- // Command set for I2C Master Operation Setup Register //---------------------------------------------------------------------------- #define WPG_READATADDR_MASK 0x00010000 // read,bytes,I2C shifted,index #define WPG_WRITEATADDR_MASK 0x40010000 // write,bytes,I2C shifted,index #define WPG_READDIRECT_MASK 0x10010000 #define WPG_WRITEDIRECT_MASK 0x60010000 //---------------------------------------------------------------------------- // bit masks for I2C Master Control Register //---------------------------------------------------------------------------- #define WPG_I2CMCNTL_STARTOP_MASK 0x00000002 // Start the Operation //---------------------------------------------------------------------------- // //---------------------------------------------------------------------------- #define WPG_I2C_IOREMAP_SIZE 0x2044 // size of linear address interval //---------------------------------------------------------------------------- // command index //---------------------------------------------------------------------------- #define WPG_1ST_SLOT_INDEX 0x01 // index - 1st slot for ctlr #define WPG_CTLR_INDEX 0x0F // index - ctlr #define WPG_1ST_EXTSLOT_INDEX 0x10 // index - 1st ext slot for ctlr #define WPG_1ST_BUS_INDEX 0x1F // index - 1st bus for ctlr //---------------------------------------------------------------------------- // macro utilities //---------------------------------------------------------------------------- // if bits 20,22,25,26,27,29,30 are OFF return 1 #define HPC_I2CSTATUS_CHECK(s) ((u8)((s & 
0x00000A76) ? 0 : 1)) //---------------------------------------------------------------------------- // global variables //---------------------------------------------------------------------------- static DEFINE_MUTEX(sem_hpcaccess); // lock access to HPC static DEFINE_MUTEX(operations_mutex); // lock all operations and // access to data structures static DECLARE_COMPLETION(exit_complete); // make sure polling thread goes away static struct task_struct *ibmphp_poll_thread; //---------------------------------------------------------------------------- // local function prototypes //---------------------------------------------------------------------------- static u8 i2c_ctrl_read(struct controller *, void __iomem *, u8); static u8 i2c_ctrl_write(struct controller *, void __iomem *, u8, u8); static u8 hpc_writecmdtoindex(u8, u8); static u8 hpc_readcmdtoindex(u8, u8); static void get_hpc_access(void); static void free_hpc_access(void); static int poll_hpc(void *data); static int process_changeinstatus(struct slot *, struct slot *); static int process_changeinlatch(u8, u8, struct controller *); static int hpc_wait_ctlr_notworking(int, struct controller *, void __iomem *, u8 *); //---------------------------------------------------------------------------- /*---------------------------------------------------------------------- * Name: i2c_ctrl_read * * Action: read from HPC over I2C * *---------------------------------------------------------------------*/ static u8 i2c_ctrl_read(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 index) { u8 status; int i; void __iomem *wpg_addr; // base addr + offset unsigned long wpg_data; // data to/from WPG LOHI format unsigned long ultemp; unsigned long data; // actual data HILO format debug_polling("%s - Entry WPGBbar[%p] index[%x] \n", __func__, WPGBbar, index); //-------------------------------------------------------------------- // READ - step 1 // read at address, byte length, I2C address (shifted), index // or read direct, byte length, index if (ctlr_ptr->ctlr_type == 0x02) { data = WPG_READATADDR_MASK; // fill in I2C address ultemp = (unsigned long)ctlr_ptr->u.wpeg_ctlr.i2c_addr; ultemp = ultemp >> 1; data |= (ultemp << 8); // fill in index data |= (unsigned long)index; } else if (ctlr_ptr->ctlr_type == 0x04) { data = WPG_READDIRECT_MASK; // fill in index ultemp = (unsigned long)index; ultemp = ultemp << 8; data |= ultemp; } else { err("this controller type is not supported \n"); return HPC_ERROR; } wpg_data = swab32(data); // swap data before writing wpg_addr = WPGBbar + WPG_I2CMOSUP_OFFSET; writel(wpg_data, wpg_addr); //-------------------------------------------------------------------- // READ - step 2 : clear the message buffer data = 0x00000000; wpg_data = swab32(data); wpg_addr = WPGBbar + WPG_I2CMBUFL_OFFSET; writel(wpg_data, wpg_addr); //-------------------------------------------------------------------- // READ - step 3 : issue start operation, I2C master control bit 30:ON // 2020 : [20] OR operation at [20] offset 0x20 data = WPG_I2CMCNTL_STARTOP_MASK; wpg_data = swab32(data); wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET + WPG_I2C_OR; writel(wpg_data, wpg_addr); //-------------------------------------------------------------------- // READ - step 4 : wait until start operation bit clears i = CMD_COMPLETE_TOUT_SEC; while (i) { msleep(10); wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET; wpg_data = readl(wpg_addr); data = swab32(wpg_data); if (!(data & WPG_I2CMCNTL_STARTOP_MASK)) break; i--; } if (i == 0) { debug("%s - Error : WPG 
timeout\n", __func__); return HPC_ERROR; } //-------------------------------------------------------------------- // READ - step 5 : read I2C status register i = CMD_COMPLETE_TOUT_SEC; while (i) { msleep(10); wpg_addr = WPGBbar + WPG_I2CSTAT_OFFSET; wpg_data = readl(wpg_addr); data = swab32(wpg_data); if (HPC_I2CSTATUS_CHECK(data)) break; i--; } if (i == 0) { debug("ctrl_read - Exit Error:I2C timeout\n"); return HPC_ERROR; } //-------------------------------------------------------------------- // READ - step 6 : get DATA wpg_addr = WPGBbar + WPG_I2CMBUFL_OFFSET; wpg_data = readl(wpg_addr); data = swab32(wpg_data); status = (u8) data; debug_polling("%s - Exit index[%x] status[%x]\n", __func__, index, status); return (status); } /*---------------------------------------------------------------------- * Name: i2c_ctrl_write * * Action: write to HPC over I2C * * Return 0 or error codes *---------------------------------------------------------------------*/ static u8 i2c_ctrl_write(struct controller *ctlr_ptr, void __iomem *WPGBbar, u8 index, u8 cmd) { u8 rc; void __iomem *wpg_addr; // base addr + offset unsigned long wpg_data; // data to/from WPG LOHI format unsigned long ultemp; unsigned long data; // actual data HILO format int i; debug_polling("%s - Entry WPGBbar[%p] index[%x] cmd[%x]\n", __func__, WPGBbar, index, cmd); rc = 0; //-------------------------------------------------------------------- // WRITE - step 1 // write at address, byte length, I2C address (shifted), index // or write direct, byte length, index data = 0x00000000; if (ctlr_ptr->ctlr_type == 0x02) { data = WPG_WRITEATADDR_MASK; // fill in I2C address ultemp = (unsigned long)ctlr_ptr->u.wpeg_ctlr.i2c_addr; ultemp = ultemp >> 1; data |= (ultemp << 8); // fill in index data |= (unsigned long)index; } else if (ctlr_ptr->ctlr_type == 0x04) { data = WPG_WRITEDIRECT_MASK; // fill in index ultemp = (unsigned long)index; ultemp = ultemp << 8; data |= ultemp; } else { err("this controller type is not supported \n"); return HPC_ERROR; } wpg_data = swab32(data); // swap data before writing wpg_addr = WPGBbar + WPG_I2CMOSUP_OFFSET; writel(wpg_data, wpg_addr); //-------------------------------------------------------------------- // WRITE - step 2 : clear the message buffer data = 0x00000000 | (unsigned long)cmd; wpg_data = swab32(data); wpg_addr = WPGBbar + WPG_I2CMBUFL_OFFSET; writel(wpg_data, wpg_addr); //-------------------------------------------------------------------- // WRITE - step 3 : issue start operation,I2C master control bit 30:ON // 2020 : [20] OR operation at [20] offset 0x20 data = WPG_I2CMCNTL_STARTOP_MASK; wpg_data = swab32(data); wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET + WPG_I2C_OR; writel(wpg_data, wpg_addr); //-------------------------------------------------------------------- // WRITE - step 4 : wait until start operation bit clears i = CMD_COMPLETE_TOUT_SEC; while (i) { msleep(10); wpg_addr = WPGBbar + WPG_I2CMCNTL_OFFSET; wpg_data = readl(wpg_addr); data = swab32(wpg_data); if (!(data & WPG_I2CMCNTL_STARTOP_MASK)) break; i--; } if (i == 0) { debug("%s - Exit Error:WPG timeout\n", __func__); rc = HPC_ERROR; } //-------------------------------------------------------------------- // WRITE - step 5 : read I2C status register i = CMD_COMPLETE_TOUT_SEC; while (i) { msleep(10); wpg_addr = WPGBbar + WPG_I2CSTAT_OFFSET; wpg_data = readl(wpg_addr); data = swab32(wpg_data); if (HPC_I2CSTATUS_CHECK(data)) break; i--; } if (i == 0) { debug("ctrl_read - Error : I2C timeout\n"); rc = HPC_ERROR; } debug_polling("%s Exit 
rc[%x]\n", __func__, rc); return (rc); } //------------------------------------------------------------ // Read from ISA type HPC //------------------------------------------------------------ static u8 isa_ctrl_read(struct controller *ctlr_ptr, u8 offset) { u16 start_address; u8 data; start_address = ctlr_ptr->u.isa_ctlr.io_start; data = inb(start_address + offset); return data; } //-------------------------------------------------------------- // Write to ISA type HPC //-------------------------------------------------------------- static void isa_ctrl_write(struct controller *ctlr_ptr, u8 offset, u8 data) { u16 start_address; u16 port_address; start_address = ctlr_ptr->u.isa_ctlr.io_start; port_address = start_address + (u16) offset; outb(data, port_address); } static u8 pci_ctrl_read(struct controller *ctrl, u8 offset) { u8 data = 0x00; debug("inside pci_ctrl_read\n"); if (ctrl->ctrl_dev) pci_read_config_byte(ctrl->ctrl_dev, HPC_PCI_OFFSET + offset, &data); return data; } static u8 pci_ctrl_write(struct controller *ctrl, u8 offset, u8 data) { u8 rc = -ENODEV; debug("inside pci_ctrl_write\n"); if (ctrl->ctrl_dev) { pci_write_config_byte(ctrl->ctrl_dev, HPC_PCI_OFFSET + offset, data); rc = 0; } return rc; } static u8 ctrl_read(struct controller *ctlr, void __iomem *base, u8 offset) { u8 rc; switch (ctlr->ctlr_type) { case 0: rc = isa_ctrl_read(ctlr, offset); break; case 1: rc = pci_ctrl_read(ctlr, offset); break; case 2: case 4: rc = i2c_ctrl_read(ctlr, base, offset); break; default: return -ENODEV; } return rc; } static u8 ctrl_write(struct controller *ctlr, void __iomem *base, u8 offset, u8 data) { u8 rc = 0; switch (ctlr->ctlr_type) { case 0: isa_ctrl_write(ctlr, offset, data); break; case 1: rc = pci_ctrl_write(ctlr, offset, data); break; case 2: case 4: rc = i2c_ctrl_write(ctlr, base, offset, data); break; default: return -ENODEV; } return rc; } /*---------------------------------------------------------------------- * Name: hpc_writecmdtoindex() * * Action: convert a write command to proper index within a controller * * Return index, HPC_ERROR *---------------------------------------------------------------------*/ static u8 hpc_writecmdtoindex(u8 cmd, u8 index) { u8 rc; switch (cmd) { case HPC_CTLR_ENABLEIRQ: // 0x00.N.15 case HPC_CTLR_CLEARIRQ: // 0x06.N.15 case HPC_CTLR_RESET: // 0x07.N.15 case HPC_CTLR_IRQSTEER: // 0x08.N.15 case HPC_CTLR_DISABLEIRQ: // 0x01.N.15 case HPC_ALLSLOT_ON: // 0x11.N.15 case HPC_ALLSLOT_OFF: // 0x12.N.15 rc = 0x0F; break; case HPC_SLOT_OFF: // 0x02.Y.0-14 case HPC_SLOT_ON: // 0x03.Y.0-14 case HPC_SLOT_ATTNOFF: // 0x04.N.0-14 case HPC_SLOT_ATTNON: // 0x05.N.0-14 case HPC_SLOT_BLINKLED: // 0x13.N.0-14 rc = index; break; case HPC_BUS_33CONVMODE: case HPC_BUS_66CONVMODE: case HPC_BUS_66PCIXMODE: case HPC_BUS_100PCIXMODE: case HPC_BUS_133PCIXMODE: rc = index + WPG_1ST_BUS_INDEX - 1; break; default: err("hpc_writecmdtoindex - Error invalid cmd[%x]\n", cmd); rc = HPC_ERROR; } return rc; } /*---------------------------------------------------------------------- * Name: hpc_readcmdtoindex() * * Action: convert a read command to proper index within a controller * * Return index, HPC_ERROR *---------------------------------------------------------------------*/ static u8 hpc_readcmdtoindex(u8 cmd, u8 index) { u8 rc; switch (cmd) { case READ_CTLRSTATUS: rc = 0x0F; break; case READ_SLOTSTATUS: case READ_ALLSTAT: rc = index; break; case READ_EXTSLOTSTATUS: rc = index + WPG_1ST_EXTSLOT_INDEX; break; case READ_BUSSTATUS: rc = index + WPG_1ST_BUS_INDEX - 1; break; 
case READ_SLOTLATCHLOWREG: rc = 0x28; break; case READ_REVLEVEL: rc = 0x25; break; case READ_HPCOPTIONS: rc = 0x27; break; default: rc = HPC_ERROR; } return rc; } /*---------------------------------------------------------------------- * Name: HPCreadslot() * * Action: issue a READ command to HPC * * Input: pslot - cannot be NULL for READ_ALLSTAT * pstatus - can be NULL for READ_ALLSTAT * * Return 0 or error codes *---------------------------------------------------------------------*/ int ibmphp_hpc_readslot(struct slot *pslot, u8 cmd, u8 *pstatus) { void __iomem *wpg_bbar = NULL; struct controller *ctlr_ptr; u8 index, status; int rc = 0; int busindex; debug_polling("%s - Entry pslot[%p] cmd[%x] pstatus[%p]\n", __func__, pslot, cmd, pstatus); if ((pslot == NULL) || ((pstatus == NULL) && (cmd != READ_ALLSTAT) && (cmd != READ_BUSSTATUS))) { rc = -EINVAL; err("%s - Error invalid pointer, rc[%d]\n", __func__, rc); return rc; } if (cmd == READ_BUSSTATUS) { busindex = ibmphp_get_bus_index(pslot->bus); if (busindex < 0) { rc = -EINVAL; err("%s - Exit Error:invalid bus, rc[%d]\n", __func__, rc); return rc; } else index = (u8) busindex; } else index = pslot->ctlr_index; index = hpc_readcmdtoindex(cmd, index); if (index == HPC_ERROR) { rc = -EINVAL; err("%s - Exit Error:invalid index, rc[%d]\n", __func__, rc); return rc; } ctlr_ptr = pslot->ctrl; get_hpc_access(); //-------------------------------------------------------------------- // map physical address to logical address //-------------------------------------------------------------------- if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4)) wpg_bbar = ioremap(ctlr_ptr->u.wpeg_ctlr.wpegbbar, WPG_I2C_IOREMAP_SIZE); //-------------------------------------------------------------------- // check controller status before reading //-------------------------------------------------------------------- rc = hpc_wait_ctlr_notworking(HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar, &status); if (!rc) { switch (cmd) { case READ_ALLSTAT: // update the slot structure pslot->ctrl->status = status; pslot->status = ctrl_read(ctlr_ptr, wpg_bbar, index); rc = hpc_wait_ctlr_notworking(HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar, &status); if (!rc) pslot->ext_status = ctrl_read(ctlr_ptr, wpg_bbar, index + WPG_1ST_EXTSLOT_INDEX); break; case READ_SLOTSTATUS: // DO NOT update the slot structure *pstatus = ctrl_read(ctlr_ptr, wpg_bbar, index); break; case READ_EXTSLOTSTATUS: // DO NOT update the slot structure *pstatus = ctrl_read(ctlr_ptr, wpg_bbar, index); break; case READ_CTLRSTATUS: // DO NOT update the slot structure *pstatus = status; break; case READ_BUSSTATUS: pslot->busstatus = ctrl_read(ctlr_ptr, wpg_bbar, index); break; case READ_REVLEVEL: *pstatus = ctrl_read(ctlr_ptr, wpg_bbar, index); break; case READ_HPCOPTIONS: *pstatus = ctrl_read(ctlr_ptr, wpg_bbar, index); break; case READ_SLOTLATCHLOWREG: // DO NOT update the slot structure *pstatus = ctrl_read(ctlr_ptr, wpg_bbar, index); break; // Not used case READ_ALLSLOT: list_for_each_entry(pslot, &ibmphp_slot_head, ibm_slot_list) { index = pslot->ctlr_index; rc = hpc_wait_ctlr_notworking(HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar, &status); if (!rc) { pslot->status = ctrl_read(ctlr_ptr, wpg_bbar, index); rc = hpc_wait_ctlr_notworking(HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar, &status); if (!rc) pslot->ext_status = ctrl_read(ctlr_ptr, wpg_bbar, index + WPG_1ST_EXTSLOT_INDEX); } else { err("%s - Error ctrl_read failed\n", __func__); rc = -EINVAL; break; } } break; default: rc = -EINVAL; break; } } 
//-------------------------------------------------------------------- // cleanup //-------------------------------------------------------------------- // remove physical to logical address mapping if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4)) iounmap(wpg_bbar); free_hpc_access(); debug_polling("%s - Exit rc[%d]\n", __func__, rc); return rc; } /*---------------------------------------------------------------------- * Name: ibmphp_hpc_writeslot() * * Action: issue a WRITE command to HPC *---------------------------------------------------------------------*/ int ibmphp_hpc_writeslot(struct slot *pslot, u8 cmd) { void __iomem *wpg_bbar = NULL; struct controller *ctlr_ptr; u8 index, status; int busindex; u8 done; int rc = 0; int timeout; debug_polling("%s - Entry pslot[%p] cmd[%x]\n", __func__, pslot, cmd); if (pslot == NULL) { rc = -EINVAL; err("%s - Error Exit rc[%d]\n", __func__, rc); return rc; } if ((cmd == HPC_BUS_33CONVMODE) || (cmd == HPC_BUS_66CONVMODE) || (cmd == HPC_BUS_66PCIXMODE) || (cmd == HPC_BUS_100PCIXMODE) || (cmd == HPC_BUS_133PCIXMODE)) { busindex = ibmphp_get_bus_index(pslot->bus); if (busindex < 0) { rc = -EINVAL; err("%s - Exit Error:invalid bus, rc[%d]\n", __func__, rc); return rc; } else index = (u8) busindex; } else index = pslot->ctlr_index; index = hpc_writecmdtoindex(cmd, index); if (index == HPC_ERROR) { rc = -EINVAL; err("%s - Error Exit rc[%d]\n", __func__, rc); return rc; } ctlr_ptr = pslot->ctrl; get_hpc_access(); //-------------------------------------------------------------------- // map physical address to logical address //-------------------------------------------------------------------- if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4)) { wpg_bbar = ioremap(ctlr_ptr->u.wpeg_ctlr.wpegbbar, WPG_I2C_IOREMAP_SIZE); debug("%s - ctlr id[%x] physical[%lx] logical[%lx] i2c[%x]\n", __func__, ctlr_ptr->ctlr_id, (ulong) (ctlr_ptr->u.wpeg_ctlr.wpegbbar), (ulong) wpg_bbar, ctlr_ptr->u.wpeg_ctlr.i2c_addr); } //-------------------------------------------------------------------- // check controller status before writing //-------------------------------------------------------------------- rc = hpc_wait_ctlr_notworking(HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar, &status); if (!rc) { ctrl_write(ctlr_ptr, wpg_bbar, index, cmd); //-------------------------------------------------------------------- // check controller is still not working on the command //-------------------------------------------------------------------- timeout = CMD_COMPLETE_TOUT_SEC; done = 0; while (!done) { rc = hpc_wait_ctlr_notworking(HPC_CTLR_WORKING_TOUT, ctlr_ptr, wpg_bbar, &status); if (!rc) { if (NEEDTOCHECK_CMDSTATUS(cmd)) { if (CTLR_FINISHED(status) == HPC_CTLR_FINISHED_YES) done = 1; } else done = 1; } if (!done) { msleep(1000); if (timeout < 1) { done = 1; err("%s - Error command complete timeout\n", __func__); rc = -EFAULT; } else timeout--; } } ctlr_ptr->status = status; } // cleanup // remove physical to logical address mapping if ((ctlr_ptr->ctlr_type == 2) || (ctlr_ptr->ctlr_type == 4)) iounmap(wpg_bbar); free_hpc_access(); debug_polling("%s - Exit rc[%d]\n", __func__, rc); return rc; } /*---------------------------------------------------------------------- * Name: get_hpc_access() * * Action: make sure only one process can access HPC at one time *---------------------------------------------------------------------*/ static void get_hpc_access(void) { mutex_lock(&sem_hpcaccess); } 
/*---------------------------------------------------------------------- * Name: free_hpc_access() *---------------------------------------------------------------------*/ void free_hpc_access(void) { mutex_unlock(&sem_hpcaccess); } /*---------------------------------------------------------------------- * Name: ibmphp_lock_operations() * * Action: make sure only one process can change the data structure *---------------------------------------------------------------------*/ void ibmphp_lock_operations(void) { mutex_lock(&operations_mutex); to_debug = 1; } /*---------------------------------------------------------------------- * Name: ibmphp_unlock_operations() *---------------------------------------------------------------------*/ void ibmphp_unlock_operations(void) { debug("%s - Entry\n", __func__); mutex_unlock(&operations_mutex); to_debug = 0; debug("%s - Exit\n", __func__); } /*---------------------------------------------------------------------- * Name: poll_hpc() *---------------------------------------------------------------------*/ #define POLL_LATCH_REGISTER 0 #define POLL_SLOTS 1 #define POLL_SLEEP 2 static int poll_hpc(void *data) { struct slot myslot; struct slot *pslot = NULL; int rc; int poll_state = POLL_LATCH_REGISTER; u8 oldlatchlow = 0x00; u8 curlatchlow = 0x00; int poll_count = 0; u8 ctrl_count = 0x00; debug("%s - Entry\n", __func__); while (!kthread_should_stop()) { /* try to get the lock to do some kind of hardware access */ mutex_lock(&operations_mutex); switch (poll_state) { case POLL_LATCH_REGISTER: oldlatchlow = curlatchlow; ctrl_count = 0x00; list_for_each_entry(pslot, &ibmphp_slot_head, ibm_slot_list) { if (ctrl_count >= ibmphp_get_total_controllers()) break; if (pslot->ctrl->ctlr_relative_id == ctrl_count) { ctrl_count++; if (READ_SLOT_LATCH(pslot->ctrl)) { rc = ibmphp_hpc_readslot(pslot, READ_SLOTLATCHLOWREG, &curlatchlow); if (oldlatchlow != curlatchlow) process_changeinlatch(oldlatchlow, curlatchlow, pslot->ctrl); } } } ++poll_count; poll_state = POLL_SLEEP; break; case POLL_SLOTS: list_for_each_entry(pslot, &ibmphp_slot_head, ibm_slot_list) { // make a copy of the old status memcpy((void *) &myslot, (void *) pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_ALLSTAT, NULL); if ((myslot.status != pslot->status) || (myslot.ext_status != pslot->ext_status)) process_changeinstatus(pslot, &myslot); } ctrl_count = 0x00; list_for_each_entry(pslot, &ibmphp_slot_head, ibm_slot_list) { if (ctrl_count >= ibmphp_get_total_controllers()) break; if (pslot->ctrl->ctlr_relative_id == ctrl_count) { ctrl_count++; if (READ_SLOT_LATCH(pslot->ctrl)) rc = ibmphp_hpc_readslot(pslot, READ_SLOTLATCHLOWREG, &curlatchlow); } } ++poll_count; poll_state = POLL_SLEEP; break; case POLL_SLEEP: /* don't sleep with a lock on the hardware */ mutex_unlock(&operations_mutex); msleep(POLL_INTERVAL_SEC * 1000); if (kthread_should_stop()) goto out_sleep; mutex_lock(&operations_mutex); if (poll_count >= POLL_LATCH_CNT) { poll_count = 0; poll_state = POLL_SLOTS; } else poll_state = POLL_LATCH_REGISTER; break; } /* give up the hardware semaphore */ mutex_unlock(&operations_mutex); /* sleep for a short time just for good measure */ out_sleep: msleep(100); } complete(&exit_complete); debug("%s - Exit\n", __func__); return 0; } /*---------------------------------------------------------------------- * Name: process_changeinstatus * * Action: compare old and new slot status, process the change in status * * Input: pointer to slot struct, old slot struct * * Return 0 or error codes * 
Value: * * Side * Effects: None. * * Notes: *---------------------------------------------------------------------*/ static int process_changeinstatus(struct slot *pslot, struct slot *poldslot) { u8 status; int rc = 0; u8 disable = 0; u8 update = 0; debug("process_changeinstatus - Entry pslot[%p], poldslot[%p]\n", pslot, poldslot); // bit 0 - HPC_SLOT_POWER if ((pslot->status & 0x01) != (poldslot->status & 0x01)) update = 1; // bit 1 - HPC_SLOT_CONNECT // ignore // bit 2 - HPC_SLOT_ATTN if ((pslot->status & 0x04) != (poldslot->status & 0x04)) update = 1; // bit 3 - HPC_SLOT_PRSNT2 // bit 4 - HPC_SLOT_PRSNT1 if (((pslot->status & 0x08) != (poldslot->status & 0x08)) || ((pslot->status & 0x10) != (poldslot->status & 0x10))) update = 1; // bit 5 - HPC_SLOT_PWRGD if ((pslot->status & 0x20) != (poldslot->status & 0x20)) // OFF -> ON: ignore, ON -> OFF: disable slot if ((poldslot->status & 0x20) && (SLOT_CONNECT(poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT(poldslot->status))) disable = 1; // bit 6 - HPC_SLOT_BUS_SPEED // ignore // bit 7 - HPC_SLOT_LATCH if ((pslot->status & 0x80) != (poldslot->status & 0x80)) { update = 1; // OPEN -> CLOSE if (pslot->status & 0x80) { if (SLOT_PWRGD(pslot->status)) { // power goes on and off after closing latch // check again to make sure power is still ON msleep(1000); rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS, &status); if (SLOT_PWRGD(status)) update = 1; else // overwrite power in pslot to OFF pslot->status &= ~HPC_SLOT_POWER; } } // CLOSE -> OPEN else if ((SLOT_PWRGD(poldslot->status) == HPC_SLOT_PWRGD_GOOD) && (SLOT_CONNECT(poldslot->status) == HPC_SLOT_CONNECTED) && (SLOT_PRESENT(poldslot->status))) { disable = 1; } // else - ignore } // bit 4 - HPC_SLOT_BLINK_ATTN if ((pslot->ext_status & 0x08) != (poldslot->ext_status & 0x08)) update = 1; if (disable) { debug("process_changeinstatus - disable slot\n"); pslot->flag = 0; rc = ibmphp_do_disable_slot(pslot); } if (update || disable) ibmphp_update_slot_info(pslot); debug("%s - Exit rc[%d] disable[%x] update[%x]\n", __func__, rc, disable, update); return rc; } /*---------------------------------------------------------------------- * Name: process_changeinlatch * * Action: compare old and new latch reg status, process the change * * Input: old and current latch register status * * Return 0 or error codes * Value: *---------------------------------------------------------------------*/ static int process_changeinlatch(u8 old, u8 new, struct controller *ctrl) { struct slot myslot, *pslot; u8 i; u8 mask; int rc = 0; debug("%s - Entry old[%x], new[%x]\n", __func__, old, new); // bit 0 reserved, 0 is LSB, check bit 1-6 for 6 slots for (i = ctrl->starting_slot_num; i <= ctrl->ending_slot_num; i++) { mask = 0x01 << i; if ((mask & old) != (mask & new)) { pslot = ibmphp_get_slot_from_physical_num(i); if (pslot) { memcpy((void *) &myslot, (void *) pslot, sizeof(struct slot)); rc = ibmphp_hpc_readslot(pslot, READ_ALLSTAT, NULL); debug("%s - call process_changeinstatus for slot[%d]\n", __func__, i); process_changeinstatus(pslot, &myslot); } else { rc = -EINVAL; err("%s - Error bad pointer for slot[%d]\n", __func__, i); } } } debug("%s - Exit rc[%d]\n", __func__, rc); return rc; } /*---------------------------------------------------------------------- * Name: ibmphp_hpc_start_poll_thread * * Action: start polling thread *---------------------------------------------------------------------*/ int __init ibmphp_hpc_start_poll_thread(void) { debug("%s - Entry\n", __func__); ibmphp_poll_thread = 
kthread_run(poll_hpc, NULL, "hpc_poll"); if (IS_ERR(ibmphp_poll_thread)) { err("%s - Error, thread not started\n", __func__); return PTR_ERR(ibmphp_poll_thread); } return 0; } /*---------------------------------------------------------------------- * Name: ibmphp_hpc_stop_poll_thread * * Action: stop polling thread and cleanup *---------------------------------------------------------------------*/ void __exit ibmphp_hpc_stop_poll_thread(void) { debug("%s - Entry\n", __func__); kthread_stop(ibmphp_poll_thread); debug("before locking operations\n"); ibmphp_lock_operations(); debug("after locking operations\n"); // wait for poll thread to exit debug("before exit_complete down\n"); wait_for_completion(&exit_complete); debug("after exit_completion down\n"); // cleanup debug("before free_hpc_access\n"); free_hpc_access(); debug("after free_hpc_access\n"); ibmphp_unlock_operations(); debug("after unlock operations\n"); debug("%s - Exit\n", __func__); } /*---------------------------------------------------------------------- * Name: hpc_wait_ctlr_notworking * * Action: wait until the controller is in a not working state * * Return 0, HPC_ERROR * Value: *---------------------------------------------------------------------*/ static int hpc_wait_ctlr_notworking(int timeout, struct controller *ctlr_ptr, void __iomem *wpg_bbar, u8 *pstatus) { int rc = 0; u8 done = 0; debug_polling("hpc_wait_ctlr_notworking - Entry timeout[%d]\n", timeout); while (!done) { *pstatus = ctrl_read(ctlr_ptr, wpg_bbar, WPG_CTLR_INDEX); if (*pstatus == HPC_ERROR) { rc = HPC_ERROR; done = 1; } if (CTLR_WORKING(*pstatus) == HPC_CTLR_WORKING_NO) done = 1; if (!done) { msleep(1000); if (timeout < 1) { done = 1; err("HPCreadslot - Error ctlr timeout\n"); rc = HPC_ERROR; } else timeout--; } } debug_polling("hpc_wait_ctlr_notworking - Exit rc[%x] status[%x]\n", rc, *pstatus); return rc; }
linux-master
drivers/pci/hotplug/ibmphp_hpc.c
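In i2c_ctrl_read() above, step 1 assembles the I2C Master Operation Setup word for the "read at address" case: the command mask, the controller's I2C address shifted right by one bit and placed at bit 8, and the register index in the low byte, with the whole word byte-swapped before the MMIO write. The sketch below reproduces just that word assembly in userspace; the mask value is copied from the defines above, and build_mosup() plus the local swab32() are illustrative helpers, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

#define WPG_READATADDR_MASK 0x00010000u   /* read, byte count, I2C-addressed */

static uint32_t swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
	       ((x << 8) & 0x00ff0000u) | (x << 24);
}

static uint32_t build_mosup(uint8_t i2c_addr, uint8_t index)
{
	uint32_t data = WPG_READATADDR_MASK;

	data |= (uint32_t)(i2c_addr >> 1) << 8;   /* stored address is shifted right once, then placed at bit 8 */
	data |= index;
	return swab32(data);                      /* register expects the byte-swapped form */
}

int main(void)
{
	/* 0x0f is the controller index (WPG_CTLR_INDEX) used in the defines above */
	printf("MOSUP word = 0x%08x\n", (unsigned)build_mosup(0x40, 0x0f));
	return 0;
}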
// SPDX-License-Identifier: GPL-2.0+ /* * PCI Express Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * Send feedback to <[email protected]>, <[email protected]> * * Authors: * Dan Zink <[email protected]> * Greg Kroah-Hartman <[email protected]> * Dely Sy <[email protected]>" */ #define pr_fmt(fmt) "pciehp: " fmt #define dev_fmt pr_fmt #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/pci.h> #include "pciehp.h" #include "../pci.h" /* Global variables */ bool pciehp_poll_mode; int pciehp_poll_time; /* * not really modular, but the easiest way to keep compat with existing * bootargs behaviour is to continue using module_param here. */ module_param(pciehp_poll_mode, bool, 0644); module_param(pciehp_poll_time, int, 0644); MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); static int set_attention_status(struct hotplug_slot *slot, u8 value); static int get_power_status(struct hotplug_slot *slot, u8 *value); static int get_latch_status(struct hotplug_slot *slot, u8 *value); static int get_adapter_status(struct hotplug_slot *slot, u8 *value); static int init_slot(struct controller *ctrl) { struct hotplug_slot_ops *ops; char name[SLOT_NAME_SIZE]; int retval; /* Setup hotplug slot ops */ ops = kzalloc(sizeof(*ops), GFP_KERNEL); if (!ops) return -ENOMEM; ops->enable_slot = pciehp_sysfs_enable_slot; ops->disable_slot = pciehp_sysfs_disable_slot; ops->get_power_status = get_power_status; ops->get_adapter_status = get_adapter_status; ops->reset_slot = pciehp_reset_slot; if (MRL_SENS(ctrl)) ops->get_latch_status = get_latch_status; if (ATTN_LED(ctrl)) { ops->get_attention_status = pciehp_get_attention_status; ops->set_attention_status = set_attention_status; } else if (ctrl->pcie->port->hotplug_user_indicators) { ops->get_attention_status = pciehp_get_raw_indicator_status; ops->set_attention_status = pciehp_set_raw_indicator_status; } /* register this slot with the hotplug pci core */ ctrl->hotplug_slot.ops = ops; snprintf(name, SLOT_NAME_SIZE, "%u", PSN(ctrl)); retval = pci_hp_initialize(&ctrl->hotplug_slot, ctrl->pcie->port->subordinate, 0, name); if (retval) { ctrl_err(ctrl, "pci_hp_initialize failed: error %d\n", retval); kfree(ops); } return retval; } static void cleanup_slot(struct controller *ctrl) { struct hotplug_slot *hotplug_slot = &ctrl->hotplug_slot; pci_hp_destroy(hotplug_slot); kfree(hotplug_slot->ops); } /* * set_attention_status - Turns the Attention Indicator on, off or blinking */ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl->pcie->port; if (status) status <<= PCI_EXP_SLTCTL_ATTN_IND_SHIFT; else status = PCI_EXP_SLTCTL_ATTN_IND_OFF; pci_config_pm_runtime_get(pdev); pciehp_set_indicators(ctrl, INDICATOR_NOOP, status); pci_config_pm_runtime_put(pdev); return 0; } static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl->pcie->port; pci_config_pm_runtime_get(pdev); pciehp_get_power_status(ctrl, value); pci_config_pm_runtime_put(pdev); return 0; } static int get_latch_status(struct hotplug_slot 
*hotplug_slot, u8 *value) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl->pcie->port; pci_config_pm_runtime_get(pdev); pciehp_get_latch_status(ctrl, value); pci_config_pm_runtime_put(pdev); return 0; } static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct controller *ctrl = to_ctrl(hotplug_slot); struct pci_dev *pdev = ctrl->pcie->port; int ret; pci_config_pm_runtime_get(pdev); ret = pciehp_card_present_or_link_active(ctrl); pci_config_pm_runtime_put(pdev); if (ret < 0) return ret; *value = ret; return 0; } /** * pciehp_check_presence() - synthesize event if presence has changed * @ctrl: controller to check * * On probe and resume, an explicit presence check is necessary to bring up an * occupied slot or bring down an unoccupied slot. This can't be triggered by * events in the Slot Status register, they may be stale and are therefore * cleared. Secondly, sending an interrupt for "events that occur while * interrupt generation is disabled [when] interrupt generation is subsequently * enabled" is optional per PCIe r4.0, sec 6.7.3.4. */ static void pciehp_check_presence(struct controller *ctrl) { int occupied; down_read_nested(&ctrl->reset_lock, ctrl->depth); mutex_lock(&ctrl->state_lock); occupied = pciehp_card_present_or_link_active(ctrl); if ((occupied > 0 && (ctrl->state == OFF_STATE || ctrl->state == BLINKINGON_STATE)) || (!occupied && (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE))) pciehp_request(ctrl, PCI_EXP_SLTSTA_PDC); mutex_unlock(&ctrl->state_lock); up_read(&ctrl->reset_lock); } static int pciehp_probe(struct pcie_device *dev) { int rc; struct controller *ctrl; /* If this is not a "hotplug" service, we have no business here. */ if (dev->service != PCIE_PORT_SERVICE_HP) return -ENODEV; if (!dev->port->subordinate) { /* Can happen if we run out of bus numbers during probe */ pci_err(dev->port, "Hotplug bridge without secondary bus, ignoring\n"); return -ENODEV; } ctrl = pcie_init(dev); if (!ctrl) { pci_err(dev->port, "Controller initialization failed\n"); return -ENODEV; } set_service_data(dev, ctrl); /* Setup the slot information structures */ rc = init_slot(ctrl); if (rc) { if (rc == -EBUSY) ctrl_warn(ctrl, "Slot already registered by another hotplug driver\n"); else ctrl_err(ctrl, "Slot initialization failed (%d)\n", rc); goto err_out_release_ctlr; } /* Enable events after we have setup the data structures */ rc = pcie_init_notification(ctrl); if (rc) { ctrl_err(ctrl, "Notification initialization failed (%d)\n", rc); goto err_out_free_ctrl_slot; } /* Publish to user space */ rc = pci_hp_add(&ctrl->hotplug_slot); if (rc) { ctrl_err(ctrl, "Publication to user space failed (%d)\n", rc); goto err_out_shutdown_notification; } pciehp_check_presence(ctrl); return 0; err_out_shutdown_notification: pcie_shutdown_notification(ctrl); err_out_free_ctrl_slot: cleanup_slot(ctrl); err_out_release_ctlr: pciehp_release_ctrl(ctrl); return -ENODEV; } static void pciehp_remove(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); pci_hp_del(&ctrl->hotplug_slot); pcie_shutdown_notification(ctrl); cleanup_slot(ctrl); pciehp_release_ctrl(ctrl); } #ifdef CONFIG_PM static bool pme_is_native(struct pcie_device *dev) { const struct pci_host_bridge *host; host = pci_find_host_bridge(dev->port->bus); return pcie_ports_native || host->native_pme; } static void pciehp_disable_interrupt(struct pcie_device *dev) { /* * Disable hotplug interrupt so that it does not trigger * immediately when the downstream link 
goes down. */ if (pme_is_native(dev)) pcie_disable_interrupt(get_service_data(dev)); } #ifdef CONFIG_PM_SLEEP static int pciehp_suspend(struct pcie_device *dev) { /* * If the port is already runtime suspended we can keep it that * way. */ if (dev_pm_skip_suspend(&dev->port->dev)) return 0; pciehp_disable_interrupt(dev); return 0; } static int pciehp_resume_noirq(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); /* pci_restore_state() just wrote to the Slot Control register */ ctrl->cmd_started = jiffies; ctrl->cmd_busy = true; /* clear spurious events from rediscovery of inserted card */ if (ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE) pcie_clear_hotplug_events(ctrl); return 0; } #endif static int pciehp_resume(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); if (pme_is_native(dev)) pcie_enable_interrupt(ctrl); pciehp_check_presence(ctrl); return 0; } static int pciehp_runtime_suspend(struct pcie_device *dev) { pciehp_disable_interrupt(dev); return 0; } static int pciehp_runtime_resume(struct pcie_device *dev) { struct controller *ctrl = get_service_data(dev); /* pci_restore_state() just wrote to the Slot Control register */ ctrl->cmd_started = jiffies; ctrl->cmd_busy = true; /* clear spurious events from rediscovery of inserted card */ if ((ctrl->state == ON_STATE || ctrl->state == BLINKINGOFF_STATE) && pme_is_native(dev)) pcie_clear_hotplug_events(ctrl); return pciehp_resume(dev); } #endif /* PM */ static struct pcie_port_service_driver hpdriver_portdrv = { .name = "pciehp", .port_type = PCIE_ANY_PORT, .service = PCIE_PORT_SERVICE_HP, .probe = pciehp_probe, .remove = pciehp_remove, #ifdef CONFIG_PM #ifdef CONFIG_PM_SLEEP .suspend = pciehp_suspend, .resume_noirq = pciehp_resume_noirq, .resume = pciehp_resume, #endif .runtime_suspend = pciehp_runtime_suspend, .runtime_resume = pciehp_runtime_resume, #endif /* PM */ .slot_reset = pciehp_slot_reset, }; int __init pcie_hp_init(void) { int retval = 0; retval = pcie_port_service_register(&hpdriver_portdrv); pr_debug("pcie_port_service_register = %d\n", retval); if (retval) pr_debug("Failure to register service\n"); return retval; }
linux-master
drivers/pci/hotplug/pciehp_core.c
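set_attention_status() in pciehp_core.c above shifts the user-supplied value into the two-bit Attention Indicator Control field of the PCIe Slot Control register (bits 7:6), and uses the "off" encoding when the value is zero. The userspace sketch below shows that mapping on its own; the register constants are restated here for the standalone example rather than taken from a kernel header, and attn_field() is an illustrative helper.

#include <stdint.h>
#include <stdio.h>

#define SLTCTL_ATTN_IND_SHIFT 6
#define SLTCTL_ATTN_IND_ON    0x0040
#define SLTCTL_ATTN_IND_BLINK 0x0080
#define SLTCTL_ATTN_IND_OFF   0x00C0

static uint16_t attn_field(uint8_t user_value)
{
	/* 0 = off, 1 = on, 2 = blink; the usual mapping of the sysfs "attention" value */
	if (user_value)
		return (uint16_t)user_value << SLTCTL_ATTN_IND_SHIFT;
	return SLTCTL_ATTN_IND_OFF;
}

int main(void)
{
	for (uint8_t v = 0; v <= 2; v++)
		printf("attention=%u -> Slot Control field 0x%04x\n",
		       (unsigned)v, attn_field(v));
	return 0;
}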
// SPDX-License-Identifier: GPL-2.0+ /* * Compaq Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/proc_fs.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include "../pci.h" #include "cpqphp.h" #include "cpqphp_nvram.h" u8 cpqhp_nic_irq; u8 cpqhp_disk_irq; static u16 unused_IRQ; /* * detect_HRT_floating_pointer * * find the Hot Plug Resource Table in the specified region of memory. * */ static void __iomem *detect_HRT_floating_pointer(void __iomem *begin, void __iomem *end) { void __iomem *fp; void __iomem *endp; u8 temp1, temp2, temp3, temp4; int status = 0; endp = (end - sizeof(struct hrt) + 1); for (fp = begin; fp <= endp; fp += 16) { temp1 = readb(fp + SIG0); temp2 = readb(fp + SIG1); temp3 = readb(fp + SIG2); temp4 = readb(fp + SIG3); if (temp1 == '$' && temp2 == 'H' && temp3 == 'R' && temp4 == 'T') { status = 1; break; } } if (!status) fp = NULL; dbg("Discovered Hotplug Resource Table at %p\n", fp); return fp; } int cpqhp_configure_device(struct controller *ctrl, struct pci_func *func) { struct pci_bus *child; int num; pci_lock_rescan_remove(); if (func->pci_dev == NULL) func->pci_dev = pci_get_domain_bus_and_slot(0, func->bus, PCI_DEVFN(func->device, func->function)); /* No pci device, we need to create it then */ if (func->pci_dev == NULL) { dbg("INFO: pci_dev still null\n"); num = pci_scan_slot(ctrl->pci_dev->bus, PCI_DEVFN(func->device, func->function)); if (num) pci_bus_add_devices(ctrl->pci_dev->bus); func->pci_dev = pci_get_domain_bus_and_slot(0, func->bus, PCI_DEVFN(func->device, func->function)); if (func->pci_dev == NULL) { dbg("ERROR: pci_dev still null\n"); goto out; } } if (func->pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { pci_hp_add_bridge(func->pci_dev); child = func->pci_dev->subordinate; if (child) pci_bus_add_devices(child); } pci_dev_put(func->pci_dev); out: pci_unlock_rescan_remove(); return 0; } int cpqhp_unconfigure_device(struct pci_func *func) { int j; dbg("%s: bus/dev/func = %x/%x/%x\n", __func__, func->bus, func->device, func->function); pci_lock_rescan_remove(); for (j = 0; j < 8 ; j++) { struct pci_dev *temp = pci_get_domain_bus_and_slot(0, func->bus, PCI_DEVFN(func->device, j)); if (temp) { pci_dev_put(temp); pci_stop_and_remove_bus_device(temp); } } pci_unlock_rescan_remove(); return 0; } static int PCI_RefinedAccessConfig(struct pci_bus *bus, unsigned int devfn, u8 offset, u32 *value) { u32 vendID = 0; if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &vendID) == -1) return -1; if (vendID == 0xffffffff) return -1; return pci_bus_read_config_dword(bus, devfn, offset, value); } /* * cpqhp_set_irq * * @bus_num: bus number of PCI device * @dev_num: device number of PCI device * @slot: pointer to u8 where slot number will be returned */ int cpqhp_set_irq(u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num) { int rc = 0; if (cpqhp_legacy_mode) { struct pci_dev *fakedev; struct pci_bus *fakebus; u16 temp_word; fakedev = kmalloc(sizeof(*fakedev), GFP_KERNEL); fakebus = kmalloc(sizeof(*fakebus), GFP_KERNEL); if (!fakedev || !fakebus) { kfree(fakedev); kfree(fakebus); return -ENOMEM; } fakedev->devfn = dev_num << 3; fakedev->bus = fakebus; fakebus->number = bus_num; dbg("%s: dev %d, bus %d, pin %d, num %d\n", 
__func__, dev_num, bus_num, int_pin, irq_num); rc = pcibios_set_irq_routing(fakedev, int_pin - 1, irq_num); kfree(fakedev); kfree(fakebus); dbg("%s: rc %d\n", __func__, rc); if (!rc) return !rc; /* set the Edge Level Control Register (ELCR) */ temp_word = inb(0x4d0); temp_word |= inb(0x4d1) << 8; temp_word |= 0x01 << irq_num; /* This should only be for x86 as it sets the Edge Level * Control Register */ outb((u8)(temp_word & 0xFF), 0x4d0); outb((u8)((temp_word & 0xFF00) >> 8), 0x4d1); rc = 0; } return rc; } static int PCI_ScanBusForNonBridge(struct controller *ctrl, u8 bus_num, u8 *dev_num) { u16 tdevice; u32 work; u8 tbus; ctrl->pci_bus->number = bus_num; for (tdevice = 0; tdevice < 0xFF; tdevice++) { /* Scan for access first */ if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) continue; dbg("Looking for nonbridge bus_num %d dev_num %d\n", bus_num, tdevice); /* Yep we got one. Not a bridge ? */ if ((work >> 8) != PCI_TO_PCI_BRIDGE_CLASS) { *dev_num = tdevice; dbg("found it !\n"); return 0; } } for (tdevice = 0; tdevice < 0xFF; tdevice++) { /* Scan for access first */ if (PCI_RefinedAccessConfig(ctrl->pci_bus, tdevice, 0x08, &work) == -1) continue; dbg("Looking for bridge bus_num %d dev_num %d\n", bus_num, tdevice); /* Yep we got one. bridge ? */ if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(tdevice, 0), PCI_SECONDARY_BUS, &tbus); /* XXX: no recursion, wtf? */ dbg("Recurse on bus_num %d tdevice %d\n", tbus, tdevice); return 0; } } return -1; } static int PCI_GetBusDevHelper(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot, u8 nobridge) { int loop, len; u32 work; u8 tbus, tdevice, tslot; len = cpqhp_routing_table_length(); for (loop = 0; loop < len; ++loop) { tbus = cpqhp_routing_table->slots[loop].bus; tdevice = cpqhp_routing_table->slots[loop].devfn; tslot = cpqhp_routing_table->slots[loop].slot; if (tslot == slot) { *bus_num = tbus; *dev_num = tdevice; ctrl->pci_bus->number = tbus; pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_VENDOR_ID, &work); if (!nobridge || (work == 0xffffffff)) return 0; dbg("bus_num %d devfn %d\n", *bus_num, *dev_num); pci_bus_read_config_dword(ctrl->pci_bus, *dev_num, PCI_CLASS_REVISION, &work); dbg("work >> 8 (%x) = BRIDGE (%x)\n", work >> 8, PCI_TO_PCI_BRIDGE_CLASS); if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { pci_bus_read_config_byte(ctrl->pci_bus, *dev_num, PCI_SECONDARY_BUS, &tbus); dbg("Scan bus for Non Bridge: bus %d\n", tbus); if (PCI_ScanBusForNonBridge(ctrl, tbus, dev_num) == 0) { *bus_num = tbus; return 0; } } else return 0; } } return -1; } int cpqhp_get_bus_dev(struct controller *ctrl, u8 *bus_num, u8 *dev_num, u8 slot) { /* plain (bridges allowed) */ return PCI_GetBusDevHelper(ctrl, bus_num, dev_num, slot, 0); } /* More PCI configuration routines; this time centered around hotplug * controller */ /* * cpqhp_save_config * * Reads configuration for all slots in a PCI bus and saves info. 
* * Note: For non-hot plug buses, the slot # saved is the device # * * returns 0 if success */ int cpqhp_save_config(struct controller *ctrl, int busnumber, int is_hot_plug) { long rc; u8 class_code; u8 header_type; u32 ID; u8 secondary_bus; struct pci_func *new_slot; int sub_bus; int FirstSupported; int LastSupported; int max_functions; int function; u8 DevError; int device = 0; int cloop = 0; int stop_it; int index; u16 devfn; /* Decide which slots are supported */ if (is_hot_plug) { /* * is_hot_plug is the slot mask */ FirstSupported = is_hot_plug >> 4; LastSupported = FirstSupported + (is_hot_plug & 0x0F) - 1; } else { FirstSupported = 0; LastSupported = 0x1F; } /* Save PCI configuration space for all devices in supported slots */ ctrl->pci_bus->number = busnumber; for (device = FirstSupported; device <= LastSupported; device++) { ID = 0xFFFFFFFF; rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID); if (ID == 0xFFFFFFFF) { if (is_hot_plug) { /* Setup slot structure with entry for empty * slot */ new_slot = cpqhp_slot_create(busnumber); if (new_slot == NULL) return 1; new_slot->bus = (u8) busnumber; new_slot->device = (u8) device; new_slot->function = 0; new_slot->is_a_board = 0; new_slot->presence_save = 0; new_slot->switch_save = 0; } continue; } rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), 0x0B, &class_code); if (rc) return rc; rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, 0), PCI_HEADER_TYPE, &header_type); if (rc) return rc; /* If multi-function device, set max_functions to 8 */ if (header_type & 0x80) max_functions = 8; else max_functions = 1; function = 0; do { DevError = 0; if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* Recurse the subordinate bus * get the subordinate bus number */ rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_SECONDARY_BUS, &secondary_bus); if (rc) { return rc; } else { sub_bus = (int) secondary_bus; /* Save secondary bus cfg spc * with this recursive call. */ rc = cpqhp_save_config(ctrl, sub_bus, 0); if (rc) return rc; ctrl->pci_bus->number = busnumber; } } index = 0; new_slot = cpqhp_slot_find(busnumber, device, index++); while (new_slot && (new_slot->function != (u8) function)) new_slot = cpqhp_slot_find(busnumber, device, index++); if (!new_slot) { /* Setup slot structure. */ new_slot = cpqhp_slot_create(busnumber); if (new_slot == NULL) return 1; } new_slot->bus = (u8) busnumber; new_slot->device = (u8) device; new_slot->function = (u8) function; new_slot->is_a_board = 1; new_slot->switch_save = 0x10; /* In case of unsupported board */ new_slot->status = DevError; devfn = (new_slot->device << 3) | new_slot->function; new_slot->pci_dev = pci_get_domain_bus_and_slot(0, new_slot->bus, devfn); for (cloop = 0; cloop < 0x20; cloop++) { rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), cloop << 2, (u32 *) &(new_slot->config_space[cloop])); if (rc) return rc; } pci_dev_put(new_slot->pci_dev); function++; stop_it = 0; /* this loop skips to the next present function * reading in Class Code and Header type. 
*/ while ((function < max_functions) && (!stop_it)) { rc = pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_VENDOR_ID, &ID); if (ID == 0xFFFFFFFF) { function++; continue; } rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), 0x0B, &class_code); if (rc) return rc; rc = pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(device, function), PCI_HEADER_TYPE, &header_type); if (rc) return rc; stop_it++; } } while (function < max_functions); } /* End of FOR loop */ return 0; } /* * cpqhp_save_slot_config * * Saves configuration info for all PCI devices in a given slot * including subordinate buses. * * returns 0 if success */ int cpqhp_save_slot_config(struct controller *ctrl, struct pci_func *new_slot) { long rc; u8 class_code; u8 header_type; u32 ID; u8 secondary_bus; int sub_bus; int max_functions; int function = 0; int cloop; int stop_it; ID = 0xFFFFFFFF; ctrl->pci_bus->number = new_slot->bus; pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_VENDOR_ID, &ID); if (ID == 0xFFFFFFFF) return 2; pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), 0x0B, &class_code); pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, 0), PCI_HEADER_TYPE, &header_type); if (header_type & 0x80) /* Multi-function device */ max_functions = 8; else max_functions = 1; while (function < max_functions) { if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* Recurse the subordinate bus */ pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_SECONDARY_BUS, &secondary_bus); sub_bus = (int) secondary_bus; /* Save the config headers for the secondary * bus. */ rc = cpqhp_save_config(ctrl, sub_bus, 0); if (rc) return(rc); ctrl->pci_bus->number = new_slot->bus; } new_slot->status = 0; for (cloop = 0; cloop < 0x20; cloop++) pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), cloop << 2, (u32 *) &(new_slot->config_space[cloop])); function++; stop_it = 0; /* this loop skips to the next present function * reading in the Class Code and the Header type. */ while ((function < max_functions) && (!stop_it)) { pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_VENDOR_ID, &ID); if (ID == 0xFFFFFFFF) function++; else { pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), 0x0B, &class_code); pci_bus_read_config_byte(ctrl->pci_bus, PCI_DEVFN(new_slot->device, function), PCI_HEADER_TYPE, &header_type); stop_it++; } } } return 0; } /* * cpqhp_save_base_addr_length * * Saves the length of all base address registers for the * specified slot. 
this is for hot plug REPLACE * * returns 0 if success */ int cpqhp_save_base_addr_length(struct controller *ctrl, struct pci_func *func) { u8 cloop; u8 header_type; u8 secondary_bus; u8 type; int sub_bus; u32 temp_register; u32 base; u32 rc; struct pci_func *next; int index = 0; struct pci_bus *pci_bus = ctrl->pci_bus; unsigned int devfn; func = cpqhp_slot_find(func->bus, func->device, index++); while (func != NULL) { pci_bus->number = func->bus; devfn = PCI_DEVFN(func->device, func->function); /* Check for Bridge */ pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type); if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); sub_bus = (int) secondary_bus; next = cpqhp_slot_list[sub_bus]; while (next != NULL) { rc = cpqhp_save_base_addr_length(ctrl, next); if (rc) return rc; next = next->next; } pci_bus->number = func->bus; /* FIXME: this loop is duplicated in the non-bridge * case. The two could be rolled together Figure out * IO and memory base lengths */ for (cloop = 0x10; cloop <= 0x14; cloop += 4) { temp_register = 0xFFFFFFFF; pci_bus_write_config_dword(pci_bus, devfn, cloop, temp_register); pci_bus_read_config_dword(pci_bus, devfn, cloop, &base); /* If this register is implemented */ if (base) { if (base & 0x01L) { /* IO base * set base = amount of IO space * requested */ base = base & 0xFFFFFFFE; base = (~base) + 1; type = 1; } else { /* memory base */ base = base & 0xFFFFFFF0; base = (~base) + 1; type = 0; } } else { base = 0x0L; type = 0; } /* Save information in slot structure */ func->base_length[(cloop - 0x10) >> 2] = base; func->base_type[(cloop - 0x10) >> 2] = type; } /* End of base register loop */ } else if ((header_type & 0x7F) == 0x00) { /* Figure out IO and memory base lengths */ for (cloop = 0x10; cloop <= 0x24; cloop += 4) { temp_register = 0xFFFFFFFF; pci_bus_write_config_dword(pci_bus, devfn, cloop, temp_register); pci_bus_read_config_dword(pci_bus, devfn, cloop, &base); /* If this register is implemented */ if (base) { if (base & 0x01L) { /* IO base * base = amount of IO space * requested */ base = base & 0xFFFFFFFE; base = (~base) + 1; type = 1; } else { /* memory base * base = amount of memory * space requested */ base = base & 0xFFFFFFF0; base = (~base) + 1; type = 0; } } else { base = 0x0L; type = 0; } /* Save information in slot structure */ func->base_length[(cloop - 0x10) >> 2] = base; func->base_type[(cloop - 0x10) >> 2] = type; } /* End of base register loop */ } else { /* Some other unknown header type */ } /* find the next device in this slot */ func = cpqhp_slot_find(func->bus, func->device, index++); } return(0); } /* * cpqhp_save_used_resources * * Stores used resource information for existing boards. this is * for boards that were in the system when this driver was loaded. 
* this function is for hot plug ADD * * returns 0 if success */ int cpqhp_save_used_resources(struct controller *ctrl, struct pci_func *func) { u8 cloop; u8 header_type; u8 secondary_bus; u8 temp_byte; u8 b_base; u8 b_length; u16 command; u16 save_command; u16 w_base; u16 w_length; u32 temp_register; u32 save_base; u32 base; int index = 0; struct pci_resource *mem_node; struct pci_resource *p_mem_node; struct pci_resource *io_node; struct pci_resource *bus_node; struct pci_bus *pci_bus = ctrl->pci_bus; unsigned int devfn; func = cpqhp_slot_find(func->bus, func->device, index++); while ((func != NULL) && func->is_a_board) { pci_bus->number = func->bus; devfn = PCI_DEVFN(func->device, func->function); /* Save the command register */ pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command); /* disable card */ command = 0x00; pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command); /* Check for Bridge */ pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type); if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* Clear Bridge Control Register */ command = 0x00; pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command); pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); pci_bus_read_config_byte(pci_bus, devfn, PCI_SUBORDINATE_BUS, &temp_byte); bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL); if (!bus_node) return -ENOMEM; bus_node->base = secondary_bus; bus_node->length = temp_byte - secondary_bus + 1; bus_node->next = func->bus_head; func->bus_head = bus_node; /* Save IO base and Limit registers */ pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &b_base); pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &b_length); if ((b_base <= b_length) && (save_command & 0x01)) { io_node = kmalloc(sizeof(*io_node), GFP_KERNEL); if (!io_node) return -ENOMEM; io_node->base = (b_base & 0xF0) << 8; io_node->length = (b_length - b_base + 0x10) << 8; io_node->next = func->io_head; func->io_head = io_node; } /* Save memory base and Limit registers */ pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base); pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length); if ((w_base <= w_length) && (save_command & 0x02)) { mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL); if (!mem_node) return -ENOMEM; mem_node->base = w_base << 16; mem_node->length = (w_length - w_base + 0x10) << 16; mem_node->next = func->mem_head; func->mem_head = mem_node; } /* Save prefetchable memory base and Limit registers */ pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base); pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length); if ((w_base <= w_length) && (save_command & 0x02)) { p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL); if (!p_mem_node) return -ENOMEM; p_mem_node->base = w_base << 16; p_mem_node->length = (w_length - w_base + 0x10) << 16; p_mem_node->next = func->p_mem_head; func->p_mem_head = p_mem_node; } /* Figure out IO and memory base lengths */ for (cloop = 0x10; cloop <= 0x14; cloop += 4) { pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base); temp_register = 0xFFFFFFFF; pci_bus_write_config_dword(pci_bus, devfn, cloop, temp_register); pci_bus_read_config_dword(pci_bus, devfn, cloop, &base); temp_register = base; /* If this register is implemented */ if (base) { if (((base & 0x03L) == 0x01) && (save_command & 0x01)) { /* IO base * set temp_register = amount * of IO space requested */ temp_register = base & 0xFFFFFFFE; temp_register = (~temp_register) + 1; 
io_node = kmalloc(sizeof(*io_node), GFP_KERNEL); if (!io_node) return -ENOMEM; io_node->base = save_base & (~0x03L); io_node->length = temp_register; io_node->next = func->io_head; func->io_head = io_node; } else if (((base & 0x0BL) == 0x08) && (save_command & 0x02)) { /* prefetchable memory base */ temp_register = base & 0xFFFFFFF0; temp_register = (~temp_register) + 1; p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL); if (!p_mem_node) return -ENOMEM; p_mem_node->base = save_base & (~0x0FL); p_mem_node->length = temp_register; p_mem_node->next = func->p_mem_head; func->p_mem_head = p_mem_node; } else if (((base & 0x0BL) == 0x00) && (save_command & 0x02)) { /* prefetchable memory base */ temp_register = base & 0xFFFFFFF0; temp_register = (~temp_register) + 1; mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL); if (!mem_node) return -ENOMEM; mem_node->base = save_base & (~0x0FL); mem_node->length = temp_register; mem_node->next = func->mem_head; func->mem_head = mem_node; } else return(1); } } /* End of base register loop */ /* Standard header */ } else if ((header_type & 0x7F) == 0x00) { /* Figure out IO and memory base lengths */ for (cloop = 0x10; cloop <= 0x24; cloop += 4) { pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base); temp_register = 0xFFFFFFFF; pci_bus_write_config_dword(pci_bus, devfn, cloop, temp_register); pci_bus_read_config_dword(pci_bus, devfn, cloop, &base); temp_register = base; /* If this register is implemented */ if (base) { if (((base & 0x03L) == 0x01) && (save_command & 0x01)) { /* IO base * set temp_register = amount * of IO space requested */ temp_register = base & 0xFFFFFFFE; temp_register = (~temp_register) + 1; io_node = kmalloc(sizeof(*io_node), GFP_KERNEL); if (!io_node) return -ENOMEM; io_node->base = save_base & (~0x01L); io_node->length = temp_register; io_node->next = func->io_head; func->io_head = io_node; } else if (((base & 0x0BL) == 0x08) && (save_command & 0x02)) { /* prefetchable memory base */ temp_register = base & 0xFFFFFFF0; temp_register = (~temp_register) + 1; p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL); if (!p_mem_node) return -ENOMEM; p_mem_node->base = save_base & (~0x0FL); p_mem_node->length = temp_register; p_mem_node->next = func->p_mem_head; func->p_mem_head = p_mem_node; } else if (((base & 0x0BL) == 0x00) && (save_command & 0x02)) { /* prefetchable memory base */ temp_register = base & 0xFFFFFFF0; temp_register = (~temp_register) + 1; mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL); if (!mem_node) return -ENOMEM; mem_node->base = save_base & (~0x0FL); mem_node->length = temp_register; mem_node->next = func->mem_head; func->mem_head = mem_node; } else return(1); } } /* End of base register loop */ } /* find the next device in this slot */ func = cpqhp_slot_find(func->bus, func->device, index++); } return 0; } /* * cpqhp_configure_board * * Copies saved configuration information to one slot. * this is called recursively for bridge devices. * this is for hot plug REPLACE! 
* * returns 0 if success */ int cpqhp_configure_board(struct controller *ctrl, struct pci_func *func) { int cloop; u8 header_type; u8 secondary_bus; int sub_bus; struct pci_func *next; u32 temp; u32 rc; int index = 0; struct pci_bus *pci_bus = ctrl->pci_bus; unsigned int devfn; func = cpqhp_slot_find(func->bus, func->device, index++); while (func != NULL) { pci_bus->number = func->bus; devfn = PCI_DEVFN(func->device, func->function); /* Start at the top of config space so that the control * registers are programmed last */ for (cloop = 0x3C; cloop > 0; cloop -= 4) pci_bus_write_config_dword(pci_bus, devfn, cloop, func->config_space[cloop >> 2]); pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type); /* If this is a bridge device, restore subordinate devices */ if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus); sub_bus = (int) secondary_bus; next = cpqhp_slot_list[sub_bus]; while (next != NULL) { rc = cpqhp_configure_board(ctrl, next); if (rc) return rc; next = next->next; } } else { /* Check all the base Address Registers to make sure * they are the same. If not, the board is different. */ for (cloop = 16; cloop < 40; cloop += 4) { pci_bus_read_config_dword(pci_bus, devfn, cloop, &temp); if (temp != func->config_space[cloop >> 2]) { dbg("Config space compare failure!!! offset = %x\n", cloop); dbg("bus = %x, device = %x, function = %x\n", func->bus, func->device, func->function); dbg("temp = %x, config space = %x\n\n", temp, func->config_space[cloop >> 2]); return 1; } } } func->configured = 1; func = cpqhp_slot_find(func->bus, func->device, index++); } return 0; } /* * cpqhp_valid_replace * * this function checks to see if a board is the same as the * one it is replacing. 
this check will detect if the device's * vendor or device id's are the same * * returns 0 if the board is the same nonzero otherwise */ int cpqhp_valid_replace(struct controller *ctrl, struct pci_func *func) { u8 cloop; u8 header_type; u8 secondary_bus; u8 type; u32 temp_register = 0; u32 base; u32 rc; struct pci_func *next; int index = 0; struct pci_bus *pci_bus = ctrl->pci_bus; unsigned int devfn; if (!func->is_a_board) return(ADD_NOT_SUPPORTED); func = cpqhp_slot_find(func->bus, func->device, index++); while (func != NULL) { pci_bus->number = func->bus; devfn = PCI_DEVFN(func->device, func->function); pci_bus_read_config_dword(pci_bus, devfn, PCI_VENDOR_ID, &temp_register); /* No adapter present */ if (temp_register == 0xFFFFFFFF) return(NO_ADAPTER_PRESENT); if (temp_register != func->config_space[0]) return(ADAPTER_NOT_SAME); /* Check for same revision number and class code */ pci_bus_read_config_dword(pci_bus, devfn, PCI_CLASS_REVISION, &temp_register); /* Adapter not the same */ if (temp_register != func->config_space[0x08 >> 2]) return(ADAPTER_NOT_SAME); /* Check for Bridge */ pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type); if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* In order to continue checking, we must program the * bus registers in the bridge to respond to accesses * for its subordinate bus(es) */ temp_register = func->config_space[0x18 >> 2]; pci_bus_write_config_dword(pci_bus, devfn, PCI_PRIMARY_BUS, temp_register); secondary_bus = (temp_register >> 8) & 0xFF; next = cpqhp_slot_list[secondary_bus]; while (next != NULL) { rc = cpqhp_valid_replace(ctrl, next); if (rc) return rc; next = next->next; } } /* Check to see if it is a standard config header */ else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) { /* Check subsystem vendor and ID */ pci_bus_read_config_dword(pci_bus, devfn, PCI_SUBSYSTEM_VENDOR_ID, &temp_register); if (temp_register != func->config_space[0x2C >> 2]) { /* If it's a SMART-2 and the register isn't * filled in, ignore the difference because * they just have an old rev of the firmware */ if (!((func->config_space[0] == 0xAE100E11) && (temp_register == 0x00L))) return(ADAPTER_NOT_SAME); } /* Figure out IO and memory base lengths */ for (cloop = 0x10; cloop <= 0x24; cloop += 4) { temp_register = 0xFFFFFFFF; pci_bus_write_config_dword(pci_bus, devfn, cloop, temp_register); pci_bus_read_config_dword(pci_bus, devfn, cloop, &base); /* If this register is implemented */ if (base) { if (base & 0x01L) { /* IO base * set base = amount of IO * space requested */ base = base & 0xFFFFFFFE; base = (~base) + 1; type = 1; } else { /* memory base */ base = base & 0xFFFFFFF0; base = (~base) + 1; type = 0; } } else { base = 0x0L; type = 0; } /* Check information in slot structure */ if (func->base_length[(cloop - 0x10) >> 2] != base) return(ADAPTER_NOT_SAME); if (func->base_type[(cloop - 0x10) >> 2] != type) return(ADAPTER_NOT_SAME); } /* End of base register loop */ } /* End of (type 0 config space) else */ else { /* this is not a type 0 or 1 config space header so * we don't know how to do it */ return(DEVICE_TYPE_NOT_SUPPORTED); } /* Get the next function */ func = cpqhp_slot_find(func->bus, func->device, index++); } return 0; } /* * cpqhp_find_available_resources * * Finds available memory, IO, and IRQ resources for programming * devices which may be added to the system * this function is for hot plug ADD! 
* * returns 0 if success */ int cpqhp_find_available_resources(struct controller *ctrl, void __iomem *rom_start) { u8 temp; u8 populated_slot; u8 bridged_slot; void __iomem *one_slot; void __iomem *rom_resource_table; struct pci_func *func = NULL; int i = 10, index; u32 temp_dword, rc; struct pci_resource *mem_node; struct pci_resource *p_mem_node; struct pci_resource *io_node; struct pci_resource *bus_node; rom_resource_table = detect_HRT_floating_pointer(rom_start, rom_start+0xffff); dbg("rom_resource_table = %p\n", rom_resource_table); if (rom_resource_table == NULL) return -ENODEV; /* Sum all resources and setup resource maps */ unused_IRQ = readl(rom_resource_table + UNUSED_IRQ); dbg("unused_IRQ = %x\n", unused_IRQ); temp = 0; while (unused_IRQ) { if (unused_IRQ & 1) { cpqhp_disk_irq = temp; break; } unused_IRQ = unused_IRQ >> 1; temp++; } dbg("cpqhp_disk_irq= %d\n", cpqhp_disk_irq); unused_IRQ = unused_IRQ >> 1; temp++; while (unused_IRQ) { if (unused_IRQ & 1) { cpqhp_nic_irq = temp; break; } unused_IRQ = unused_IRQ >> 1; temp++; } dbg("cpqhp_nic_irq= %d\n", cpqhp_nic_irq); unused_IRQ = readl(rom_resource_table + PCIIRQ); temp = 0; if (!cpqhp_nic_irq) cpqhp_nic_irq = ctrl->cfgspc_irq; if (!cpqhp_disk_irq) cpqhp_disk_irq = ctrl->cfgspc_irq; dbg("cpqhp_disk_irq, cpqhp_nic_irq= %d, %d\n", cpqhp_disk_irq, cpqhp_nic_irq); rc = compaq_nvram_load(rom_start, ctrl); if (rc) return rc; one_slot = rom_resource_table + sizeof(struct hrt); i = readb(rom_resource_table + NUMBER_OF_ENTRIES); dbg("number_of_entries = %d\n", i); if (!readb(one_slot + SECONDARY_BUS)) return 1; dbg("dev|IO base|length|Mem base|length|Pre base|length|PB SB MB\n"); while (i && readb(one_slot + SECONDARY_BUS)) { u8 dev_func = readb(one_slot + DEV_FUNC); u8 primary_bus = readb(one_slot + PRIMARY_BUS); u8 secondary_bus = readb(one_slot + SECONDARY_BUS); u8 max_bus = readb(one_slot + MAX_BUS); u16 io_base = readw(one_slot + IO_BASE); u16 io_length = readw(one_slot + IO_LENGTH); u16 mem_base = readw(one_slot + MEM_BASE); u16 mem_length = readw(one_slot + MEM_LENGTH); u16 pre_mem_base = readw(one_slot + PRE_MEM_BASE); u16 pre_mem_length = readw(one_slot + PRE_MEM_LENGTH); dbg("%2.2x | %4.4x | %4.4x | %4.4x | %4.4x | %4.4x | %4.4x |%2.2x %2.2x %2.2x\n", dev_func, io_base, io_length, mem_base, mem_length, pre_mem_base, pre_mem_length, primary_bus, secondary_bus, max_bus); /* If this entry isn't for our controller's bus, ignore it */ if (primary_bus != ctrl->bus) { i--; one_slot += sizeof(struct slot_rt); continue; } /* find out if this entry is for an occupied slot */ ctrl->pci_bus->number = primary_bus; pci_bus_read_config_dword(ctrl->pci_bus, dev_func, PCI_VENDOR_ID, &temp_dword); dbg("temp_D_word = %x\n", temp_dword); if (temp_dword != 0xFFFFFFFF) { index = 0; func = cpqhp_slot_find(primary_bus, dev_func >> 3, 0); while (func && (func->function != (dev_func & 0x07))) { dbg("func = %p (bus, dev, fun) = (%d, %d, %d)\n", func, primary_bus, dev_func >> 3, index); func = cpqhp_slot_find(primary_bus, dev_func >> 3, index++); } /* If we can't find a match, skip this table entry */ if (!func) { i--; one_slot += sizeof(struct slot_rt); continue; } /* this may not work and shouldn't be used */ if (secondary_bus != primary_bus) bridged_slot = 1; else bridged_slot = 0; populated_slot = 1; } else { populated_slot = 0; bridged_slot = 0; } /* If we've got a valid IO base, use it */ temp_dword = io_base + io_length; if ((io_base) && (temp_dword < 0x10000)) { io_node = kmalloc(sizeof(*io_node), GFP_KERNEL); if (!io_node) return -ENOMEM; 
io_node->base = io_base; io_node->length = io_length; dbg("found io_node(base, length) = %x, %x\n", io_node->base, io_node->length); dbg("populated slot =%d \n", populated_slot); if (!populated_slot) { io_node->next = ctrl->io_head; ctrl->io_head = io_node; } else { io_node->next = func->io_head; func->io_head = io_node; } } /* If we've got a valid memory base, use it */ temp_dword = mem_base + mem_length; if ((mem_base) && (temp_dword < 0x10000)) { mem_node = kmalloc(sizeof(*mem_node), GFP_KERNEL); if (!mem_node) return -ENOMEM; mem_node->base = mem_base << 16; mem_node->length = mem_length << 16; dbg("found mem_node(base, length) = %x, %x\n", mem_node->base, mem_node->length); dbg("populated slot =%d \n", populated_slot); if (!populated_slot) { mem_node->next = ctrl->mem_head; ctrl->mem_head = mem_node; } else { mem_node->next = func->mem_head; func->mem_head = mem_node; } } /* If we've got a valid prefetchable memory base, and * the base + length isn't greater than 0xFFFF */ temp_dword = pre_mem_base + pre_mem_length; if ((pre_mem_base) && (temp_dword < 0x10000)) { p_mem_node = kmalloc(sizeof(*p_mem_node), GFP_KERNEL); if (!p_mem_node) return -ENOMEM; p_mem_node->base = pre_mem_base << 16; p_mem_node->length = pre_mem_length << 16; dbg("found p_mem_node(base, length) = %x, %x\n", p_mem_node->base, p_mem_node->length); dbg("populated slot =%d \n", populated_slot); if (!populated_slot) { p_mem_node->next = ctrl->p_mem_head; ctrl->p_mem_head = p_mem_node; } else { p_mem_node->next = func->p_mem_head; func->p_mem_head = p_mem_node; } } /* If we've got a valid bus number, use it * The second condition is to ignore bus numbers on * populated slots that don't have PCI-PCI bridges */ if (secondary_bus && (secondary_bus != primary_bus)) { bus_node = kmalloc(sizeof(*bus_node), GFP_KERNEL); if (!bus_node) return -ENOMEM; bus_node->base = secondary_bus; bus_node->length = max_bus - secondary_bus + 1; dbg("found bus_node(base, length) = %x, %x\n", bus_node->base, bus_node->length); dbg("populated slot =%d \n", populated_slot); if (!populated_slot) { bus_node->next = ctrl->bus_head; ctrl->bus_head = bus_node; } else { bus_node->next = func->bus_head; func->bus_head = bus_node; } } i--; one_slot += sizeof(struct slot_rt); } /* If all of the following fail, we don't have any resources for * hot plug add */ rc = 1; rc &= cpqhp_resource_sort_and_combine(&(ctrl->mem_head)); rc &= cpqhp_resource_sort_and_combine(&(ctrl->p_mem_head)); rc &= cpqhp_resource_sort_and_combine(&(ctrl->io_head)); rc &= cpqhp_resource_sort_and_combine(&(ctrl->bus_head)); return rc; } /* * cpqhp_return_board_resources * * this routine returns all resources allocated to a board to * the available pool. 
* * returns 0 if success */ int cpqhp_return_board_resources(struct pci_func *func, struct resource_lists *resources) { int rc = 0; struct pci_resource *node; struct pci_resource *t_node; dbg("%s\n", __func__); if (!func) return 1; node = func->io_head; func->io_head = NULL; while (node) { t_node = node->next; return_resource(&(resources->io_head), node); node = t_node; } node = func->mem_head; func->mem_head = NULL; while (node) { t_node = node->next; return_resource(&(resources->mem_head), node); node = t_node; } node = func->p_mem_head; func->p_mem_head = NULL; while (node) { t_node = node->next; return_resource(&(resources->p_mem_head), node); node = t_node; } node = func->bus_head; func->bus_head = NULL; while (node) { t_node = node->next; return_resource(&(resources->bus_head), node); node = t_node; } rc |= cpqhp_resource_sort_and_combine(&(resources->mem_head)); rc |= cpqhp_resource_sort_and_combine(&(resources->p_mem_head)); rc |= cpqhp_resource_sort_and_combine(&(resources->io_head)); rc |= cpqhp_resource_sort_and_combine(&(resources->bus_head)); return rc; } /* * cpqhp_destroy_resource_list * * Puts node back in the resource list pointed to by head */ void cpqhp_destroy_resource_list(struct resource_lists *resources) { struct pci_resource *res, *tres; res = resources->io_head; resources->io_head = NULL; while (res) { tres = res; res = res->next; kfree(tres); } res = resources->mem_head; resources->mem_head = NULL; while (res) { tres = res; res = res->next; kfree(tres); } res = resources->p_mem_head; resources->p_mem_head = NULL; while (res) { tres = res; res = res->next; kfree(tres); } res = resources->bus_head; resources->bus_head = NULL; while (res) { tres = res; res = res->next; kfree(tres); } } /* * cpqhp_destroy_board_resources * * Puts node back in the resource list pointed to by head */ void cpqhp_destroy_board_resources(struct pci_func *func) { struct pci_resource *res, *tres; res = func->io_head; func->io_head = NULL; while (res) { tres = res; res = res->next; kfree(tres); } res = func->mem_head; func->mem_head = NULL; while (res) { tres = res; res = res->next; kfree(tres); } res = func->p_mem_head; func->p_mem_head = NULL; while (res) { tres = res; res = res->next; kfree(tres); } res = func->bus_head; func->bus_head = NULL; while (res) { tres = res; res = res->next; kfree(tres); } }
linux-master
drivers/pci/hotplug/cpqphp_pci.c
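The BAR-sizing arithmetic that cpqhp_save_base_addr_length() and cpqhp_save_used_resources() repeat for every base address register (write all-ones, read back, mask off the type bits, take the two's complement) can be exercised on its own. The sketch below is a minimal user-space illustration of that pattern only; bar_size() and the sample read-back value 0xFFFFFF01 are made up for the example and are not part of the driver.

/*
 * Stand-alone sketch of the PCI BAR sizing arithmetic used in the
 * driver above.  After all-ones is written to a BAR, the bits that are
 * hard-wired to zero encode the decoded window size: mask the type
 * bits, then ~value + 1 gives the length.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t bar_size(uint32_t readback, int *is_io)
{
	uint32_t base;

	if (readback & 0x1) {		/* I/O space BAR */
		*is_io = 1;
		base = readback & 0xFFFFFFFE;
	} else {			/* memory space BAR */
		*is_io = 0;
		base = readback & 0xFFFFFFF0;
	}
	return ~base + 1;		/* two's complement -> length */
}

int main(void)
{
	int is_io;
	/* hypothetical read-back of a 256-byte I/O BAR after the
	 * all-ones write */
	uint32_t len = bar_size(0xFFFFFF01, &is_io);

	printf("type=%s length=%u\n", is_io ? "io" : "mem", (unsigned)len);
	return 0;
}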
// SPDX-License-Identifier: GPL-2.0+ /* * Compaq Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman <[email protected]> * Copyright (C) 2001 IBM Corp. * * All rights reserved. * * Send feedback to <[email protected]> * * Jan 12, 2003 - Added 66/100/133MHz PCI-X support, * Torben Mathiasen <[email protected]> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/uaccess.h> #include "cpqphp.h" #include "cpqphp_nvram.h" /* Global variables */ int cpqhp_debug; int cpqhp_legacy_mode; struct controller *cpqhp_ctrl_list; /* = NULL */ struct pci_func *cpqhp_slot_list[256]; struct irq_routing_table *cpqhp_routing_table; /* local variables */ static void __iomem *smbios_table; static void __iomem *smbios_start; static void __iomem *cpqhp_rom_start; static bool power_mode; static bool debug; static int initialized; #define DRIVER_VERSION "0.9.8" #define DRIVER_AUTHOR "Dan Zink <[email protected]>, Greg Kroah-Hartman <[email protected]>" #define DRIVER_DESC "Compaq Hot Plug PCI Controller Driver" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(power_mode, bool, 0644); MODULE_PARM_DESC(power_mode, "Power mode enabled or not"); module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); #define CPQHPC_MODULE_MINOR 208 static inline int is_slot64bit(struct slot *slot) { return (readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06) ? 1 : 0; } static inline int is_slot66mhz(struct slot *slot) { return (readb(slot->p_sm_slot + SMBIOS_SLOT_TYPE) == 0x0E) ? 1 : 0; } /** * detect_SMBIOS_pointer - find the System Management BIOS Table in mem region. * @begin: begin pointer for region to be scanned. * @end: end pointer for region to be scanned. * * Returns pointer to the head of the SMBIOS tables (or %NULL). */ static void __iomem *detect_SMBIOS_pointer(void __iomem *begin, void __iomem *end) { void __iomem *fp; void __iomem *endp; u8 temp1, temp2, temp3, temp4; int status = 0; endp = (end - sizeof(u32) + 1); for (fp = begin; fp <= endp; fp += 16) { temp1 = readb(fp); temp2 = readb(fp+1); temp3 = readb(fp+2); temp4 = readb(fp+3); if (temp1 == '_' && temp2 == 'S' && temp3 == 'M' && temp4 == '_') { status = 1; break; } } if (!status) fp = NULL; dbg("Discovered SMBIOS Entry point at %p\n", fp); return fp; } /** * init_SERR - Initializes the per slot SERR generation. 
* @ctrl: controller to use * * For unexpected switch opens */ static int init_SERR(struct controller *ctrl) { u32 tempdword; u32 number_of_slots; if (!ctrl) return 1; tempdword = ctrl->first_slot; number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F; /* Loop through slots */ while (number_of_slots) { writeb(0, ctrl->hpc_reg + SLOT_SERR); tempdword++; number_of_slots--; } return 0; } static int init_cpqhp_routing_table(void) { int len; cpqhp_routing_table = pcibios_get_irq_routing_table(); if (cpqhp_routing_table == NULL) return -ENOMEM; len = cpqhp_routing_table_length(); if (len == 0) { kfree(cpqhp_routing_table); cpqhp_routing_table = NULL; return -1; } return 0; } /* nice debugging output */ static void pci_print_IRQ_route(void) { int len; int loop; u8 tbus, tdevice, tslot; len = cpqhp_routing_table_length(); dbg("bus dev func slot\n"); for (loop = 0; loop < len; ++loop) { tbus = cpqhp_routing_table->slots[loop].bus; tdevice = cpqhp_routing_table->slots[loop].devfn; tslot = cpqhp_routing_table->slots[loop].slot; dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot); } } /** * get_subsequent_smbios_entry: get the next entry from bios table. * @smbios_start: where to start in the SMBIOS table * @smbios_table: location of the SMBIOS table * @curr: %NULL or pointer to previously returned structure * * Gets the first entry if previous == NULL; * otherwise, returns the next entry. * Uses global SMBIOS Table pointer. * * Returns a pointer to an SMBIOS structure or NULL if none found. */ static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start, void __iomem *smbios_table, void __iomem *curr) { u8 bail = 0; u8 previous_byte = 1; void __iomem *p_temp; void __iomem *p_max; if (!smbios_table || !curr) return NULL; /* set p_max to the end of the table */ p_max = smbios_start + readw(smbios_table + ST_LENGTH); p_temp = curr; p_temp += readb(curr + SMBIOS_GENERIC_LENGTH); while ((p_temp < p_max) && !bail) { /* Look for the double NULL terminator * The first condition is the previous byte * and the second is the curr */ if (!previous_byte && !(readb(p_temp))) bail = 1; previous_byte = readb(p_temp); p_temp++; } if (p_temp < p_max) return p_temp; else return NULL; } /** * get_SMBIOS_entry - return the requested SMBIOS entry or %NULL * @smbios_start: where to start in the SMBIOS table * @smbios_table: location of the SMBIOS table * @type: SMBIOS structure type to be returned * @previous: %NULL or pointer to previously returned structure * * Gets the first entry of the specified type if previous == %NULL; * Otherwise, returns the next entry of the given type. * Uses global SMBIOS Table pointer. * Uses get_subsequent_smbios_entry. * * Returns a pointer to an SMBIOS structure or %NULL if none found. 
*/ static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start, void __iomem *smbios_table, u8 type, void __iomem *previous) { if (!smbios_table) return NULL; if (!previous) previous = smbios_start; else previous = get_subsequent_smbios_entry(smbios_start, smbios_table, previous); while (previous) if (readb(previous + SMBIOS_GENERIC_TYPE) != type) previous = get_subsequent_smbios_entry(smbios_start, smbios_table, previous); else break; return previous; } static int ctrl_slot_cleanup(struct controller *ctrl) { struct slot *old_slot, *next_slot; old_slot = ctrl->slot; ctrl->slot = NULL; while (old_slot) { next_slot = old_slot->next; pci_hp_deregister(&old_slot->hotplug_slot); kfree(old_slot); old_slot = next_slot; } cpqhp_remove_debugfs_files(ctrl); /* Free IRQ associated with hot plug device */ free_irq(ctrl->interrupt, ctrl); /* Unmap the memory */ iounmap(ctrl->hpc_reg); /* Finally reclaim PCI mem */ release_mem_region(pci_resource_start(ctrl->pci_dev, 0), pci_resource_len(ctrl->pci_dev, 0)); return 0; } /** * get_slot_mapping - determine logical slot mapping for PCI device * * Won't work for more than one PCI-PCI bridge in a slot. * * @bus: pointer to the PCI bus structure * @bus_num: bus number of PCI device * @dev_num: device number of PCI device * @slot: Pointer to u8 where slot number will be returned * * Output: SUCCESS or FAILURE */ static int get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot) { u32 work; long len; long loop; u8 tbus, tdevice, tslot, bridgeSlot; dbg("%s: %p, %d, %d, %p\n", __func__, bus, bus_num, dev_num, slot); bridgeSlot = 0xFF; len = cpqhp_routing_table_length(); for (loop = 0; loop < len; ++loop) { tbus = cpqhp_routing_table->slots[loop].bus; tdevice = cpqhp_routing_table->slots[loop].devfn >> 3; tslot = cpqhp_routing_table->slots[loop].slot; if ((tbus == bus_num) && (tdevice == dev_num)) { *slot = tslot; return 0; } else { /* Did not get a match on the target PCI device. Check * if the current IRQ table entry is a PCI-to-PCI * bridge device. If so, and it's secondary bus * matches the bus number for the target device, I need * to save the bridge's slot number. If I can not find * an entry for the target device, I will have to * assume it's on the other side of the bridge, and * assign it the bridge's slot. */ bus->number = tbus; pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0), PCI_CLASS_REVISION, &work); if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) { pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0), PCI_PRIMARY_BUS, &work); // See if bridge's secondary bus matches target bus. if (((work >> 8) & 0x000000FF) == (long) bus_num) bridgeSlot = tslot; } } } /* If we got here, we didn't find an entry in the IRQ mapping table for * the target PCI device. If we did determine that the target device * is on the other side of a PCI-to-PCI bridge, return the slot number * for the bridge. 
*/ if (bridgeSlot != 0xFF) { *slot = bridgeSlot; return 0; } /* Couldn't find an entry in the routing table for this PCI device */ return -1; } /** * cpqhp_set_attention_status - Turns the Amber LED for a slot on or off * @ctrl: struct controller to use * @func: PCI device/function info * @status: LED control flag: 1 = LED on, 0 = LED off */ static int cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func, u32 status) { u8 hp_slot; if (func == NULL) return 1; hp_slot = func->device - ctrl->slot_device_offset; /* Wait for exclusive access to hardware */ mutex_lock(&ctrl->crit_sect); if (status == 1) amber_LED_on(ctrl, hp_slot); else if (status == 0) amber_LED_off(ctrl, hp_slot); else { /* Done with exclusive hardware access */ mutex_unlock(&ctrl->crit_sect); return 1; } set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); /* Done with exclusive hardware access */ mutex_unlock(&ctrl->crit_sect); return 0; } /** * set_attention_status - Turns the Amber LED for a slot on or off * @hotplug_slot: slot to change LED on * @status: LED control flag */ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { struct pci_func *slot_func; struct slot *slot = to_slot(hotplug_slot); struct controller *ctrl = slot->ctrl; u8 bus; u8 devfn; u8 device; u8 function; dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) return -ENODEV; device = devfn >> 3; function = devfn & 0x7; dbg("bus, dev, fn = %d, %d, %d\n", bus, device, function); slot_func = cpqhp_slot_find(bus, device, function); if (!slot_func) return -ENODEV; return cpqhp_set_attention_status(ctrl, slot_func, status); } static int process_SI(struct hotplug_slot *hotplug_slot) { struct pci_func *slot_func; struct slot *slot = to_slot(hotplug_slot); struct controller *ctrl = slot->ctrl; u8 bus; u8 devfn; u8 device; u8 function; dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) return -ENODEV; device = devfn >> 3; function = devfn & 0x7; dbg("bus, dev, fn = %d, %d, %d\n", bus, device, function); slot_func = cpqhp_slot_find(bus, device, function); if (!slot_func) return -ENODEV; slot_func->bus = bus; slot_func->device = device; slot_func->function = function; slot_func->configured = 0; dbg("board_added(%p, %p)\n", slot_func, ctrl); return cpqhp_process_SI(ctrl, slot_func); } static int process_SS(struct hotplug_slot *hotplug_slot) { struct pci_func *slot_func; struct slot *slot = to_slot(hotplug_slot); struct controller *ctrl = slot->ctrl; u8 bus; u8 devfn; u8 device; u8 function; dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) return -ENODEV; device = devfn >> 3; function = devfn & 0x7; dbg("bus, dev, fn = %d, %d, %d\n", bus, device, function); slot_func = cpqhp_slot_find(bus, device, function); if (!slot_func) return -ENODEV; dbg("In %s, slot_func = %p, ctrl = %p\n", __func__, slot_func, ctrl); return cpqhp_process_SS(ctrl, slot_func); } static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value) { struct slot *slot = to_slot(hotplug_slot); struct controller *ctrl = slot->ctrl; dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); return cpqhp_hardware_test(ctrl, value); } static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); struct controller *ctrl = slot->ctrl; dbg("%s - physical_slot = 
%s\n", __func__, slot_name(slot)); *value = get_slot_enabled(ctrl, slot); return 0; } static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); struct controller *ctrl = slot->ctrl; dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = cpq_get_attention_status(ctrl, slot); return 0; } static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); struct controller *ctrl = slot->ctrl; dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = cpq_get_latch_status(ctrl, slot); return 0; } static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); struct controller *ctrl = slot->ctrl; dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = get_presence_status(ctrl, slot); return 0; } static const struct hotplug_slot_ops cpqphp_hotplug_slot_ops = { .set_attention_status = set_attention_status, .enable_slot = process_SI, .disable_slot = process_SS, .hardware_test = hardware_test, .get_power_status = get_power_status, .get_attention_status = get_attention_status, .get_latch_status = get_latch_status, .get_adapter_status = get_adapter_status, }; #define SLOT_NAME_SIZE 10 static int ctrl_slot_setup(struct controller *ctrl, void __iomem *smbios_start, void __iomem *smbios_table) { struct slot *slot; struct pci_bus *bus = ctrl->pci_bus; u8 number_of_slots; u8 slot_device; u8 slot_number; u8 ctrl_slot; u32 tempdword; char name[SLOT_NAME_SIZE]; void __iomem *slot_entry = NULL; int result; dbg("%s\n", __func__); tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F; slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4; slot_number = ctrl->first_slot; while (number_of_slots) { slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) { result = -ENOMEM; goto error; } slot->ctrl = ctrl; slot->bus = ctrl->bus; slot->device = slot_device; slot->number = slot_number; dbg("slot->number = %u\n", slot->number); slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9, slot_entry); while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) != slot->number)) { slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9, slot_entry); } slot->p_sm_slot = slot_entry; timer_setup(&slot->task_event, cpqhp_pushbutton_thread, 0); slot->task_event.expires = jiffies + 5 * HZ; /*FIXME: these capabilities aren't used but if they are * they need to be correctly implemented */ slot->capabilities |= PCISLOT_REPLACE_SUPPORTED; slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED; if (is_slot64bit(slot)) slot->capabilities |= PCISLOT_64_BIT_SUPPORTED; if (is_slot66mhz(slot)) slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED; if (bus->cur_bus_speed == PCI_SPEED_66MHz) slot->capabilities |= PCISLOT_66_MHZ_OPERATION; ctrl_slot = slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4); /* Check presence */ slot->capabilities |= ((((~tempdword) >> 23) | ((~tempdword) >> 15)) >> ctrl_slot) & 0x02; /* Check the switch state */ slot->capabilities |= ((~tempdword & 0xFF) >> ctrl_slot) & 0x01; /* Check the slot enable */ slot->capabilities |= ((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04; /* register this slot with the hotplug pci core */ snprintf(name, SLOT_NAME_SIZE, "%u", slot->number); slot->hotplug_slot.ops = &cpqphp_hotplug_slot_ops; dbg("registering bus %d, dev %d, number %d, ctrl->slot_device_offset %d, slot %d\n", slot->bus, slot->device, slot->number, 
ctrl->slot_device_offset, slot_number); result = pci_hp_register(&slot->hotplug_slot, ctrl->pci_dev->bus, slot->device, name); if (result) { err("pci_hp_register failed with error %d\n", result); goto error_slot; } slot->next = ctrl->slot; ctrl->slot = slot; number_of_slots--; slot_device++; slot_number++; } return 0; error_slot: kfree(slot); error: return result; } static int one_time_init(void) { int loop; int retval = 0; if (initialized) return 0; power_mode = 0; retval = init_cpqhp_routing_table(); if (retval) goto error; if (cpqhp_debug) pci_print_IRQ_route(); dbg("Initialize + Start the notification mechanism\n"); retval = cpqhp_event_start_thread(); if (retval) goto error; dbg("Initialize slot lists\n"); for (loop = 0; loop < 256; loop++) cpqhp_slot_list[loop] = NULL; /* FIXME: We also need to hook the NMI handler eventually. * this also needs to be worked with Christoph * register_NMI_handler(); */ /* Map rom address */ cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN); if (!cpqhp_rom_start) { err("Could not ioremap memory region for ROM\n"); retval = -EIO; goto error; } /* Now, map the int15 entry point if we are on compaq specific * hardware */ compaq_nvram_init(cpqhp_rom_start); /* Map smbios table entry point structure */ smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start, cpqhp_rom_start + ROM_PHY_LEN); if (!smbios_table) { err("Could not find the SMBIOS pointer in memory\n"); retval = -EIO; goto error_rom_start; } smbios_start = ioremap(readl(smbios_table + ST_ADDRESS), readw(smbios_table + ST_LENGTH)); if (!smbios_start) { err("Could not ioremap memory region taken from SMBIOS values\n"); retval = -EIO; goto error_smbios_start; } initialized = 1; return retval; error_smbios_start: iounmap(smbios_start); error_rom_start: iounmap(cpqhp_rom_start); error: return retval; } static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { u8 num_of_slots = 0; u8 hp_slot = 0; u8 device; u8 bus_cap; u16 temp_word; u16 vendor_id; u16 subsystem_vid; u16 subsystem_deviceid; u32 rc; struct controller *ctrl; struct pci_func *func; struct pci_bus *bus; int err; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR MY_NAME ": cannot enable PCI device %s (%d)\n", pci_name(pdev), err); return err; } bus = pdev->subordinate; if (!bus) { pci_notice(pdev, "the device is not a bridge, skipping\n"); rc = -ENODEV; goto err_disable_device; } /* Need to read VID early b/c it's used to differentiate CPQ and INTC * discovery */ vendor_id = pdev->vendor; if ((vendor_id != PCI_VENDOR_ID_COMPAQ) && (vendor_id != PCI_VENDOR_ID_INTEL)) { err(msg_HPC_non_compaq_or_intel); rc = -ENODEV; goto err_disable_device; } dbg("Vendor ID: %x\n", vendor_id); dbg("revision: %d\n", pdev->revision); if ((vendor_id == PCI_VENDOR_ID_COMPAQ) && (!pdev->revision)) { err(msg_HPC_rev_error); rc = -ENODEV; goto err_disable_device; } /* Check for the proper subsystem IDs * Intel uses a different SSID programming model than Compaq. * For Intel, each SSID bit identifies a PHP capability. * Also Intel HPCs may have RID=0. 
*/ if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) { err(msg_HPC_not_supported); rc = -ENODEV; goto err_disable_device; } /* TODO: This code can be made to support non-Compaq or Intel * subsystem IDs */ subsystem_vid = pdev->subsystem_vendor; dbg("Subsystem Vendor ID: %x\n", subsystem_vid); if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) { err(msg_HPC_non_compaq_or_intel); rc = -ENODEV; goto err_disable_device; } ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL); if (!ctrl) { rc = -ENOMEM; goto err_disable_device; } subsystem_deviceid = pdev->subsystem_device; info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid); /* Set Vendor ID, so it can be accessed later from other * functions */ ctrl->vendor_id = vendor_id; switch (subsystem_vid) { case PCI_VENDOR_ID_COMPAQ: if (pdev->revision >= 0x13) { /* CIOBX */ ctrl->push_flag = 1; ctrl->slot_switch_type = 1; ctrl->push_button = 1; ctrl->pci_config_space = 1; ctrl->defeature_PHP = 1; ctrl->pcix_support = 1; ctrl->pcix_speed_capability = 1; pci_read_config_byte(pdev, 0x41, &bus_cap); if (bus_cap & 0x80) { dbg("bus max supports 133MHz PCI-X\n"); bus->max_bus_speed = PCI_SPEED_133MHz_PCIX; break; } if (bus_cap & 0x40) { dbg("bus max supports 100MHz PCI-X\n"); bus->max_bus_speed = PCI_SPEED_100MHz_PCIX; break; } if (bus_cap & 0x20) { dbg("bus max supports 66MHz PCI-X\n"); bus->max_bus_speed = PCI_SPEED_66MHz_PCIX; break; } if (bus_cap & 0x10) { dbg("bus max supports 66MHz PCI\n"); bus->max_bus_speed = PCI_SPEED_66MHz; break; } break; } switch (subsystem_deviceid) { case PCI_SUB_HPC_ID: /* Original 6500/7000 implementation */ ctrl->slot_switch_type = 1; bus->max_bus_speed = PCI_SPEED_33MHz; ctrl->push_button = 0; ctrl->pci_config_space = 1; ctrl->defeature_PHP = 1; ctrl->pcix_support = 0; ctrl->pcix_speed_capability = 0; break; case PCI_SUB_HPC_ID2: /* First Pushbutton implementation */ ctrl->push_flag = 1; ctrl->slot_switch_type = 1; bus->max_bus_speed = PCI_SPEED_33MHz; ctrl->push_button = 1; ctrl->pci_config_space = 1; ctrl->defeature_PHP = 1; ctrl->pcix_support = 0; ctrl->pcix_speed_capability = 0; break; case PCI_SUB_HPC_ID_INTC: /* Third party (6500/7000) */ ctrl->slot_switch_type = 1; bus->max_bus_speed = PCI_SPEED_33MHz; ctrl->push_button = 0; ctrl->pci_config_space = 1; ctrl->defeature_PHP = 1; ctrl->pcix_support = 0; ctrl->pcix_speed_capability = 0; break; case PCI_SUB_HPC_ID3: /* First 66 Mhz implementation */ ctrl->push_flag = 1; ctrl->slot_switch_type = 1; bus->max_bus_speed = PCI_SPEED_66MHz; ctrl->push_button = 1; ctrl->pci_config_space = 1; ctrl->defeature_PHP = 1; ctrl->pcix_support = 0; ctrl->pcix_speed_capability = 0; break; case PCI_SUB_HPC_ID4: /* First PCI-X implementation, 100MHz */ ctrl->push_flag = 1; ctrl->slot_switch_type = 1; bus->max_bus_speed = PCI_SPEED_100MHz_PCIX; ctrl->push_button = 1; ctrl->pci_config_space = 1; ctrl->defeature_PHP = 1; ctrl->pcix_support = 1; ctrl->pcix_speed_capability = 0; break; default: err(msg_HPC_not_supported); rc = -ENODEV; goto err_free_ctrl; } break; case PCI_VENDOR_ID_INTEL: /* Check for speed capability (0=33, 1=66) */ if (subsystem_deviceid & 0x0001) bus->max_bus_speed = PCI_SPEED_66MHz; else bus->max_bus_speed = PCI_SPEED_33MHz; /* Check for push button */ if (subsystem_deviceid & 0x0002) ctrl->push_button = 0; else ctrl->push_button = 1; /* Check for slot switch type (0=mechanical, 1=not mechanical) */ if (subsystem_deviceid & 0x0004) ctrl->slot_switch_type = 0; else ctrl->slot_switch_type = 1; /* PHP Status 
(0=De-feature PHP, 1=Normal operation) */ if (subsystem_deviceid & 0x0008) ctrl->defeature_PHP = 1; /* PHP supported */ else ctrl->defeature_PHP = 0; /* PHP not supported */ /* Alternate Base Address Register Interface * (0=not supported, 1=supported) */ if (subsystem_deviceid & 0x0010) ctrl->alternate_base_address = 1; else ctrl->alternate_base_address = 0; /* PCI Config Space Index (0=not supported, 1=supported) */ if (subsystem_deviceid & 0x0020) ctrl->pci_config_space = 1; else ctrl->pci_config_space = 0; /* PCI-X support */ if (subsystem_deviceid & 0x0080) { ctrl->pcix_support = 1; if (subsystem_deviceid & 0x0040) /* 133MHz PCI-X if bit 7 is 1 */ ctrl->pcix_speed_capability = 1; else /* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */ /* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */ ctrl->pcix_speed_capability = 0; } else { /* Conventional PCI */ ctrl->pcix_support = 0; ctrl->pcix_speed_capability = 0; } break; default: err(msg_HPC_not_supported); rc = -ENODEV; goto err_free_ctrl; } /* Tell the user that we found one. */ info("Initializing the PCI hot plug controller residing on PCI bus %d\n", pdev->bus->number); dbg("Hotplug controller capabilities:\n"); dbg(" speed_capability %d\n", bus->max_bus_speed); dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ? "switch present" : "no switch"); dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ? "PHP supported" : "PHP not supported"); dbg(" alternate_base_address %s\n", ctrl->alternate_base_address ? "supported" : "not supported"); dbg(" pci_config_space %s\n", ctrl->pci_config_space ? "supported" : "not supported"); dbg(" pcix_speed_capability %s\n", ctrl->pcix_speed_capability ? "supported" : "not supported"); dbg(" pcix_support %s\n", ctrl->pcix_support ? "supported" : "not supported"); ctrl->pci_dev = pdev; pci_set_drvdata(pdev, ctrl); /* make our own copy of the pci bus structure, * as we like tweaking it a lot */ ctrl->pci_bus = kmemdup(pdev->bus, sizeof(*ctrl->pci_bus), GFP_KERNEL); if (!ctrl->pci_bus) { err("out of memory\n"); rc = -ENOMEM; goto err_free_ctrl; } ctrl->bus = pdev->bus->number; ctrl->rev = pdev->revision; dbg("bus device function rev: %d %d %d %d\n", ctrl->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), ctrl->rev); mutex_init(&ctrl->crit_sect); init_waitqueue_head(&ctrl->queue); /* initialize our threads if they haven't already been started up */ rc = one_time_init(); if (rc) goto err_free_bus; dbg("pdev = %p\n", pdev); dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0)); dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0)); if (!request_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0), MY_NAME)) { err("cannot reserve MMIO region\n"); rc = -ENOMEM; goto err_free_bus; } ctrl->hpc_reg = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!ctrl->hpc_reg) { err("cannot remap MMIO region %llx @ %llx\n", (unsigned long long)pci_resource_len(pdev, 0), (unsigned long long)pci_resource_start(pdev, 0)); rc = -ENODEV; goto err_free_mem_region; } /* Check for 66Mhz operation */ bus->cur_bus_speed = get_controller_speed(ctrl); /******************************************************** * * Save configuration headers for this and * subordinate PCI buses * ********************************************************/ /* find the physical slot number of the first hot plug slot */ /* Get slot won't work for devices behind bridges, but * in this case it will always be called for the "base" * bus/dev/func of a slot. 
* CS: this is leveraging the PCIIRQ routing code from the kernel * (pci-pc.c: get_irq_routing_table) */ rc = get_slot_mapping(ctrl->pci_bus, pdev->bus->number, (readb(ctrl->hpc_reg + SLOT_MASK) >> 4), &(ctrl->first_slot)); dbg("get_slot_mapping: first_slot = %d, returned = %d\n", ctrl->first_slot, rc); if (rc) { err(msg_initialization_err, rc); goto err_iounmap; } /* Store PCI Config Space for all devices on this bus */ rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK)); if (rc) { err("%s: unable to save PCI configuration data, error %d\n", __func__, rc); goto err_iounmap; } /* * Get IO, memory, and IRQ resources for new devices */ /* The next line is required for cpqhp_find_available_resources */ ctrl->interrupt = pdev->irq; if (ctrl->interrupt < 0x10) { cpqhp_legacy_mode = 1; dbg("System seems to be configured for Full Table Mapped MPS mode\n"); } ctrl->cfgspc_irq = 0; pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &ctrl->cfgspc_irq); rc = cpqhp_find_available_resources(ctrl, cpqhp_rom_start); ctrl->add_support = !rc; if (rc) { dbg("cpqhp_find_available_resources = 0x%x\n", rc); err("unable to locate PCI configuration resources for hot plug add.\n"); goto err_iounmap; } /* * Finish setting up the hot plug ctrl device */ ctrl->slot_device_offset = readb(ctrl->hpc_reg + SLOT_MASK) >> 4; dbg("NumSlots %d\n", ctrl->slot_device_offset); ctrl->next_event = 0; /* Setup the slot information structures */ rc = ctrl_slot_setup(ctrl, smbios_start, smbios_table); if (rc) { err(msg_initialization_err, 6); err("%s: unable to save PCI configuration data, error %d\n", __func__, rc); goto err_iounmap; } /* Mask all general input interrupts */ writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK); /* set up the interrupt */ dbg("HPC interrupt = %d\n", ctrl->interrupt); if (request_irq(ctrl->interrupt, cpqhp_ctrl_intr, IRQF_SHARED, MY_NAME, ctrl)) { err("Can't get irq %d for the hotplug pci controller\n", ctrl->interrupt); rc = -ENODEV; goto err_iounmap; } /* Enable Shift Out interrupt and clear it, also enable SERR on power * fault */ temp_word = readw(ctrl->hpc_reg + MISC); temp_word |= 0x4006; writew(temp_word, ctrl->hpc_reg + MISC); /* Changed 05/05/97 to clear all interrupts at start */ writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR); ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR); writel(0x0L, ctrl->hpc_reg + INT_MASK); if (!cpqhp_ctrl_list) { cpqhp_ctrl_list = ctrl; ctrl->next = NULL; } else { ctrl->next = cpqhp_ctrl_list; cpqhp_ctrl_list = ctrl; } /* turn off empty slots here unless command line option "ON" set * Wait for exclusive access to hardware */ mutex_lock(&ctrl->crit_sect); num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F; /* find first device number for the ctrl */ device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4; while (num_of_slots) { dbg("num_of_slots: %d\n", num_of_slots); func = cpqhp_slot_find(ctrl->bus, device, 0); if (!func) break; hp_slot = func->device - ctrl->slot_device_offset; dbg("hp_slot: %d\n", hp_slot); /* We have to save the presence info for these slots */ temp_word = ctrl->ctrl_int_comp >> 16; func->presence_save = (temp_word >> hp_slot) & 0x01; func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02; if (ctrl->ctrl_int_comp & (0x1L << hp_slot)) func->switch_save = 0; else func->switch_save = 0x10; if (!power_mode) if (!func->is_a_board) { green_LED_off(ctrl, hp_slot); slot_disable(ctrl, hp_slot); } device++; num_of_slots--; } if (!power_mode) { set_SOGO(ctrl); /* Wait for SOBS to be unset */ wait_for_ctrl_irq(ctrl); } rc = 
init_SERR(ctrl); if (rc) { err("init_SERR failed\n"); mutex_unlock(&ctrl->crit_sect); goto err_free_irq; } /* Done with exclusive hardware access */ mutex_unlock(&ctrl->crit_sect); cpqhp_create_debugfs_files(ctrl); return 0; err_free_irq: free_irq(ctrl->interrupt, ctrl); err_iounmap: iounmap(ctrl->hpc_reg); err_free_mem_region: release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); err_free_bus: kfree(ctrl->pci_bus); err_free_ctrl: kfree(ctrl); err_disable_device: pci_disable_device(pdev); return rc; } static void __exit unload_cpqphpd(void) { struct pci_func *next; struct pci_func *TempSlot; int loop; u32 rc; struct controller *ctrl; struct controller *tctrl; struct pci_resource *res; struct pci_resource *tres; compaq_nvram_store(cpqhp_rom_start); ctrl = cpqhp_ctrl_list; while (ctrl) { if (ctrl->hpc_reg) { u16 misc; rc = read_slot_enable(ctrl); writeb(0, ctrl->hpc_reg + SLOT_SERR); writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK); misc = readw(ctrl->hpc_reg + MISC); misc &= 0xFFFD; writew(misc, ctrl->hpc_reg + MISC); } ctrl_slot_cleanup(ctrl); res = ctrl->io_head; while (res) { tres = res; res = res->next; kfree(tres); } res = ctrl->mem_head; while (res) { tres = res; res = res->next; kfree(tres); } res = ctrl->p_mem_head; while (res) { tres = res; res = res->next; kfree(tres); } res = ctrl->bus_head; while (res) { tres = res; res = res->next; kfree(tres); } kfree(ctrl->pci_bus); tctrl = ctrl; ctrl = ctrl->next; kfree(tctrl); } for (loop = 0; loop < 256; loop++) { next = cpqhp_slot_list[loop]; while (next != NULL) { res = next->io_head; while (res) { tres = res; res = res->next; kfree(tres); } res = next->mem_head; while (res) { tres = res; res = res->next; kfree(tres); } res = next->p_mem_head; while (res) { tres = res; res = res->next; kfree(tres); } res = next->bus_head; while (res) { tres = res; res = res->next; kfree(tres); } TempSlot = next; next = next->next; kfree(TempSlot); } } /* Stop the notification mechanism */ if (initialized) cpqhp_event_stop_thread(); /* unmap the rom address */ if (cpqhp_rom_start) iounmap(cpqhp_rom_start); if (smbios_start) iounmap(smbios_start); } static const struct pci_device_id hpcd_pci_tbl[] = { { /* handle any PCI Hotplug controller */ .class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00), .class_mask = ~0, /* no matter who makes it */ .vendor = PCI_ANY_ID, .device = PCI_ANY_ID, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl); static struct pci_driver cpqhpc_driver = { .name = "compaq_pci_hotplug", .id_table = hpcd_pci_tbl, .probe = cpqhpc_probe, /* remove: cpqhpc_remove_one, */ }; static int __init cpqhpc_init(void) { int result; cpqhp_debug = debug; info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); cpqhp_initialize_debugfs(); result = pci_register_driver(&cpqhpc_driver); dbg("pci_register_driver = %d\n", result); return result; } static void __exit cpqhpc_cleanup(void) { dbg("unload_cpqphpd()\n"); unload_cpqphpd(); dbg("pci_unregister_driver\n"); pci_unregister_driver(&cpqhpc_driver); cpqhp_shutdown_debugfs(); } module_init(cpqhpc_init); module_exit(cpqhpc_cleanup);
linux-master
drivers/pci/hotplug/cpqphp_core.c
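cpqhpc_probe() decodes the Intel subsystem device ID bit by bit to fill in the controller's hot-plug capabilities. The stand-alone sketch below mirrors that decoding so the bit assignments can be checked in isolation; struct hpc_caps, decode_intel_ssid() and the sample SSID value 0x00A9 are hypothetical names and values used only for illustration, not part of the driver.

/*
 * Sketch of the Intel subsystem-device-ID capability decoding done in
 * cpqhpc_probe(): each SSID bit advertises one PHP capability.  Bit
 * meanings follow the comments in the driver source above.
 */
#include <stdio.h>
#include <stdint.h>

struct hpc_caps {
	int speed_66mhz;	/* bit 0: 0 = 33MHz, 1 = 66MHz */
	int push_button;	/* bit 1: 0 = button present */
	int mech_switch;	/* bit 2: 0 = mechanical switch */
	int php_supported;	/* bit 3: 1 = normal PHP operation */
	int alt_base_addr;	/* bit 4 */
	int pci_config_space;	/* bit 5 */
	int pcix_support;	/* bit 7 */
	int pcix_133;		/* bit 6, only meaningful with bit 7 */
};

static void decode_intel_ssid(uint16_t ssid, struct hpc_caps *c)
{
	c->speed_66mhz      = !!(ssid & 0x0001);
	c->push_button      =  !(ssid & 0x0002);
	c->mech_switch      =  !(ssid & 0x0004);
	c->php_supported    = !!(ssid & 0x0008);
	c->alt_base_addr    = !!(ssid & 0x0010);
	c->pci_config_space = !!(ssid & 0x0020);
	c->pcix_support     = !!(ssid & 0x0080);
	c->pcix_133         = c->pcix_support && (ssid & 0x0040);
}

int main(void)
{
	struct hpc_caps caps;

	decode_intel_ssid(0x00A9, &caps);	/* made-up SSID */
	printf("66MHz=%d push_button=%d PCI-X=%d 133MHz=%d\n",
	       caps.speed_66mhz, caps.push_button,
	       caps.pcix_support, caps.pcix_133);
	return 0;
}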
// SPDX-License-Identifier: GPL-2.0+ /* * Interface for Dynamic Logical Partitioning of I/O Slots on * RPA-compliant PPC64 platform. * * John Rose <[email protected]> * October 2003 * * Copyright (C) 2003 IBM. */ #include <linux/kobject.h> #include <linux/string.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include "rpaphp.h" #include "rpadlpar.h" #include "../pci.h" #define DLPAR_KOBJ_NAME "control" /* Those two have no quotes because they are passed to __ATTR() which * stringifies the argument (yuck !) */ #define ADD_SLOT_ATTR_NAME add_slot #define REMOVE_SLOT_ATTR_NAME remove_slot static ssize_t add_slot_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t nbytes) { char drc_name[MAX_DRC_NAME_LEN]; char *end; int rc; if (nbytes >= MAX_DRC_NAME_LEN) return 0; strscpy(drc_name, buf, nbytes + 1); end = strchr(drc_name, '\n'); if (end) *end = '\0'; rc = dlpar_add_slot(drc_name); if (rc) return rc; return nbytes; } static ssize_t add_slot_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "0\n"); } static ssize_t remove_slot_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t nbytes) { char drc_name[MAX_DRC_NAME_LEN]; int rc; char *end; if (nbytes >= MAX_DRC_NAME_LEN) return 0; strscpy(drc_name, buf, nbytes + 1); end = strchr(drc_name, '\n'); if (end) *end = '\0'; rc = dlpar_remove_slot(drc_name); if (rc) return rc; return nbytes; } static ssize_t remove_slot_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { return sysfs_emit(buf, "0\n"); } static struct kobj_attribute add_slot_attr = __ATTR(ADD_SLOT_ATTR_NAME, 0644, add_slot_show, add_slot_store); static struct kobj_attribute remove_slot_attr = __ATTR(REMOVE_SLOT_ATTR_NAME, 0644, remove_slot_show, remove_slot_store); static struct attribute *default_attrs[] = { &add_slot_attr.attr, &remove_slot_attr.attr, NULL, }; static const struct attribute_group dlpar_attr_group = { .attrs = default_attrs, }; static struct kobject *dlpar_kobj; int dlpar_sysfs_init(void) { int error; dlpar_kobj = kobject_create_and_add(DLPAR_KOBJ_NAME, &pci_slots_kset->kobj); if (!dlpar_kobj) return -EINVAL; error = sysfs_create_group(dlpar_kobj, &dlpar_attr_group); if (error) kobject_put(dlpar_kobj); return error; } void dlpar_sysfs_exit(void) { sysfs_remove_group(dlpar_kobj, &dlpar_attr_group); kobject_put(dlpar_kobj); }
linux-master
drivers/pci/hotplug/rpadlpar_sysfs.c
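Both store handlers in rpadlpar_sysfs.c perform the same preprocessing before calling into the DLPAR layer: bound-check the write, copy it into a fixed buffer, and strip a trailing newline. The sketch below is a hypothetical refactoring of that shared step, not driver code; MAX_DRC_NAME_LEN and strscpy() are used exactly as in the handlers above. Userspace drives these attributes by writing a slot's DRC name to the add_slot and remove_slot files under the "control" kobject that dlpar_sysfs_init() creates beneath the PCI slots kset (typically /sys/bus/pci/slots/control/).

#include <linux/string.h>

/* Hypothetical helper: copy a DRC name written through sysfs and drop the
 * trailing newline.  Returns 0 on success, -EINVAL if the input is too long
 * (the driver itself simply reports 0 bytes consumed in that case). */
static int parse_drc_name(const char *buf, size_t nbytes,
			  char drc_name[MAX_DRC_NAME_LEN])
{
	char *end;

	if (nbytes >= MAX_DRC_NAME_LEN)
		return -EINVAL;

	strscpy(drc_name, buf, nbytes + 1);
	end = strchr(drc_name, '\n');
	if (end)
		*end = '\0';
	return 0;
}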
// SPDX-License-Identifier: GPL-2.0+ /* * PCI Hot Plug Controller Driver for RPA-compliant PPC64 platform. * Copyright (C) 2003 Linda Xie <[email protected]> * * All rights reserved. * * Send feedback to <[email protected]> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <asm/firmware.h> #include <asm/eeh.h> /* for eeh_add_device() */ #include <asm/rtas.h> /* rtas_call */ #include <asm/pci-bridge.h> /* for pci_controller */ #include <asm/prom.h> #include "../pci.h" /* for pci_add_new_bus */ /* and pci_do_scan_bus */ #include "rpaphp.h" bool rpaphp_debug; LIST_HEAD(rpaphp_slot_head); EXPORT_SYMBOL_GPL(rpaphp_slot_head); #define DRIVER_VERSION "0.1" #define DRIVER_AUTHOR "Linda Xie <[email protected]>" #define DRIVER_DESC "RPA HOT Plug PCI Controller Driver" #define MAX_LOC_CODE 128 MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param_named(debug, rpaphp_debug, bool, 0644); /** * set_attention_status - set attention LED * @hotplug_slot: target &hotplug_slot * @value: LED control value * * echo 0 > attention -- set LED OFF * echo 1 > attention -- set LED ON * echo 2 > attention -- set LED ID(identify, light is blinking) */ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 value) { int rc; struct slot *slot = to_slot(hotplug_slot); switch (value) { case 0: case 1: case 2: break; default: value = 1; break; } rc = rtas_set_indicator(DR_INDICATOR, slot->index, value); if (!rc) slot->attention_status = value; return rc; } /** * get_power_status - get power status of a slot * @hotplug_slot: slot to get status * @value: pointer to store status */ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { int retval, level; struct slot *slot = to_slot(hotplug_slot); retval = rtas_get_power_level(slot->power_domain, &level); if (!retval) *value = level; return retval; } /** * get_attention_status - get attention LED status * @hotplug_slot: slot to get status * @value: pointer to store status */ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); *value = slot->attention_status; return 0; } static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = to_slot(hotplug_slot); int rc, state; rc = rpaphp_get_sensor_state(slot, &state); *value = NOT_VALID; if (rc) return rc; if (state == EMPTY) *value = EMPTY; else if (state == PRESENT) *value = slot->state; return 0; } static enum pci_bus_speed get_max_bus_speed(struct slot *slot) { enum pci_bus_speed speed; switch (slot->type) { case 1: case 2: case 3: case 4: case 5: case 6: speed = PCI_SPEED_33MHz; /* speed for case 1-6 */ break; case 7: case 8: speed = PCI_SPEED_66MHz; break; case 11: case 14: speed = PCI_SPEED_66MHz_PCIX; break; case 12: case 15: speed = PCI_SPEED_100MHz_PCIX; break; case 13: case 16: speed = PCI_SPEED_133MHz_PCIX; break; default: speed = PCI_SPEED_UNKNOWN; break; } return speed; } static int get_children_props(struct device_node *dn, const __be32 **drc_indexes, const __be32 **drc_names, const __be32 **drc_types, const __be32 **drc_power_domains) { const __be32 *indexes, *names, *types, *domains; indexes = of_get_property(dn, "ibm,drc-indexes", NULL); names = of_get_property(dn, "ibm,drc-names", NULL); types = of_get_property(dn, "ibm,drc-types", NULL); 
domains = of_get_property(dn, "ibm,drc-power-domains", NULL); if (!indexes || !names || !types || !domains) { /* Slot does not have dynamically-removable children */ return -EINVAL; } if (drc_indexes) *drc_indexes = indexes; if (drc_names) /* &drc_names[1] contains NULL terminated slot names */ *drc_names = names; if (drc_types) /* &drc_types[1] contains NULL terminated slot types */ *drc_types = types; if (drc_power_domains) *drc_power_domains = domains; return 0; } /* Verify the existence of 'drc_name' and/or 'drc_type' within the * current node. First obtain its my-drc-index property. Next, * obtain the DRC info from its parent. Use the my-drc-index for * correlation, and obtain/validate the requested properties. */ static int rpaphp_check_drc_props_v1(struct device_node *dn, char *drc_name, char *drc_type, unsigned int my_index) { char *name_tmp, *type_tmp; const __be32 *indexes, *names; const __be32 *types, *domains; int i, rc; rc = get_children_props(dn->parent, &indexes, &names, &types, &domains); if (rc < 0) { return -EINVAL; } name_tmp = (char *) &names[1]; type_tmp = (char *) &types[1]; /* Iterate through parent properties, looking for my-drc-index */ for (i = 0; i < be32_to_cpu(indexes[0]); i++) { if (be32_to_cpu(indexes[i + 1]) == my_index) break; name_tmp += (strlen(name_tmp) + 1); type_tmp += (strlen(type_tmp) + 1); } if (((drc_name == NULL) || (drc_name && !strcmp(drc_name, name_tmp))) && ((drc_type == NULL) || (drc_type && !strcmp(drc_type, type_tmp)))) return 0; return -EINVAL; } static int rpaphp_check_drc_props_v2(struct device_node *dn, char *drc_name, char *drc_type, unsigned int my_index) { struct property *info; unsigned int entries; struct of_drc_info drc; const __be32 *value; char cell_drc_name[MAX_DRC_NAME_LEN]; int j; info = of_find_property(dn->parent, "ibm,drc-info", NULL); if (info == NULL) return -EINVAL; value = of_prop_next_u32(info, NULL, &entries); if (!value) return -EINVAL; else value++; for (j = 0; j < entries; j++) { of_read_drc_info_cell(&info, &value, &drc); /* Should now know end of current entry */ /* Found it */ if (my_index >= drc.drc_index_start && my_index <= drc.last_drc_index) { int index = my_index - drc.drc_index_start; sprintf(cell_drc_name, "%s%d", drc.drc_name_prefix, drc.drc_name_suffix_start + index); break; } } if (((drc_name == NULL) || (drc_name && !strcmp(drc_name, cell_drc_name))) && ((drc_type == NULL) || (drc_type && !strcmp(drc_type, drc.drc_type)))) return 0; return -EINVAL; } int rpaphp_check_drc_props(struct device_node *dn, char *drc_name, char *drc_type) { const __be32 *my_index; my_index = of_get_property(dn, "ibm,my-drc-index", NULL); if (!my_index) { /* Node isn't DLPAR/hotplug capable */ return -EINVAL; } if (of_property_present(dn->parent, "ibm,drc-info")) return rpaphp_check_drc_props_v2(dn, drc_name, drc_type, be32_to_cpu(*my_index)); else return rpaphp_check_drc_props_v1(dn, drc_name, drc_type, be32_to_cpu(*my_index)); } EXPORT_SYMBOL_GPL(rpaphp_check_drc_props); static int is_php_type(char *drc_type) { char *endptr; /* PCI Hotplug nodes have an integer for drc_type */ simple_strtoul(drc_type, &endptr, 10); if (endptr == drc_type) return 0; return 1; } /** * is_php_dn() - return 1 if this is a hotpluggable pci slot, else 0 * @dn: target &device_node * @indexes: passed to get_children_props() * @names: passed to get_children_props() * @types: returned from get_children_props() * @power_domains: * * This routine will return true only if the device node is * a hotpluggable slot. 
This routine will return false * for built-in pci slots (even when the built-in slots are * dlparable.) */ static int is_php_dn(struct device_node *dn, const __be32 **indexes, const __be32 **names, const __be32 **types, const __be32 **power_domains) { const __be32 *drc_types; int rc; rc = get_children_props(dn, indexes, names, &drc_types, power_domains); if (rc < 0) return 0; if (!is_php_type((char *) &drc_types[1])) return 0; *types = drc_types; return 1; } static int rpaphp_drc_info_add_slot(struct device_node *dn) { struct slot *slot; struct property *info; struct of_drc_info drc; char drc_name[MAX_DRC_NAME_LEN]; const __be32 *cur; u32 count; int retval = 0; info = of_find_property(dn, "ibm,drc-info", NULL); if (!info) return 0; cur = of_prop_next_u32(info, NULL, &count); if (cur) cur++; else return 0; of_read_drc_info_cell(&info, &cur, &drc); if (!is_php_type(drc.drc_type)) return 0; sprintf(drc_name, "%s%d", drc.drc_name_prefix, drc.drc_name_suffix_start); slot = alloc_slot_struct(dn, drc.drc_index_start, drc_name, drc.drc_power_domain); if (!slot) return -ENOMEM; slot->type = simple_strtoul(drc.drc_type, NULL, 10); retval = rpaphp_enable_slot(slot); if (!retval) retval = rpaphp_register_slot(slot); if (retval) dealloc_slot_struct(slot); return retval; } static int rpaphp_drc_add_slot(struct device_node *dn) { struct slot *slot; int retval = 0; int i; const __be32 *indexes, *names, *types, *power_domains; char *name, *type; /* If this is not a hotplug slot, return without doing anything. */ if (!is_php_dn(dn, &indexes, &names, &types, &power_domains)) return 0; dbg("Entry %s: dn=%pOF\n", __func__, dn); /* register PCI devices */ name = (char *) &names[1]; type = (char *) &types[1]; for (i = 0; i < be32_to_cpu(indexes[0]); i++) { int index; index = be32_to_cpu(indexes[i + 1]); slot = alloc_slot_struct(dn, index, name, be32_to_cpu(power_domains[i + 1])); if (!slot) return -ENOMEM; slot->type = simple_strtoul(type, NULL, 10); dbg("Found drc-index:0x%x drc-name:%s drc-type:%s\n", index, name, type); retval = rpaphp_enable_slot(slot); if (!retval) retval = rpaphp_register_slot(slot); if (retval) dealloc_slot_struct(slot); name += strlen(name) + 1; type += strlen(type) + 1; } dbg("%s - Exit: rc[%d]\n", __func__, retval); /* XXX FIXME: reports a failure only if last entry in loop failed */ return retval; } /** * rpaphp_add_slot -- declare a hotplug slot to the hotplug subsystem. * @dn: device node of slot * * This subroutine will register a hotpluggable slot with the * PCI hotplug infrastructure. This routine is typically called * during boot time, if the hotplug slots are present at boot time, * or is called later, by the dlpar add code, if the slot is * being dynamically added during runtime. * * If the device node points at an embedded (built-in) slot, this * routine will just return without doing anything, since embedded * slots cannot be hotplugged. * * To remove a slot, it suffices to call rpaphp_deregister_slot(). */ int rpaphp_add_slot(struct device_node *dn) { if (!of_node_name_eq(dn, "pci")) return 0; if (of_property_present(dn, "ibm,drc-info")) return rpaphp_drc_info_add_slot(dn); else return rpaphp_drc_add_slot(dn); } EXPORT_SYMBOL_GPL(rpaphp_add_slot); static void __exit cleanup_slots(void) { struct slot *slot, *next; /* * Unregister all of our slots with the pci_hotplug subsystem, * and free up all memory that we had allocated. 
*/ list_for_each_entry_safe(slot, next, &rpaphp_slot_head, rpaphp_slot_list) { list_del(&slot->rpaphp_slot_list); pci_hp_deregister(&slot->hotplug_slot); dealloc_slot_struct(slot); } } static int __init rpaphp_init(void) { struct device_node *dn; info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); for_each_node_by_name(dn, "pci") rpaphp_add_slot(dn); return 0; } static void __exit rpaphp_exit(void) { cleanup_slots(); } static int enable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = to_slot(hotplug_slot); int state; int retval; if (slot->state == CONFIGURED) return 0; retval = rpaphp_get_sensor_state(slot, &state); if (retval) return retval; if (state == PRESENT) { pseries_eeh_init_edev_recursive(PCI_DN(slot->dn)); pci_lock_rescan_remove(); pci_hp_add_devices(slot->bus); pci_unlock_rescan_remove(); slot->state = CONFIGURED; } else if (state == EMPTY) { slot->state = EMPTY; } else { err("%s: slot[%s] is in invalid state\n", __func__, slot->name); slot->state = NOT_VALID; return -EINVAL; } slot->bus->max_bus_speed = get_max_bus_speed(slot); return 0; } static int disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = to_slot(hotplug_slot); if (slot->state == NOT_CONFIGURED) return -EINVAL; pci_lock_rescan_remove(); pci_hp_remove_devices(slot->bus); pci_unlock_rescan_remove(); vm_unmap_aliases(); slot->state = NOT_CONFIGURED; return 0; } const struct hotplug_slot_ops rpaphp_hotplug_slot_ops = { .enable_slot = enable_slot, .disable_slot = disable_slot, .set_attention_status = set_attention_status, .get_power_status = get_power_status, .get_attention_status = get_attention_status, .get_adapter_status = get_adapter_status, }; module_init(rpaphp_init); module_exit(rpaphp_exit);
linux-master
drivers/pci/hotplug/rpaphp_core.c
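rpaphp_check_drc_props_v1() and rpaphp_drc_add_slot() above both rely on the v1 layout of the ibm,drc-* properties: the first cell of ibm,drc-indexes is the entry count, and ibm,drc-names / ibm,drc-types begin with a count cell followed by packed NUL-terminated strings that are walked in lock-step with the indexes. The sketch below only restates that traversal; for_each_drc_entry() is illustrative and not part of the driver.

#include <linux/of.h>
#include <linux/string.h>
#include <linux/printk.h>

/* Hypothetical walk over the v1 DRC properties, mirroring the pointer
 * arithmetic used by rpaphp_check_drc_props_v1() above. */
static void for_each_drc_entry(const __be32 *indexes, const __be32 *names,
			       const __be32 *types)
{
	const char *name = (const char *)&names[1];	/* packed strings */
	const char *type = (const char *)&types[1];
	int i, count = be32_to_cpu(indexes[0]);		/* entry count */

	for (i = 0; i < count; i++) {
		pr_debug("drc-index 0x%x name %s type %s\n",
			 be32_to_cpu(indexes[i + 1]), name, type);
		name += strlen(name) + 1;	/* step to next packed string */
		type += strlen(type) + 1;
	}
}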
// SPDX-License-Identifier: GPL-2.0+ /* * Standard Hot Plug Controller Driver * * Copyright (C) 1995,2001 Compaq Computer Corporation * Copyright (C) 2001 Greg Kroah-Hartman ([email protected]) * Copyright (C) 2001 IBM Corp. * Copyright (C) 2003-2004 Intel Corporation * * All rights reserved. * * Send feedback to <[email protected]>, <[email protected]> * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/pci.h> #include "../pci.h" #include "shpchp.h" static void interrupt_event_handler(struct work_struct *work); static int shpchp_enable_slot(struct slot *p_slot); static int shpchp_disable_slot(struct slot *p_slot); static int queue_interrupt_event(struct slot *p_slot, u32 event_type) { struct event_info *info; info = kmalloc(sizeof(*info), GFP_ATOMIC); if (!info) return -ENOMEM; info->event_type = event_type; info->p_slot = p_slot; INIT_WORK(&info->work, interrupt_event_handler); queue_work(p_slot->wq, &info->work); return 0; } u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl) { struct slot *p_slot; u32 event_type; /* Attention Button Change */ ctrl_dbg(ctrl, "Attention button interrupt received\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); /* * Button pressed - See if need to TAKE ACTION!!! */ ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot)); event_type = INT_BUTTON_PRESS; queue_interrupt_event(p_slot, event_type); return 0; } u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl) { struct slot *p_slot; u8 getstatus; u32 event_type; /* Switch Change */ ctrl_dbg(ctrl, "Switch interrupt received\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); ctrl_dbg(ctrl, "Card present %x Power status %x\n", p_slot->presence_save, p_slot->pwr_save); if (getstatus) { /* * Switch opened */ ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot)); event_type = INT_SWITCH_OPEN; if (p_slot->pwr_save && p_slot->presence_save) { event_type = INT_POWER_FAULT; ctrl_err(ctrl, "Surprise Removal of card\n"); } } else { /* * Switch closed */ ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot)); event_type = INT_SWITCH_CLOSE; } queue_interrupt_event(p_slot, event_type); return 1; } u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl) { struct slot *p_slot; u32 event_type; /* Presence Change */ ctrl_dbg(ctrl, "Presence/Notify input change\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); /* * Save the presence state */ p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); if (p_slot->presence_save) { /* * Card Present */ ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot)); event_type = INT_PRESENCE_ON; } else { /* * Not Present */ ctrl_info(ctrl, "Card not present on Slot(%s)\n", slot_name(p_slot)); event_type = INT_PRESENCE_OFF; } queue_interrupt_event(p_slot, event_type); return 1; } u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl) { struct slot *p_slot; u32 event_type; /* Power fault */ ctrl_dbg(ctrl, "Power fault interrupt received\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); if (!(p_slot->hpc_ops->query_power_fault(p_slot))) { /* * Power fault Cleared */ ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n", 
slot_name(p_slot)); p_slot->status = 0x00; event_type = INT_POWER_FAULT_CLEAR; } else { /* * Power fault */ ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot)); event_type = INT_POWER_FAULT; /* set power fault status for this board */ p_slot->status = 0xFF; ctrl_info(ctrl, "Power fault bit %x set\n", hp_slot); } queue_interrupt_event(p_slot, event_type); return 1; } /* The following routines constitute the bulk of the hotplug controller logic */ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot, enum pci_bus_speed speed) { int rc = 0; ctrl_dbg(ctrl, "Change speed to %d\n", speed); rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed); if (rc) { ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n", __func__); return WRONG_BUS_FREQUENCY; } return rc; } static int fix_bus_speed(struct controller *ctrl, struct slot *pslot, u8 flag, enum pci_bus_speed asp, enum pci_bus_speed bsp, enum pci_bus_speed msp) { int rc = 0; /* * If other slots on the same bus are occupied, we cannot * change the bus speed. */ if (flag) { if (asp < bsp) { ctrl_err(ctrl, "Speed of bus %x and adapter %x mismatch\n", bsp, asp); rc = WRONG_BUS_FREQUENCY; } return rc; } if (asp < msp) { if (bsp != asp) rc = change_bus_speed(ctrl, pslot, asp); } else { if (bsp != msp) rc = change_bus_speed(ctrl, pslot, msp); } return rc; } /** * board_added - Called after a board has been added to the system. * @p_slot: target &slot * * Turns power on for the board. * Configures board. */ static int board_added(struct slot *p_slot) { u8 hp_slot; u8 slots_not_empty = 0; int rc = 0; enum pci_bus_speed asp, bsp, msp; struct controller *ctrl = p_slot->ctrl; struct pci_bus *parent = ctrl->pci_dev->subordinate; hp_slot = p_slot->device - ctrl->slot_device_offset; ctrl_dbg(ctrl, "%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n", __func__, p_slot->device, ctrl->slot_device_offset, hp_slot); /* Power on slot without connecting to bus */ rc = p_slot->hpc_ops->power_on_slot(p_slot); if (rc) { ctrl_err(ctrl, "Failed to power on slot\n"); return -1; } if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) { rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz); if (rc) { ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n", __func__); return WRONG_BUS_FREQUENCY; } /* turn on board, blink green LED, turn off Amber LED */ rc = p_slot->hpc_ops->slot_enable(p_slot); if (rc) { ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); return rc; } } rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp); if (rc) { ctrl_err(ctrl, "Can't get adapter speed or bus mode mismatch\n"); return WRONG_BUS_FREQUENCY; } bsp = ctrl->pci_dev->subordinate->cur_bus_speed; msp = ctrl->pci_dev->subordinate->max_bus_speed; /* Check if there are other slots or devices on the same bus */ if (!list_empty(&ctrl->pci_dev->subordinate->devices)) slots_not_empty = 1; ctrl_dbg(ctrl, "%s: slots_not_empty %d, adapter_speed %d, bus_speed %d, max_bus_speed %d\n", __func__, slots_not_empty, asp, bsp, msp); rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, asp, bsp, msp); if (rc) return rc; /* turn on board, blink green LED, turn off Amber LED */ rc = p_slot->hpc_ops->slot_enable(p_slot); if (rc) { ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); return rc; } /* Wait for ~1 second */ msleep(1000); ctrl_dbg(ctrl, "%s: slot status = %x\n", __func__, p_slot->status); /* Check for a power fault */ if (p_slot->status == 0xFF) { /* power fault occurred, but it was benign */ ctrl_dbg(ctrl, 
"%s: Power fault\n", __func__); p_slot->status = 0; goto err_exit; } if (shpchp_configure_device(p_slot)) { ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n", pci_domain_nr(parent), p_slot->bus, p_slot->device); goto err_exit; } p_slot->status = 0; p_slot->is_a_board = 0x01; p_slot->pwr_save = 1; p_slot->hpc_ops->green_led_on(p_slot); return 0; err_exit: /* turn off slot, turn on Amber LED, turn off Green LED */ rc = p_slot->hpc_ops->slot_disable(p_slot); if (rc) { ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n", __func__); return rc; } return(rc); } /** * remove_board - Turns off slot and LEDs * @p_slot: target &slot */ static int remove_board(struct slot *p_slot) { struct controller *ctrl = p_slot->ctrl; u8 hp_slot; int rc; shpchp_unconfigure_device(p_slot); hp_slot = p_slot->device - ctrl->slot_device_offset; p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, hp_slot); /* Change status to shutdown */ if (p_slot->is_a_board) p_slot->status = 0x01; /* turn off slot, turn on Amber LED, turn off Green LED */ rc = p_slot->hpc_ops->slot_disable(p_slot); if (rc) { ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n", __func__); return rc; } rc = p_slot->hpc_ops->set_attention_status(p_slot, 0); if (rc) { ctrl_err(ctrl, "Issue of Set Attention command failed\n"); return rc; } p_slot->pwr_save = 0; p_slot->is_a_board = 0; return 0; } struct pushbutton_work_info { struct slot *p_slot; struct work_struct work; }; /** * shpchp_pushbutton_thread - handle pushbutton events * @work: &struct work_struct to be handled * * Scheduled procedure to handle blocking stuff for the pushbuttons. * Handles all pending events and exits. */ static void shpchp_pushbutton_thread(struct work_struct *work) { struct pushbutton_work_info *info = container_of(work, struct pushbutton_work_info, work); struct slot *p_slot = info->p_slot; mutex_lock(&p_slot->lock); switch (p_slot->state) { case POWEROFF_STATE: mutex_unlock(&p_slot->lock); shpchp_disable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; case POWERON_STATE: mutex_unlock(&p_slot->lock); if (shpchp_enable_slot(p_slot)) p_slot->hpc_ops->green_led_off(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; default: break; } mutex_unlock(&p_slot->lock); kfree(info); } void shpchp_queue_pushbutton_work(struct work_struct *work) { struct slot *p_slot = container_of(work, struct slot, work.work); struct pushbutton_work_info *info; info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n", __func__); return; } info->p_slot = p_slot; INIT_WORK(&info->work, shpchp_pushbutton_thread); mutex_lock(&p_slot->lock); switch (p_slot->state) { case BLINKINGOFF_STATE: p_slot->state = POWEROFF_STATE; break; case BLINKINGON_STATE: p_slot->state = POWERON_STATE; break; default: kfree(info); goto out; } queue_work(p_slot->wq, &info->work); out: mutex_unlock(&p_slot->lock); } static void update_slot_info(struct slot *slot) { slot->hpc_ops->get_power_status(slot, &slot->pwr_save); slot->hpc_ops->get_attention_status(slot, &slot->attention_save); slot->hpc_ops->get_latch_status(slot, &slot->latch_save); slot->hpc_ops->get_adapter_status(slot, &slot->presence_save); } /* * Note: This function must be called with slot->lock held */ static void handle_button_press_event(struct slot *p_slot) { u8 getstatus; struct controller *ctrl = p_slot->ctrl; switch (p_slot->state) { case STATIC_STATE: 
p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (getstatus) { p_slot->state = BLINKINGOFF_STATE; ctrl_info(ctrl, "PCI slot #%s - powering off due to button press\n", slot_name(p_slot)); } else { p_slot->state = BLINKINGON_STATE; ctrl_info(ctrl, "PCI slot #%s - powering on due to button press\n", slot_name(p_slot)); } /* blink green LED and turn off amber */ p_slot->hpc_ops->green_led_blink(p_slot); p_slot->hpc_ops->set_attention_status(p_slot, 0); queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); break; case BLINKINGOFF_STATE: case BLINKINGON_STATE: /* * Cancel if we are still blinking; this means that we * press the attention again before the 5 sec. limit * expires to cancel hot-add or hot-remove */ ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot)); cancel_delayed_work(&p_slot->work); if (p_slot->state == BLINKINGOFF_STATE) p_slot->hpc_ops->green_led_on(p_slot); else p_slot->hpc_ops->green_led_off(p_slot); p_slot->hpc_ops->set_attention_status(p_slot, 0); ctrl_info(ctrl, "PCI slot #%s - action canceled due to button press\n", slot_name(p_slot)); p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: case POWERON_STATE: /* * Ignore if the slot is on power-on or power-off state; * this means that the previous attention button action * to hot-add or hot-remove is undergoing */ ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot)); update_slot_info(p_slot); break; default: ctrl_warn(ctrl, "Not a valid state\n"); break; } } static void interrupt_event_handler(struct work_struct *work) { struct event_info *info = container_of(work, struct event_info, work); struct slot *p_slot = info->p_slot; mutex_lock(&p_slot->lock); switch (info->event_type) { case INT_BUTTON_PRESS: handle_button_press_event(p_slot); break; case INT_POWER_FAULT: ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__); p_slot->hpc_ops->set_attention_status(p_slot, 1); p_slot->hpc_ops->green_led_off(p_slot); break; default: update_slot_info(p_slot); break; } mutex_unlock(&p_slot->lock); kfree(info); } static int shpchp_enable_slot (struct slot *p_slot) { u8 getstatus = 0; int rc, retval = -ENODEV; struct controller *ctrl = p_slot->ctrl; /* Check to see if (latch closed, card present, power off) */ mutex_lock(&p_slot->ctrl->crit_sect); rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Already enabled on slot(%s)\n", slot_name(p_slot)); goto out; } p_slot->is_a_board = 1; /* We have to save the presence info for these slots */ p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save)); ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if ((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD && p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458) && p_slot->ctrl->num_slots == 1) { /* handle AMD POGO errata; this must be done before enable */ amd_pogo_errata_save_misc_reg(p_slot); retval = board_added(p_slot); /* handle AMD POGO errata; this must be done after enable */ amd_pogo_errata_restore_misc_reg(p_slot); } else retval = board_added(p_slot); if (retval) { 
p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); } update_slot_info(p_slot); out: mutex_unlock(&p_slot->ctrl->crit_sect); return retval; } static int shpchp_disable_slot (struct slot *p_slot) { u8 getstatus = 0; int rc, retval = -ENODEV; struct controller *ctrl = p_slot->ctrl; if (!p_slot->ctrl) return -ENODEV; /* Check to see if (latch closed, card present, power on) */ mutex_lock(&p_slot->ctrl->crit_sect); rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (rc || getstatus) { ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (rc || !getstatus) { ctrl_info(ctrl, "Already disabled on slot(%s)\n", slot_name(p_slot)); goto out; } retval = remove_board(p_slot); update_slot_info(p_slot); out: mutex_unlock(&p_slot->ctrl->crit_sect); return retval; } int shpchp_sysfs_enable_slot(struct slot *p_slot) { int retval = -ENODEV; struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { case BLINKINGON_STATE: cancel_delayed_work(&p_slot->work); fallthrough; case STATIC_STATE: p_slot->state = POWERON_STATE; mutex_unlock(&p_slot->lock); retval = shpchp_enable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; case POWERON_STATE: ctrl_info(ctrl, "Slot %s is already in powering on state\n", slot_name(p_slot)); break; case BLINKINGOFF_STATE: case POWEROFF_STATE: ctrl_info(ctrl, "Already enabled on slot %s\n", slot_name(p_slot)); break; default: ctrl_err(ctrl, "Not a valid state on slot %s\n", slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); return retval; } int shpchp_sysfs_disable_slot(struct slot *p_slot) { int retval = -ENODEV; struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { case BLINKINGOFF_STATE: cancel_delayed_work(&p_slot->work); fallthrough; case STATIC_STATE: p_slot->state = POWEROFF_STATE; mutex_unlock(&p_slot->lock); retval = shpchp_disable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: ctrl_info(ctrl, "Slot %s is already in powering off state\n", slot_name(p_slot)); break; case BLINKINGON_STATE: case POWERON_STATE: ctrl_info(ctrl, "Already disabled on slot %s\n", slot_name(p_slot)); break; default: ctrl_err(ctrl, "Not a valid state on slot %s\n", slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); return retval; }
linux-master
drivers/pci/hotplug/shpchp_ctrl.c
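shpchp_sysfs_enable_slot(), shpchp_sysfs_disable_slot() and the pushbutton thread above all follow the same locking discipline: mark the slot as transitioning under p_slot->lock, drop the lock across the slow power operation (board_added() can sleep for about a second), then retake the lock and return the slot to STATIC_STATE. The sketch below abstracts that pattern; slot_transition() and its callback parameter are hypothetical and only summarize what the driver open-codes in each switch arm, using struct slot and the state constants from shpchp.h.

#include <linux/mutex.h>

/* Hypothetical summary of the lock/unlock pattern used around slow slot
 * power operations in the functions above. */
static int slot_transition(struct slot *p_slot, int busy_state,
			   int (*power_op)(struct slot *))
{
	int retval;

	mutex_lock(&p_slot->lock);
	p_slot->state = busy_state;		/* e.g. POWERON_STATE */
	mutex_unlock(&p_slot->lock);		/* never hold it over HW ops */

	retval = power_op(p_slot);		/* enable or disable the slot */

	mutex_lock(&p_slot->lock);
	p_slot->state = STATIC_STATE;
	mutex_unlock(&p_slot->lock);
	return retval;
}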
// SPDX-License-Identifier: GPL-2.0+ /* * cpcihp_zt5550.c * * Intel/Ziatech ZT5550 CompactPCI Host Controller driver * * Copyright 2002 SOMA Networks, Inc. * Copyright 2001 Intel San Luis Obispo * Copyright 2000,2001 MontaVista Software Inc. * * Send feedback to <[email protected]> */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/signal.h> /* IRQF_SHARED */ #include "cpci_hotplug.h" #include "cpcihp_zt5550.h" #define DRIVER_VERSION "0.2" #define DRIVER_AUTHOR "Scott Murray <[email protected]>" #define DRIVER_DESC "ZT5550 CompactPCI Hot Plug Driver" #define MY_NAME "cpcihp_zt5550" #define dbg(format, arg...) \ do { \ if (debug) \ printk(KERN_DEBUG "%s: " format "\n", \ MY_NAME, ## arg); \ } while (0) #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME, ## arg) #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME, ## arg) #define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME, ## arg) /* local variables */ static bool debug; static bool poll; static struct cpci_hp_controller_ops zt5550_hpc_ops; static struct cpci_hp_controller zt5550_hpc; /* Primary cPCI bus bridge device */ static struct pci_dev *bus0_dev; static struct pci_bus *bus0; /* Host controller device */ static struct pci_dev *hc_dev; /* Host controller register addresses */ static void __iomem *hc_registers; static void __iomem *csr_hc_index; static void __iomem *csr_hc_data; static void __iomem *csr_int_status; static void __iomem *csr_int_mask; static int zt5550_hc_config(struct pci_dev *pdev) { int ret; /* Since we know that no boards exist with two HC chips, treat it as an error */ if (hc_dev) { err("too many host controller devices?"); return -EBUSY; } ret = pci_enable_device(pdev); if (ret) { err("cannot enable %s\n", pci_name(pdev)); return ret; } hc_dev = pdev; dbg("hc_dev = %p", hc_dev); dbg("pci resource start %llx", (unsigned long long)pci_resource_start(hc_dev, 1)); dbg("pci resource len %llx", (unsigned long long)pci_resource_len(hc_dev, 1)); if (!request_mem_region(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1), MY_NAME)) { err("cannot reserve MMIO region"); ret = -ENOMEM; goto exit_disable_device; } hc_registers = ioremap(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1)); if (!hc_registers) { err("cannot remap MMIO region %llx @ %llx", (unsigned long long)pci_resource_len(hc_dev, 1), (unsigned long long)pci_resource_start(hc_dev, 1)); ret = -ENODEV; goto exit_release_region; } csr_hc_index = hc_registers + CSR_HCINDEX; csr_hc_data = hc_registers + CSR_HCDATA; csr_int_status = hc_registers + CSR_INTSTAT; csr_int_mask = hc_registers + CSR_INTMASK; /* * Disable host control, fault and serial interrupts */ dbg("disabling host control, fault and serial interrupts"); writeb((u8) HC_INT_MASK_REG, csr_hc_index); writeb((u8) ALL_INDEXED_INTS_MASK, csr_hc_data); dbg("disabled host control, fault and serial interrupts"); /* * Disable timer0, timer1 and ENUM interrupts */ dbg("disabling timer0, timer1 and ENUM interrupts"); writeb((u8) ALL_DIRECT_INTS_MASK, csr_int_mask); dbg("disabled timer0, timer1 and ENUM interrupts"); return 0; exit_release_region: release_mem_region(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1)); exit_disable_device: pci_disable_device(hc_dev); return ret; } static int zt5550_hc_cleanup(void) { if (!hc_dev) return -ENODEV; iounmap(hc_registers); 
release_mem_region(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1)); pci_disable_device(hc_dev); return 0; } static int zt5550_hc_query_enum(void) { u8 value; value = inb_p(ENUM_PORT); return ((value & ENUM_MASK) == ENUM_MASK); } static int zt5550_hc_check_irq(void *dev_id) { int ret; u8 reg; ret = 0; if (dev_id == zt5550_hpc.dev_id) { reg = readb(csr_int_status); if (reg) ret = 1; } return ret; } static int zt5550_hc_enable_irq(void) { u8 reg; if (hc_dev == NULL) return -ENODEV; reg = readb(csr_int_mask); reg = reg & ~ENUM_INT_MASK; writeb(reg, csr_int_mask); return 0; } static int zt5550_hc_disable_irq(void) { u8 reg; if (hc_dev == NULL) return -ENODEV; reg = readb(csr_int_mask); reg = reg | ENUM_INT_MASK; writeb(reg, csr_int_mask); return 0; } static int zt5550_hc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int status; status = zt5550_hc_config(pdev); if (status != 0) return status; dbg("returned from zt5550_hc_config"); memset(&zt5550_hpc, 0, sizeof(struct cpci_hp_controller)); zt5550_hpc_ops.query_enum = zt5550_hc_query_enum; zt5550_hpc.ops = &zt5550_hpc_ops; if (!poll) { zt5550_hpc.irq = hc_dev->irq; zt5550_hpc.irq_flags = IRQF_SHARED; zt5550_hpc.dev_id = hc_dev; zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq; zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq; zt5550_hpc_ops.check_irq = zt5550_hc_check_irq; } else { info("using ENUM# polling mode"); } status = cpci_hp_register_controller(&zt5550_hpc); if (status != 0) { err("could not register cPCI hotplug controller"); goto init_hc_error; } dbg("registered controller"); /* Look for first device matching cPCI bus's bridge vendor and device IDs */ bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21154, NULL); if (!bus0_dev) { status = -ENODEV; goto init_register_error; } bus0 = bus0_dev->subordinate; pci_dev_put(bus0_dev); status = cpci_hp_register_bus(bus0, 0x0a, 0x0f); if (status != 0) { err("could not register cPCI hotplug bus"); goto init_register_error; } dbg("registered bus"); status = cpci_hp_start(); if (status != 0) { err("could not started cPCI hotplug system"); cpci_hp_unregister_bus(bus0); goto init_register_error; } dbg("started cpci hp system"); return 0; init_register_error: cpci_hp_unregister_controller(&zt5550_hpc); init_hc_error: err("status = %d", status); zt5550_hc_cleanup(); return status; } static void zt5550_hc_remove_one(struct pci_dev *pdev) { cpci_hp_stop(); cpci_hp_unregister_bus(bus0); cpci_hp_unregister_controller(&zt5550_hpc); zt5550_hc_cleanup(); } static const struct pci_device_id zt5550_hc_pci_tbl[] = { { PCI_VENDOR_ID_ZIATECH, PCI_DEVICE_ID_ZIATECH_5550_HC, PCI_ANY_ID, PCI_ANY_ID, }, { 0, } }; MODULE_DEVICE_TABLE(pci, zt5550_hc_pci_tbl); static struct pci_driver zt5550_hc_driver = { .name = "zt5550_hc", .id_table = zt5550_hc_pci_tbl, .probe = zt5550_hc_init_one, .remove = zt5550_hc_remove_one, }; static int __init zt5550_init(void) { struct resource *r; int rc; info(DRIVER_DESC " version: " DRIVER_VERSION); r = request_region(ENUM_PORT, 1, "#ENUM hotswap signal register"); if (!r) return -EBUSY; rc = pci_register_driver(&zt5550_hc_driver); if (rc < 0) release_region(ENUM_PORT, 1); return rc; } static void __exit zt5550_exit(void) { pci_unregister_driver(&zt5550_hc_driver); release_region(ENUM_PORT, 1); } module_init(zt5550_init); module_exit(zt5550_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, 0644); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); module_param(poll, 
bool, 0644); MODULE_PARM_DESC(poll, "#ENUM polling mode enabled or not");
linux-master
drivers/pci/hotplug/cpcihp_zt5550.c
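zt5550_hc_enable_irq() and zt5550_hc_disable_irq() above differ only in whether they clear or set ENUM_INT_MASK in their read-modify-write of csr_int_mask. The sketch below folds the two into one hypothetical helper to make the bit manipulation explicit; it is not part of the driver and reuses hc_dev, csr_int_mask and ENUM_INT_MASK exactly as declared in cpcihp_zt5550.c and its header.

#include <linux/io.h>
#include <linux/errno.h>

/* Hypothetical combined enable/disable of the ENUM# interrupt: a set mask
 * bit blocks the interrupt, so enabling clears it. */
static int zt5550_set_enum_int(bool enable)
{
	u8 reg;

	if (!hc_dev)
		return -ENODEV;

	reg = readb(csr_int_mask);
	if (enable)
		reg &= ~ENUM_INT_MASK;	/* unmask ENUM# */
	else
		reg |= ENUM_INT_MASK;	/* mask ENUM# */
	writeb(reg, csr_int_mask);

	return 0;
}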