python_code | repo_name | file_path
---|---|---|
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>
#include <rdma/ib_user_verbs.h>
#include "efa.h"
#define PCI_DEV_ID_EFA0_VF 0xefa0
#define PCI_DEV_ID_EFA1_VF 0xefa1
#define PCI_DEV_ID_EFA2_VF 0xefa2
static const struct pci_device_id efa_pci_tbl[] = {
{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA0_VF) },
{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA1_VF) },
{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA2_VF) },
{ }
};
MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_DEVICE_TABLE(pci, efa_pci_tbl);
#define EFA_REG_BAR 0
#define EFA_MEM_BAR 2
#define EFA_BASE_BAR_MASK (BIT(EFA_REG_BAR) | BIT(EFA_MEM_BAR))
#define EFA_AENQ_ENABLED_GROUPS \
(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))
/* This handler will be called for an unknown event group or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
struct efa_admin_aenq_entry *aenq_e)
{
struct efa_dev *dev = (struct efa_dev *)data;
ibdev_err(&dev->ibdev,
"Unknown event was received or event with unimplemented handler\n");
}
static void efa_keep_alive(void *data, struct efa_admin_aenq_entry *aenq_e)
{
struct efa_dev *dev = (struct efa_dev *)data;
atomic64_inc(&dev->stats.keep_alive_rcvd);
}
static struct efa_aenq_handlers aenq_handlers = {
.handlers = {
[EFA_ADMIN_KEEP_ALIVE] = efa_keep_alive,
},
.unimplemented_handler = unimplemented_aenq_handler
};
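/*
 * Editorial sketch (not part of the upstream driver): efa_com is expected to
 * dispatch AENQ entries through the table above, falling back to the
 * unimplemented handler for unknown groups. The helper below only illustrates
 * that lookup pattern; its name and the use of ARRAY_SIZE() as the bound are
 * assumptions made for this example.
 */
static void example_dispatch_aenq(struct efa_dev *dev,
				  struct efa_admin_aenq_entry *aenq_e,
				  u16 group)
{
	void (*handler)(void *data, struct efa_admin_aenq_entry *aenq_e);

	handler = group < ARRAY_SIZE(aenq_handlers.handlers) ?
		  aenq_handlers.handlers[group] : NULL;
	if (!handler)
		handler = aenq_handlers.unimplemented_handler;
	handler(dev, aenq_e);
}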
static void efa_release_bars(struct efa_dev *dev, int bars_mask)
{
struct pci_dev *pdev = dev->pdev;
int release_bars;
release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & bars_mask;
pci_release_selected_regions(pdev, release_bars);
}
static void efa_process_comp_eqe(struct efa_dev *dev, struct efa_admin_eqe *eqe)
{
u16 cqn = eqe->u.comp_event.cqn;
struct efa_cq *cq;
/* Safe to load as we're in irq and removal calls synchronize_irq() */
cq = xa_load(&dev->cqs_xa, cqn);
if (unlikely(!cq)) {
ibdev_err_ratelimited(&dev->ibdev,
"Completion event on non-existent CQ[%u]",
cqn);
return;
}
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
static void efa_process_eqe(struct efa_com_eq *eeq, struct efa_admin_eqe *eqe)
{
struct efa_dev *dev = container_of(eeq->edev, struct efa_dev, edev);
if (likely(EFA_GET(&eqe->common, EFA_ADMIN_EQE_EVENT_TYPE) ==
EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION))
efa_process_comp_eqe(dev, eqe);
else
ibdev_err_ratelimited(&dev->ibdev,
"Unknown event type received %lu",
EFA_GET(&eqe->common,
EFA_ADMIN_EQE_EVENT_TYPE));
}
static irqreturn_t efa_intr_msix_comp(int irq, void *data)
{
struct efa_eq *eq = data;
struct efa_com_dev *edev = eq->eeq.edev;
efa_com_eq_comp_intr_handler(edev, &eq->eeq);
return IRQ_HANDLED;
}
static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
{
struct efa_dev *dev = data;
efa_com_admin_q_comp_intr_handler(&dev->edev);
efa_com_aenq_intr_handler(&dev->edev, data);
return IRQ_HANDLED;
}
static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
{
int err;
err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data);
if (err) {
dev_err(&dev->pdev->dev, "Failed to request irq %s (%d)\n",
irq->name, err);
return err;
}
irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask);
return 0;
}
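/*
 * Note on efa_setup_comp_irq() below: completion vectors start at
 * EFA_COMP_EQS_VEC_BASE, so (vector - EFA_COMP_EQS_VEC_BASE) doubles as the
 * CPU index used both in the IRQ name and as the affinity hint.
 */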
static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq,
int vector)
{
u32 cpu;
cpu = vector - EFA_COMP_EQS_VEC_BASE;
snprintf(eq->irq.name, EFA_IRQNAME_SIZE, "efa-comp%d@pci:%s", cpu,
pci_name(dev->pdev));
eq->irq.handler = efa_intr_msix_comp;
eq->irq.data = eq;
eq->irq.vector = vector;
eq->irq.irqn = pci_irq_vector(dev->pdev, vector);
cpumask_set_cpu(cpu, &eq->irq.affinity_hint_mask);
}
static void efa_free_irq(struct efa_dev *dev, struct efa_irq *irq)
{
irq_set_affinity_hint(irq->irqn, NULL);
free_irq(irq->irqn, irq->data);
}
static void efa_setup_mgmnt_irq(struct efa_dev *dev)
{
u32 cpu;
snprintf(dev->admin_irq.name, EFA_IRQNAME_SIZE,
"efa-mgmnt@pci:%s", pci_name(dev->pdev));
dev->admin_irq.handler = efa_intr_msix_mgmnt;
dev->admin_irq.data = dev;
dev->admin_irq.vector = dev->admin_msix_vector_idx;
dev->admin_irq.irqn = pci_irq_vector(dev->pdev,
dev->admin_msix_vector_idx);
cpu = cpumask_first(cpu_online_mask);
cpumask_set_cpu(cpu,
&dev->admin_irq.affinity_hint_mask);
dev_info(&dev->pdev->dev, "Setup irq:%d name:%s\n",
dev->admin_irq.irqn,
dev->admin_irq.name);
}
static int efa_set_mgmnt_irq(struct efa_dev *dev)
{
efa_setup_mgmnt_irq(dev);
return efa_request_irq(dev, &dev->admin_irq);
}
static int efa_request_doorbell_bar(struct efa_dev *dev)
{
u8 db_bar_idx = dev->dev_attr.db_bar;
struct pci_dev *pdev = dev->pdev;
int bars;
int err;
if (!(BIT(db_bar_idx) & EFA_BASE_BAR_MASK)) {
bars = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(db_bar_idx);
err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
if (err) {
dev_err(&dev->pdev->dev,
"pci_request_selected_regions for bar %d failed %d\n",
db_bar_idx, err);
return err;
}
}
dev->db_bar_addr = pci_resource_start(dev->pdev, db_bar_idx);
dev->db_bar_len = pci_resource_len(dev->pdev, db_bar_idx);
return 0;
}
static void efa_release_doorbell_bar(struct efa_dev *dev)
{
if (!(BIT(dev->dev_attr.db_bar) & EFA_BASE_BAR_MASK))
efa_release_bars(dev, BIT(dev->dev_attr.db_bar));
}
static void efa_update_hw_hints(struct efa_dev *dev,
struct efa_com_get_hw_hints_result *hw_hints)
{
struct efa_com_dev *edev = &dev->edev;
if (hw_hints->mmio_read_timeout)
edev->mmio_read.mmio_read_timeout =
hw_hints->mmio_read_timeout * 1000;
if (hw_hints->poll_interval)
edev->aq.poll_interval = hw_hints->poll_interval;
if (hw_hints->admin_completion_timeout)
edev->aq.completion_timeout =
hw_hints->admin_completion_timeout;
}
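/*
 * efa_stats_init() below walks dev->stats as a flat array of atomic64_t,
 * which relies on the stats structure containing nothing but atomic64_t
 * counters.
 */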
static void efa_stats_init(struct efa_dev *dev)
{
atomic64_t *s = (atomic64_t *)&dev->stats;
int i;
for (i = 0; i < sizeof(dev->stats) / sizeof(*s); i++, s++)
atomic64_set(s, 0);
}
static void efa_set_host_info(struct efa_dev *dev)
{
struct efa_admin_set_feature_resp resp = {};
struct efa_admin_set_feature_cmd cmd = {};
struct efa_admin_host_info *hinf;
u32 bufsz = sizeof(*hinf);
dma_addr_t hinf_dma;
if (!efa_com_check_supported_feature_id(&dev->edev,
EFA_ADMIN_HOST_INFO))
return;
/* Failures in host info set shall not disturb probe */
hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
GFP_KERNEL);
if (!hinf)
return;
strscpy(hinf->os_dist_str, utsname()->release,
sizeof(hinf->os_dist_str));
hinf->os_type = EFA_ADMIN_OS_LINUX;
strscpy(hinf->kernel_ver_str, utsname()->version,
sizeof(hinf->kernel_ver_str));
hinf->kernel_ver = LINUX_VERSION_CODE;
EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
PCI_SLOT(dev->pdev->devfn));
EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
PCI_FUNC(dev->pdev->devfn));
EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
EFA_COMMON_SPEC_VERSION_MAJOR);
EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
EFA_COMMON_SPEC_VERSION_MINOR);
EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);
efa_com_set_feature_ex(&dev->edev, &resp, &cmd, EFA_ADMIN_HOST_INFO,
hinf_dma, bufsz);
dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
}
static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
{
efa_com_eq_destroy(&dev->edev, &eq->eeq);
efa_free_irq(dev, &eq->irq);
}
static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec)
{
int err;
efa_setup_comp_irq(dev, eq, msix_vec);
err = efa_request_irq(dev, &eq->irq);
if (err)
return err;
err = efa_com_eq_init(&dev->edev, &eq->eeq, efa_process_eqe,
dev->dev_attr.max_eq_depth, msix_vec);
if (err)
goto err_free_comp_irq;
return 0;
err_free_comp_irq:
efa_free_irq(dev, &eq->irq);
return err;
}
static int efa_create_eqs(struct efa_dev *dev)
{
unsigned int neqs = dev->dev_attr.max_eq;
int err;
int i;
neqs = min_t(unsigned int, neqs, num_online_cpus());
dev->neqs = neqs;
dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
if (!dev->eqs)
return -ENOMEM;
for (i = 0; i < neqs; i++) {
err = efa_create_eq(dev, &dev->eqs[i],
i + EFA_COMP_EQS_VEC_BASE);
if (err)
goto err_destroy_eqs;
}
return 0;
err_destroy_eqs:
for (i--; i >= 0; i--)
efa_destroy_eq(dev, &dev->eqs[i]);
kfree(dev->eqs);
return err;
}
static void efa_destroy_eqs(struct efa_dev *dev)
{
int i;
for (i = 0; i < dev->neqs; i++)
efa_destroy_eq(dev, &dev->eqs[i]);
kfree(dev->eqs);
}
static const struct ib_device_ops efa_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_EFA,
.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,
.alloc_hw_port_stats = efa_alloc_hw_port_stats,
.alloc_hw_device_stats = efa_alloc_hw_device_stats,
.alloc_pd = efa_alloc_pd,
.alloc_ucontext = efa_alloc_ucontext,
.create_cq = efa_create_cq,
.create_qp = efa_create_qp,
.create_user_ah = efa_create_ah,
.dealloc_pd = efa_dealloc_pd,
.dealloc_ucontext = efa_dealloc_ucontext,
.dereg_mr = efa_dereg_mr,
.destroy_ah = efa_destroy_ah,
.destroy_cq = efa_destroy_cq,
.destroy_qp = efa_destroy_qp,
.get_hw_stats = efa_get_hw_stats,
.get_link_layer = efa_port_link_layer,
.get_port_immutable = efa_get_port_immutable,
.mmap = efa_mmap,
.mmap_free = efa_mmap_free,
.modify_qp = efa_modify_qp,
.query_device = efa_query_device,
.query_gid = efa_query_gid,
.query_pkey = efa_query_pkey,
.query_port = efa_query_port,
.query_qp = efa_query_qp,
.reg_user_mr = efa_reg_mr,
.reg_user_mr_dmabuf = efa_reg_user_mr_dmabuf,
INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp),
INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
};
static int efa_ib_device_add(struct efa_dev *dev)
{
struct efa_com_get_hw_hints_result hw_hints;
struct pci_dev *pdev = dev->pdev;
int err;
efa_stats_init(dev);
err = efa_com_get_device_attr(&dev->edev, &dev->dev_attr);
if (err)
return err;
dev_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar);
err = efa_request_doorbell_bar(dev);
if (err)
return err;
err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
if (err)
goto err_release_doorbell_bar;
efa_update_hw_hints(dev, &hw_hints);
/* Try to enable all the available aenq groups */
err = efa_com_set_aenq_config(&dev->edev, EFA_AENQ_ENABLED_GROUPS);
if (err)
goto err_release_doorbell_bar;
err = efa_create_eqs(dev);
if (err)
goto err_release_doorbell_bar;
efa_set_host_info(dev);
dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.num_comp_vectors = dev->neqs ?: 1;
dev->ibdev.dev.parent = &pdev->dev;
ib_set_device_ops(&dev->ibdev, &efa_dev_ops);
err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
if (err)
goto err_destroy_eqs;
ibdev_info(&dev->ibdev, "IB device registered\n");
return 0;
err_destroy_eqs:
efa_destroy_eqs(dev);
err_release_doorbell_bar:
efa_release_doorbell_bar(dev);
return err;
}
static void efa_ib_device_remove(struct efa_dev *dev)
{
ibdev_info(&dev->ibdev, "Unregister ib device\n");
ib_unregister_device(&dev->ibdev);
efa_destroy_eqs(dev);
efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
efa_release_doorbell_bar(dev);
}
static void efa_disable_msix(struct efa_dev *dev)
{
pci_free_irq_vectors(dev->pdev);
}
static int efa_enable_msix(struct efa_dev *dev)
{
int msix_vecs, irq_num;
/*
* Reserve the max msix vectors we might need, one vector is reserved
* for admin.
*/
msix_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
num_online_cpus() + 1);
dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
msix_vecs);
dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
irq_num = pci_alloc_irq_vectors(dev->pdev, msix_vecs,
msix_vecs, PCI_IRQ_MSIX);
if (irq_num < 0) {
dev_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n",
irq_num);
return -ENOSPC;
}
if (irq_num != msix_vecs) {
efa_disable_msix(dev);
dev_err(&dev->pdev->dev,
"Allocated %d MSI-X (out of %d requested)\n",
irq_num, msix_vecs);
return -ENOSPC;
}
return 0;
}
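/*
 * Note: efa_enable_msix() above requests min == max vectors, so the
 * allocation is all-or-nothing: one admin vector plus up to one completion
 * vector per online CPU, capped by pci_msix_vec_count().
 */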
static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
{
int dma_width;
int err;
err = efa_com_dev_reset(edev, EFA_REGS_RESET_NORMAL);
if (err)
return err;
err = efa_com_validate_version(edev);
if (err)
return err;
dma_width = efa_com_get_dma_width(edev);
if (dma_width < 0) {
err = dma_width;
return err;
}
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_width));
if (err) {
dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", err);
return err;
}
dma_set_max_seg_size(&pdev->dev, UINT_MAX);
return 0;
}
static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
{
struct efa_com_dev *edev;
struct efa_dev *dev;
int bars;
int err;
err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
return ERR_PTR(err);
}
pci_set_master(pdev);
dev = ib_alloc_device(efa_dev, ibdev);
if (!dev) {
dev_err(&pdev->dev, "Device alloc failed\n");
err = -ENOMEM;
goto err_disable_device;
}
pci_set_drvdata(pdev, dev);
edev = &dev->edev;
edev->efa_dev = dev;
edev->dmadev = &pdev->dev;
dev->pdev = pdev;
xa_init(&dev->cqs_xa);
bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK;
err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
if (err) {
dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
err);
goto err_ibdev_destroy;
}
dev->reg_bar_addr = pci_resource_start(pdev, EFA_REG_BAR);
dev->reg_bar_len = pci_resource_len(pdev, EFA_REG_BAR);
dev->mem_bar_addr = pci_resource_start(pdev, EFA_MEM_BAR);
dev->mem_bar_len = pci_resource_len(pdev, EFA_MEM_BAR);
edev->reg_bar = devm_ioremap(&pdev->dev,
dev->reg_bar_addr,
dev->reg_bar_len);
if (!edev->reg_bar) {
dev_err(&pdev->dev, "Failed to remap register bar\n");
err = -EFAULT;
goto err_release_bars;
}
err = efa_com_mmio_reg_read_init(edev);
if (err) {
dev_err(&pdev->dev, "Failed to init readless MMIO\n");
goto err_iounmap;
}
err = efa_device_init(edev, pdev);
if (err) {
dev_err(&pdev->dev, "EFA device init failed\n");
if (err == -ETIME)
err = -EPROBE_DEFER;
goto err_reg_read_destroy;
}
err = efa_enable_msix(dev);
if (err)
goto err_reg_read_destroy;
edev->aq.msix_vector_idx = dev->admin_msix_vector_idx;
edev->aenq.msix_vector_idx = dev->admin_msix_vector_idx;
err = efa_set_mgmnt_irq(dev);
if (err)
goto err_disable_msix;
err = efa_com_admin_init(edev, &aenq_handlers);
if (err)
goto err_free_mgmnt_irq;
return dev;
err_free_mgmnt_irq:
efa_free_irq(dev, &dev->admin_irq);
err_disable_msix:
efa_disable_msix(dev);
err_reg_read_destroy:
efa_com_mmio_reg_read_destroy(edev);
err_iounmap:
devm_iounmap(&pdev->dev, edev->reg_bar);
err_release_bars:
efa_release_bars(dev, EFA_BASE_BAR_MASK);
err_ibdev_destroy:
ib_dealloc_device(&dev->ibdev);
err_disable_device:
pci_disable_device(pdev);
return ERR_PTR(err);
}
static void efa_remove_device(struct pci_dev *pdev)
{
struct efa_dev *dev = pci_get_drvdata(pdev);
struct efa_com_dev *edev;
edev = &dev->edev;
efa_com_admin_destroy(edev);
efa_free_irq(dev, &dev->admin_irq);
efa_disable_msix(dev);
efa_com_mmio_reg_read_destroy(edev);
devm_iounmap(&pdev->dev, edev->reg_bar);
efa_release_bars(dev, EFA_BASE_BAR_MASK);
xa_destroy(&dev->cqs_xa);
ib_dealloc_device(&dev->ibdev);
pci_disable_device(pdev);
}
static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct efa_dev *dev;
int err;
dev = efa_probe_device(pdev);
if (IS_ERR(dev))
return PTR_ERR(dev);
err = efa_ib_device_add(dev);
if (err)
goto err_remove_device;
return 0;
err_remove_device:
efa_remove_device(pdev);
return err;
}
static void efa_remove(struct pci_dev *pdev)
{
struct efa_dev *dev = pci_get_drvdata(pdev);
efa_ib_device_remove(dev);
efa_remove_device(pdev);
}
static struct pci_driver efa_pci_driver = {
.name = DRV_MODULE_NAME,
.id_table = efa_pci_tbl,
.probe = efa_probe,
.remove = efa_remove,
};
module_pci_driver(efa_pci_driver);
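/*
 * Editorial summary (not upstream text): efa_probe() builds the device in two
 * stages. efa_probe_device() brings up PCI, the BARs, MSI-X and the admin
 * queue, then efa_ib_device_add() queries device attributes, creates the
 * event queues and registers the IB device. efa_remove() unwinds the same two
 * stages in reverse order.
 */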
| linux-master | drivers/infiniband/hw/efa/efa_main.c |
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
* Copyright 2018-2023 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
#include "efa_com_cmd.h"
int efa_com_create_qp(struct efa_com_dev *edev,
struct efa_com_create_qp_params *params,
struct efa_com_create_qp_result *res)
{
struct efa_admin_create_qp_cmd create_qp_cmd = {};
struct efa_admin_create_qp_resp cmd_completion;
struct efa_com_admin_queue *aq = &edev->aq;
int err;
create_qp_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_QP;
create_qp_cmd.pd = params->pd;
create_qp_cmd.qp_type = params->qp_type;
create_qp_cmd.rq_base_addr = params->rq_base_addr;
create_qp_cmd.send_cq_idx = params->send_cq_idx;
create_qp_cmd.recv_cq_idx = params->recv_cq_idx;
create_qp_cmd.qp_alloc_size.send_queue_ring_size =
params->sq_ring_size_in_bytes;
create_qp_cmd.qp_alloc_size.send_queue_depth =
params->sq_depth;
create_qp_cmd.qp_alloc_size.recv_queue_ring_size =
params->rq_ring_size_in_bytes;
create_qp_cmd.qp_alloc_size.recv_queue_depth =
params->rq_depth;
create_qp_cmd.uar = params->uarn;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&create_qp_cmd,
sizeof(create_qp_cmd),
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to create qp [%d]\n", err);
return err;
}
res->qp_handle = cmd_completion.qp_handle;
res->qp_num = cmd_completion.qp_num;
res->sq_db_offset = cmd_completion.sq_db_offset;
res->rq_db_offset = cmd_completion.rq_db_offset;
res->llq_descriptors_offset = cmd_completion.llq_descriptors_offset;
res->send_sub_cq_idx = cmd_completion.send_sub_cq_idx;
res->recv_sub_cq_idx = cmd_completion.recv_sub_cq_idx;
return 0;
}
int efa_com_modify_qp(struct efa_com_dev *edev,
struct efa_com_modify_qp_params *params)
{
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_modify_qp_cmd cmd = {};
struct efa_admin_modify_qp_resp resp;
int err;
cmd.aq_common_desc.opcode = EFA_ADMIN_MODIFY_QP;
cmd.modify_mask = params->modify_mask;
cmd.qp_handle = params->qp_handle;
cmd.qp_state = params->qp_state;
cmd.cur_qp_state = params->cur_qp_state;
cmd.qkey = params->qkey;
cmd.sq_psn = params->sq_psn;
cmd.sq_drained_async_notify = params->sq_drained_async_notify;
cmd.rnr_retry = params->rnr_retry;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
ibdev_err_ratelimited(
edev->efa_dev,
"Failed to modify qp-%u modify_mask[%#x] [%d]\n",
cmd.qp_handle, cmd.modify_mask, err);
return err;
}
return 0;
}
int efa_com_query_qp(struct efa_com_dev *edev,
struct efa_com_query_qp_params *params,
struct efa_com_query_qp_result *result)
{
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_query_qp_cmd cmd = {};
struct efa_admin_query_qp_resp resp;
int err;
cmd.aq_common_desc.opcode = EFA_ADMIN_QUERY_QP;
cmd.qp_handle = params->qp_handle;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to query qp-%u [%d]\n",
cmd.qp_handle, err);
return err;
}
result->qp_state = resp.qp_state;
result->qkey = resp.qkey;
result->sq_draining = resp.sq_draining;
result->sq_psn = resp.sq_psn;
result->rnr_retry = resp.rnr_retry;
return 0;
}
int efa_com_destroy_qp(struct efa_com_dev *edev,
struct efa_com_destroy_qp_params *params)
{
struct efa_admin_destroy_qp_resp cmd_completion;
struct efa_admin_destroy_qp_cmd qp_cmd = {};
struct efa_com_admin_queue *aq = &edev->aq;
int err;
qp_cmd.aq_common_desc.opcode = EFA_ADMIN_DESTROY_QP;
qp_cmd.qp_handle = params->qp_handle;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&qp_cmd,
sizeof(qp_cmd),
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to destroy qp-%u [%d]\n",
qp_cmd.qp_handle, err);
return err;
}
return 0;
}
int efa_com_create_cq(struct efa_com_dev *edev,
struct efa_com_create_cq_params *params,
struct efa_com_create_cq_result *result)
{
struct efa_admin_create_cq_resp cmd_completion = {};
struct efa_admin_create_cq_cmd create_cmd = {};
struct efa_com_admin_queue *aq = &edev->aq;
int err;
create_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_CQ;
EFA_SET(&create_cmd.cq_caps_2,
EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS,
params->entry_size_in_bytes / 4);
create_cmd.cq_depth = params->cq_depth;
create_cmd.num_sub_cqs = params->num_sub_cqs;
create_cmd.uar = params->uarn;
if (params->interrupt_mode_enabled) {
EFA_SET(&create_cmd.cq_caps_1,
EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED, 1);
create_cmd.eqn = params->eqn;
}
if (params->set_src_addr) {
EFA_SET(&create_cmd.cq_caps_2,
EFA_ADMIN_CREATE_CQ_CMD_SET_SRC_ADDR, 1);
}
efa_com_set_dma_addr(params->dma_addr,
&create_cmd.cq_ba.mem_addr_high,
&create_cmd.cq_ba.mem_addr_low);
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&create_cmd,
sizeof(create_cmd),
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to create cq[%d]\n", err);
return err;
}
result->cq_idx = cmd_completion.cq_idx;
result->actual_depth = params->cq_depth;
result->db_off = cmd_completion.db_offset;
result->db_valid = EFA_GET(&cmd_completion.flags,
EFA_ADMIN_CREATE_CQ_RESP_DB_VALID);
return 0;
}
int efa_com_destroy_cq(struct efa_com_dev *edev,
struct efa_com_destroy_cq_params *params)
{
struct efa_admin_destroy_cq_cmd destroy_cmd = {};
struct efa_admin_destroy_cq_resp destroy_resp;
struct efa_com_admin_queue *aq = &edev->aq;
int err;
destroy_cmd.cq_idx = params->cq_idx;
destroy_cmd.aq_common_desc.opcode = EFA_ADMIN_DESTROY_CQ;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&destroy_cmd,
sizeof(destroy_cmd),
(struct efa_admin_acq_entry *)&destroy_resp,
sizeof(destroy_resp));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to destroy CQ-%u [%d]\n",
params->cq_idx, err);
return err;
}
return 0;
}
int efa_com_register_mr(struct efa_com_dev *edev,
struct efa_com_reg_mr_params *params,
struct efa_com_reg_mr_result *result)
{
struct efa_admin_reg_mr_resp cmd_completion;
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_reg_mr_cmd mr_cmd = {};
int err;
mr_cmd.aq_common_desc.opcode = EFA_ADMIN_REG_MR;
mr_cmd.pd = params->pd;
mr_cmd.mr_length = params->mr_length_in_bytes;
EFA_SET(&mr_cmd.flags, EFA_ADMIN_REG_MR_CMD_PHYS_PAGE_SIZE_SHIFT,
params->page_shift);
mr_cmd.iova = params->iova;
mr_cmd.permissions = params->permissions;
if (params->inline_pbl) {
memcpy(mr_cmd.pbl.inline_pbl_array,
params->pbl.inline_pbl_array,
sizeof(mr_cmd.pbl.inline_pbl_array));
} else {
mr_cmd.pbl.pbl.length = params->pbl.pbl.length;
mr_cmd.pbl.pbl.address.mem_addr_low =
params->pbl.pbl.address.mem_addr_low;
mr_cmd.pbl.pbl.address.mem_addr_high =
params->pbl.pbl.address.mem_addr_high;
EFA_SET(&mr_cmd.aq_common_desc.flags,
EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
if (params->indirect)
EFA_SET(&mr_cmd.aq_common_desc.flags,
EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT, 1);
}
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&mr_cmd,
sizeof(mr_cmd),
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to register mr [%d]\n", err);
return err;
}
result->l_key = cmd_completion.l_key;
result->r_key = cmd_completion.r_key;
return 0;
}
int efa_com_dereg_mr(struct efa_com_dev *edev,
struct efa_com_dereg_mr_params *params)
{
struct efa_admin_dereg_mr_resp cmd_completion;
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_dereg_mr_cmd mr_cmd = {};
int err;
mr_cmd.aq_common_desc.opcode = EFA_ADMIN_DEREG_MR;
mr_cmd.l_key = params->l_key;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&mr_cmd,
sizeof(mr_cmd),
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to de-register mr(lkey-%u) [%d]\n",
mr_cmd.l_key, err);
return err;
}
return 0;
}
int efa_com_create_ah(struct efa_com_dev *edev,
struct efa_com_create_ah_params *params,
struct efa_com_create_ah_result *result)
{
struct efa_admin_create_ah_resp cmd_completion;
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_create_ah_cmd ah_cmd = {};
int err;
ah_cmd.aq_common_desc.opcode = EFA_ADMIN_CREATE_AH;
memcpy(ah_cmd.dest_addr, params->dest_addr, sizeof(ah_cmd.dest_addr));
ah_cmd.pd = params->pdn;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&ah_cmd,
sizeof(ah_cmd),
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to create ah for %pI6 [%d]\n",
ah_cmd.dest_addr, err);
return err;
}
result->ah = cmd_completion.ah;
return 0;
}
int efa_com_destroy_ah(struct efa_com_dev *edev,
struct efa_com_destroy_ah_params *params)
{
struct efa_admin_destroy_ah_resp cmd_completion;
struct efa_admin_destroy_ah_cmd ah_cmd = {};
struct efa_com_admin_queue *aq = &edev->aq;
int err;
ah_cmd.aq_common_desc.opcode = EFA_ADMIN_DESTROY_AH;
ah_cmd.ah = params->ah;
ah_cmd.pd = params->pdn;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&ah_cmd,
sizeof(ah_cmd),
(struct efa_admin_acq_entry *)&cmd_completion,
sizeof(cmd_completion));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to destroy ah-%d pd-%d [%d]\n",
ah_cmd.ah, ah_cmd.pd, err);
return err;
}
return 0;
}
bool
efa_com_check_supported_feature_id(struct efa_com_dev *edev,
enum efa_admin_aq_feature_id feature_id)
{
u32 feature_mask = 1 << feature_id;
/* Device attributes are always supported */
if (feature_id != EFA_ADMIN_DEVICE_ATTR &&
!(edev->supported_features & feature_mask))
return false;
return true;
}
static int efa_com_get_feature_ex(struct efa_com_dev *edev,
struct efa_admin_get_feature_resp *get_resp,
enum efa_admin_aq_feature_id feature_id,
dma_addr_t control_buf_dma_addr,
u32 control_buff_size)
{
struct efa_admin_get_feature_cmd get_cmd = {};
struct efa_com_admin_queue *aq;
int err;
if (!efa_com_check_supported_feature_id(edev, feature_id)) {
ibdev_err_ratelimited(edev->efa_dev,
"Feature %d isn't supported\n",
feature_id);
return -EOPNOTSUPP;
}
aq = &edev->aq;
get_cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_FEATURE;
if (control_buff_size)
EFA_SET(&get_cmd.aq_common_descriptor.flags,
EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&get_cmd.control_buffer.address.mem_addr_high,
&get_cmd.control_buffer.address.mem_addr_low);
get_cmd.control_buffer.length = control_buff_size;
get_cmd.feature_common.feature_id = feature_id;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)
&get_cmd,
sizeof(get_cmd),
(struct efa_admin_acq_entry *)
get_resp,
sizeof(*get_resp));
if (err) {
ibdev_err_ratelimited(
edev->efa_dev,
"Failed to submit get_feature command %d [%d]\n",
feature_id, err);
return err;
}
return 0;
}
static int efa_com_get_feature(struct efa_com_dev *edev,
struct efa_admin_get_feature_resp *get_resp,
enum efa_admin_aq_feature_id feature_id)
{
return efa_com_get_feature_ex(edev, get_resp, feature_id, 0, 0);
}
int efa_com_get_device_attr(struct efa_com_dev *edev,
struct efa_com_get_device_attr_result *result)
{
struct efa_admin_get_feature_resp resp;
int err;
err = efa_com_get_feature(edev, &resp, EFA_ADMIN_DEVICE_ATTR);
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to get device attributes %d\n",
err);
return err;
}
result->page_size_cap = resp.u.device_attr.page_size_cap;
result->fw_version = resp.u.device_attr.fw_version;
result->admin_api_version = resp.u.device_attr.admin_api_version;
result->device_version = resp.u.device_attr.device_version;
result->supported_features = resp.u.device_attr.supported_features;
result->phys_addr_width = resp.u.device_attr.phys_addr_width;
result->virt_addr_width = resp.u.device_attr.virt_addr_width;
result->db_bar = resp.u.device_attr.db_bar;
result->max_rdma_size = resp.u.device_attr.max_rdma_size;
result->device_caps = resp.u.device_attr.device_caps;
if (result->admin_api_version < 1) {
ibdev_err_ratelimited(
edev->efa_dev,
"Failed to get device attr api version [%u < 1]\n",
result->admin_api_version);
return -EINVAL;
}
edev->supported_features = resp.u.device_attr.supported_features;
err = efa_com_get_feature(edev, &resp,
EFA_ADMIN_QUEUE_ATTR);
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to get queue attributes %d\n",
err);
return err;
}
result->max_qp = resp.u.queue_attr.max_qp;
result->max_sq_depth = resp.u.queue_attr.max_sq_depth;
result->max_rq_depth = resp.u.queue_attr.max_rq_depth;
result->max_cq = resp.u.queue_attr.max_cq;
result->max_cq_depth = resp.u.queue_attr.max_cq_depth;
result->inline_buf_size = resp.u.queue_attr.inline_buf_size;
result->max_sq_sge = resp.u.queue_attr.max_wr_send_sges;
result->max_rq_sge = resp.u.queue_attr.max_wr_recv_sges;
result->max_mr = resp.u.queue_attr.max_mr;
result->max_mr_pages = resp.u.queue_attr.max_mr_pages;
result->max_pd = resp.u.queue_attr.max_pd;
result->max_ah = resp.u.queue_attr.max_ah;
result->max_llq_size = resp.u.queue_attr.max_llq_size;
result->sub_cqs_per_cq = resp.u.queue_attr.sub_cqs_per_cq;
result->max_wr_rdma_sge = resp.u.queue_attr.max_wr_rdma_sges;
result->max_tx_batch = resp.u.queue_attr.max_tx_batch;
result->min_sq_depth = resp.u.queue_attr.min_sq_depth;
err = efa_com_get_feature(edev, &resp, EFA_ADMIN_NETWORK_ATTR);
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to get network attributes %d\n",
err);
return err;
}
memcpy(result->addr, resp.u.network_attr.addr,
sizeof(resp.u.network_attr.addr));
result->mtu = resp.u.network_attr.mtu;
if (efa_com_check_supported_feature_id(edev,
EFA_ADMIN_EVENT_QUEUE_ATTR)) {
err = efa_com_get_feature(edev, &resp,
EFA_ADMIN_EVENT_QUEUE_ATTR);
if (err) {
ibdev_err_ratelimited(
edev->efa_dev,
"Failed to get event queue attributes %d\n",
err);
return err;
}
result->max_eq = resp.u.event_queue_attr.max_eq;
result->max_eq_depth = resp.u.event_queue_attr.max_eq_depth;
result->event_bitmask = resp.u.event_queue_attr.event_bitmask;
}
return 0;
}
int efa_com_get_hw_hints(struct efa_com_dev *edev,
struct efa_com_get_hw_hints_result *result)
{
struct efa_admin_get_feature_resp resp;
int err;
err = efa_com_get_feature(edev, &resp, EFA_ADMIN_HW_HINTS);
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to get hw hints %d\n", err);
return err;
}
result->admin_completion_timeout = resp.u.hw_hints.admin_completion_timeout;
result->driver_watchdog_timeout = resp.u.hw_hints.driver_watchdog_timeout;
result->mmio_read_timeout = resp.u.hw_hints.mmio_read_timeout;
result->poll_interval = resp.u.hw_hints.poll_interval;
return 0;
}
int efa_com_set_feature_ex(struct efa_com_dev *edev,
struct efa_admin_set_feature_resp *set_resp,
struct efa_admin_set_feature_cmd *set_cmd,
enum efa_admin_aq_feature_id feature_id,
dma_addr_t control_buf_dma_addr,
u32 control_buff_size)
{
struct efa_com_admin_queue *aq;
int err;
if (!efa_com_check_supported_feature_id(edev, feature_id)) {
ibdev_err_ratelimited(edev->efa_dev,
"Feature %d isn't supported\n",
feature_id);
return -EOPNOTSUPP;
}
aq = &edev->aq;
set_cmd->aq_common_descriptor.opcode = EFA_ADMIN_SET_FEATURE;
if (control_buff_size) {
set_cmd->aq_common_descriptor.flags = 0;
EFA_SET(&set_cmd->aq_common_descriptor.flags,
EFA_ADMIN_AQ_COMMON_DESC_CTRL_DATA, 1);
efa_com_set_dma_addr(control_buf_dma_addr,
&set_cmd->control_buffer.address.mem_addr_high,
&set_cmd->control_buffer.address.mem_addr_low);
}
set_cmd->control_buffer.length = control_buff_size;
set_cmd->feature_common.feature_id = feature_id;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)set_cmd,
sizeof(*set_cmd),
(struct efa_admin_acq_entry *)set_resp,
sizeof(*set_resp));
if (err) {
ibdev_err_ratelimited(
edev->efa_dev,
"Failed to submit set_feature command %d error: %d\n",
feature_id, err);
return err;
}
return 0;
}
static int efa_com_set_feature(struct efa_com_dev *edev,
struct efa_admin_set_feature_resp *set_resp,
struct efa_admin_set_feature_cmd *set_cmd,
enum efa_admin_aq_feature_id feature_id)
{
return efa_com_set_feature_ex(edev, set_resp, set_cmd, feature_id,
0, 0);
}
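/*
 * The _ex variants above take an external control buffer (DMA address and
 * length); efa_com_get_feature() and efa_com_set_feature() cover the inline
 * case by passing a zero address and zero length.
 */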
int efa_com_set_aenq_config(struct efa_com_dev *edev, u32 groups)
{
struct efa_admin_get_feature_resp get_resp;
struct efa_admin_set_feature_resp set_resp;
struct efa_admin_set_feature_cmd cmd = {};
int err;
ibdev_dbg(edev->efa_dev, "Configuring aenq with groups[%#x]\n", groups);
err = efa_com_get_feature(edev, &get_resp, EFA_ADMIN_AENQ_CONFIG);
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to get aenq attributes: %d\n",
err);
return err;
}
ibdev_dbg(edev->efa_dev,
"Get aenq groups: supported[%#x] enabled[%#x]\n",
get_resp.u.aenq.supported_groups,
get_resp.u.aenq.enabled_groups);
if ((get_resp.u.aenq.supported_groups & groups) != groups) {
ibdev_err_ratelimited(
edev->efa_dev,
"Trying to set unsupported aenq groups[%#x] supported[%#x]\n",
groups, get_resp.u.aenq.supported_groups);
return -EOPNOTSUPP;
}
cmd.u.aenq.enabled_groups = groups;
err = efa_com_set_feature(edev, &set_resp, &cmd,
EFA_ADMIN_AENQ_CONFIG);
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to set aenq attributes: %d\n",
err);
return err;
}
return 0;
}
int efa_com_alloc_pd(struct efa_com_dev *edev,
struct efa_com_alloc_pd_result *result)
{
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_alloc_pd_cmd cmd = {};
struct efa_admin_alloc_pd_resp resp;
int err;
cmd.aq_common_descriptor.opcode = EFA_ADMIN_ALLOC_PD;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to allocate pd[%d]\n", err);
return err;
}
result->pdn = resp.pd;
return 0;
}
int efa_com_dealloc_pd(struct efa_com_dev *edev,
struct efa_com_dealloc_pd_params *params)
{
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_dealloc_pd_cmd cmd = {};
struct efa_admin_dealloc_pd_resp resp;
int err;
cmd.aq_common_descriptor.opcode = EFA_ADMIN_DEALLOC_PD;
cmd.pd = params->pdn;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to deallocate pd-%u [%d]\n",
cmd.pd, err);
return err;
}
return 0;
}
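/*
 * Editorial sketch (not part of the upstream driver): a hypothetical caller
 * pairing efa_com_alloc_pd() with efa_com_dealloc_pd(). The function name is
 * made up for illustration; the structures and fields come from the helpers
 * above.
 */
static int example_pd_roundtrip(struct efa_com_dev *edev)
{
	struct efa_com_dealloc_pd_params dealloc_params = {};
	struct efa_com_alloc_pd_result alloc_result;
	int err;

	err = efa_com_alloc_pd(edev, &alloc_result);
	if (err)
		return err;

	/* Hand the PD number back to the device. */
	dealloc_params.pdn = alloc_result.pdn;
	return efa_com_dealloc_pd(edev, &dealloc_params);
}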
int efa_com_alloc_uar(struct efa_com_dev *edev,
struct efa_com_alloc_uar_result *result)
{
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_alloc_uar_cmd cmd = {};
struct efa_admin_alloc_uar_resp resp;
int err;
cmd.aq_common_descriptor.opcode = EFA_ADMIN_ALLOC_UAR;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to allocate uar[%d]\n", err);
return err;
}
result->uarn = resp.uar;
return 0;
}
int efa_com_dealloc_uar(struct efa_com_dev *edev,
struct efa_com_dealloc_uar_params *params)
{
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_dealloc_uar_cmd cmd = {};
struct efa_admin_dealloc_uar_resp resp;
int err;
cmd.aq_common_descriptor.opcode = EFA_ADMIN_DEALLOC_UAR;
cmd.uar = params->uarn;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
ibdev_err_ratelimited(edev->efa_dev,
"Failed to deallocate uar-%u [%d]\n",
cmd.uar, err);
return err;
}
return 0;
}
int efa_com_get_stats(struct efa_com_dev *edev,
struct efa_com_get_stats_params *params,
union efa_com_get_stats_result *result)
{
struct efa_com_admin_queue *aq = &edev->aq;
struct efa_admin_aq_get_stats_cmd cmd = {};
struct efa_admin_acq_get_stats_resp resp;
int err;
cmd.aq_common_descriptor.opcode = EFA_ADMIN_GET_STATS;
cmd.type = params->type;
cmd.scope = params->scope;
cmd.scope_modifier = params->scope_modifier;
err = efa_com_cmd_exec(aq,
(struct efa_admin_aq_entry *)&cmd,
sizeof(cmd),
(struct efa_admin_acq_entry *)&resp,
sizeof(resp));
if (err) {
ibdev_err_ratelimited(
edev->efa_dev,
"Failed to get stats type-%u scope-%u.%u [%d]\n",
cmd.type, cmd.scope, cmd.scope_modifier, err);
return err;
}
switch (cmd.type) {
case EFA_ADMIN_GET_STATS_TYPE_BASIC:
result->basic_stats.tx_bytes = resp.u.basic_stats.tx_bytes;
result->basic_stats.tx_pkts = resp.u.basic_stats.tx_pkts;
result->basic_stats.rx_bytes = resp.u.basic_stats.rx_bytes;
result->basic_stats.rx_pkts = resp.u.basic_stats.rx_pkts;
result->basic_stats.rx_drops = resp.u.basic_stats.rx_drops;
break;
case EFA_ADMIN_GET_STATS_TYPE_MESSAGES:
result->messages_stats.send_bytes = resp.u.messages_stats.send_bytes;
result->messages_stats.send_wrs = resp.u.messages_stats.send_wrs;
result->messages_stats.recv_bytes = resp.u.messages_stats.recv_bytes;
result->messages_stats.recv_wrs = resp.u.messages_stats.recv_wrs;
break;
case EFA_ADMIN_GET_STATS_TYPE_RDMA_READ:
result->rdma_read_stats.read_wrs = resp.u.rdma_read_stats.read_wrs;
result->rdma_read_stats.read_bytes = resp.u.rdma_read_stats.read_bytes;
result->rdma_read_stats.read_wr_err = resp.u.rdma_read_stats.read_wr_err;
result->rdma_read_stats.read_resp_bytes = resp.u.rdma_read_stats.read_resp_bytes;
break;
case EFA_ADMIN_GET_STATS_TYPE_RDMA_WRITE:
result->rdma_write_stats.write_wrs = resp.u.rdma_write_stats.write_wrs;
result->rdma_write_stats.write_bytes = resp.u.rdma_write_stats.write_bytes;
result->rdma_write_stats.write_wr_err = resp.u.rdma_write_stats.write_wr_err;
result->rdma_write_stats.write_recv_bytes = resp.u.rdma_write_stats.write_recv_bytes;
break;
}
return 0;
}
| linux-master | drivers/infiniband/hw/efa/efa_com_cmd.c |
/*
* Copyright(c) 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* This file contains OPA VNIC EMA Interface functions.
*/
#include "opa_vnic_internal.h"
/**
* opa_vnic_vema_report_event - send trap to report the specified event
* @adapter: vnic port adapter
* @event: event to be reported
*
* This function calls vema api to send a trap for the given event.
*/
void opa_vnic_vema_report_event(struct opa_vnic_adapter *adapter, u8 event)
{
struct __opa_veswport_info *info = &adapter->info;
struct __opa_veswport_trap trap_data;
trap_data.fabric_id = info->vesw.fabric_id;
trap_data.veswid = info->vesw.vesw_id;
trap_data.veswportnum = info->vport.port_num;
trap_data.opaportnum = adapter->port_num;
trap_data.veswportindex = adapter->vport_num;
trap_data.opcode = event;
opa_vnic_vema_send_trap(adapter, &trap_data, info->vport.encap_slid);
}
/**
* opa_vnic_get_summary_counters - get summary counters
* @adapter: vnic port adapter
* @cntrs: pointer to destination summary counters structure
*
* This function populates the summary counters maintained by the
* given adapter into the destination address provided.
*/
void opa_vnic_get_summary_counters(struct opa_vnic_adapter *adapter,
struct opa_veswport_summary_counters *cntrs)
{
struct opa_vnic_stats vstats;
__be64 *dst;
u64 *src;
memset(&vstats, 0, sizeof(vstats));
spin_lock(&adapter->stats_lock);
adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats);
spin_unlock(&adapter->stats_lock);
cntrs->vp_instance = cpu_to_be16(adapter->vport_num);
cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id);
cntrs->veswport_num = cpu_to_be32(adapter->port_num);
cntrs->tx_errors = cpu_to_be64(vstats.netstats.tx_errors);
cntrs->rx_errors = cpu_to_be64(vstats.netstats.rx_errors);
cntrs->tx_packets = cpu_to_be64(vstats.netstats.tx_packets);
cntrs->rx_packets = cpu_to_be64(vstats.netstats.rx_packets);
cntrs->tx_bytes = cpu_to_be64(vstats.netstats.tx_bytes);
cntrs->rx_bytes = cpu_to_be64(vstats.netstats.rx_bytes);
/*
* This loop depends on the layout of the opa_veswport_summary_counters
* and opa_vnic_stats structures staying in sync.
*/
for (dst = &cntrs->tx_unicast, src = &vstats.tx_grp.unicast;
dst < &cntrs->reserved[0]; dst++, src++) {
*dst = cpu_to_be64(*src);
}
}
/**
* opa_vnic_get_error_counters - get error counters
* @adapter: vnic port adapter
* @cntrs: pointer to destination error counters structure
*
* This function populates the error counters maintained by the
* given adapter into the destination address provided.
*/
void opa_vnic_get_error_counters(struct opa_vnic_adapter *adapter,
struct opa_veswport_error_counters *cntrs)
{
struct opa_vnic_stats vstats;
memset(&vstats, 0, sizeof(vstats));
spin_lock(&adapter->stats_lock);
adapter->rn_ops->ndo_get_stats64(adapter->netdev, &vstats.netstats);
spin_unlock(&adapter->stats_lock);
cntrs->vp_instance = cpu_to_be16(adapter->vport_num);
cntrs->vesw_id = cpu_to_be16(adapter->info.vesw.vesw_id);
cntrs->veswport_num = cpu_to_be32(adapter->port_num);
cntrs->tx_errors = cpu_to_be64(vstats.netstats.tx_errors);
cntrs->rx_errors = cpu_to_be64(vstats.netstats.rx_errors);
cntrs->tx_dlid_zero = cpu_to_be64(vstats.tx_dlid_zero);
cntrs->tx_drop_state = cpu_to_be64(vstats.tx_drop_state);
cntrs->tx_logic = cpu_to_be64(vstats.netstats.tx_fifo_errors +
vstats.netstats.tx_carrier_errors);
cntrs->rx_bad_veswid = cpu_to_be64(vstats.netstats.rx_nohandler);
cntrs->rx_runt = cpu_to_be64(vstats.rx_runt);
cntrs->rx_oversize = cpu_to_be64(vstats.rx_oversize);
cntrs->rx_drop_state = cpu_to_be64(vstats.rx_drop_state);
cntrs->rx_logic = cpu_to_be64(vstats.netstats.rx_fifo_errors);
}
/**
* opa_vnic_get_vesw_info -- Get the vesw information
* @adapter: vnic port adapter
* @info: pointer to destination vesw info structure
*
* This function copies the vesw info that is maintained by the
* given adapter to the destination address provided.
*/
void opa_vnic_get_vesw_info(struct opa_vnic_adapter *adapter,
struct opa_vesw_info *info)
{
struct __opa_vesw_info *src = &adapter->info.vesw;
int i;
info->fabric_id = cpu_to_be16(src->fabric_id);
info->vesw_id = cpu_to_be16(src->vesw_id);
memcpy(info->rsvd0, src->rsvd0, ARRAY_SIZE(src->rsvd0));
info->def_port_mask = cpu_to_be16(src->def_port_mask);
memcpy(info->rsvd1, src->rsvd1, ARRAY_SIZE(src->rsvd1));
info->pkey = cpu_to_be16(src->pkey);
memcpy(info->rsvd2, src->rsvd2, ARRAY_SIZE(src->rsvd2));
info->u_mcast_dlid = cpu_to_be32(src->u_mcast_dlid);
for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
info->u_ucast_dlid[i] = cpu_to_be32(src->u_ucast_dlid[i]);
info->rc = cpu_to_be32(src->rc);
memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
info->eth_mtu = cpu_to_be16(src->eth_mtu);
memcpy(info->rsvd4, src->rsvd4, ARRAY_SIZE(src->rsvd4));
}
/**
* opa_vnic_set_vesw_info -- Set the vesw information
* @adapter: vnic port adapter
* @info: pointer to vesw info structure
*
* This function updates the vesw info that is maintained by the
* given adapter with vesw info provided. Reserved fields are stored
* and returned back to EM as is.
*/
void opa_vnic_set_vesw_info(struct opa_vnic_adapter *adapter,
struct opa_vesw_info *info)
{
struct __opa_vesw_info *dst = &adapter->info.vesw;
int i;
dst->fabric_id = be16_to_cpu(info->fabric_id);
dst->vesw_id = be16_to_cpu(info->vesw_id);
memcpy(dst->rsvd0, info->rsvd0, ARRAY_SIZE(info->rsvd0));
dst->def_port_mask = be16_to_cpu(info->def_port_mask);
memcpy(dst->rsvd1, info->rsvd1, ARRAY_SIZE(info->rsvd1));
dst->pkey = be16_to_cpu(info->pkey);
memcpy(dst->rsvd2, info->rsvd2, ARRAY_SIZE(info->rsvd2));
dst->u_mcast_dlid = be32_to_cpu(info->u_mcast_dlid);
for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++)
dst->u_ucast_dlid[i] = be32_to_cpu(info->u_ucast_dlid[i]);
dst->rc = be32_to_cpu(info->rc);
memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
dst->eth_mtu = be16_to_cpu(info->eth_mtu);
memcpy(dst->rsvd4, info->rsvd4, ARRAY_SIZE(info->rsvd4));
}
/**
* opa_vnic_get_per_veswport_info -- Get the vesw per port information
* @adapter: vnic port adapter
* @info: pointer to destination vport info structure
*
* This function copies the vesw per port info that is maintained by the
* given adapter to the destination address provided.
* Note that the read only fields are not copied.
*/
void opa_vnic_get_per_veswport_info(struct opa_vnic_adapter *adapter,
struct opa_per_veswport_info *info)
{
struct __opa_per_veswport_info *src = &adapter->info.vport;
info->port_num = cpu_to_be32(src->port_num);
info->eth_link_status = src->eth_link_status;
memcpy(info->rsvd0, src->rsvd0, ARRAY_SIZE(src->rsvd0));
memcpy(info->base_mac_addr, src->base_mac_addr,
ARRAY_SIZE(info->base_mac_addr));
info->config_state = src->config_state;
info->oper_state = src->oper_state;
info->max_mac_tbl_ent = cpu_to_be16(src->max_mac_tbl_ent);
info->max_smac_ent = cpu_to_be16(src->max_smac_ent);
info->mac_tbl_digest = cpu_to_be32(src->mac_tbl_digest);
memcpy(info->rsvd1, src->rsvd1, ARRAY_SIZE(src->rsvd1));
info->encap_slid = cpu_to_be32(src->encap_slid);
memcpy(info->pcp_to_sc_uc, src->pcp_to_sc_uc,
ARRAY_SIZE(info->pcp_to_sc_uc));
memcpy(info->pcp_to_vl_uc, src->pcp_to_vl_uc,
ARRAY_SIZE(info->pcp_to_vl_uc));
memcpy(info->pcp_to_sc_mc, src->pcp_to_sc_mc,
ARRAY_SIZE(info->pcp_to_sc_mc));
memcpy(info->pcp_to_vl_mc, src->pcp_to_vl_mc,
ARRAY_SIZE(info->pcp_to_vl_mc));
info->non_vlan_sc_uc = src->non_vlan_sc_uc;
info->non_vlan_vl_uc = src->non_vlan_vl_uc;
info->non_vlan_sc_mc = src->non_vlan_sc_mc;
info->non_vlan_vl_mc = src->non_vlan_vl_mc;
memcpy(info->rsvd2, src->rsvd2, ARRAY_SIZE(src->rsvd2));
info->uc_macs_gen_count = cpu_to_be16(src->uc_macs_gen_count);
info->mc_macs_gen_count = cpu_to_be16(src->mc_macs_gen_count);
memcpy(info->rsvd3, src->rsvd3, ARRAY_SIZE(src->rsvd3));
}
/**
* opa_vnic_set_per_veswport_info -- Set vesw per port information
* @adapter: vnic port adapter
* @info: pointer to vport info structure
*
* This function updates the vesw per port info that is maintained by the
* given adapter with vesw per port info provided. Reserved fields are
* stored and returned back to EM as is.
*/
void opa_vnic_set_per_veswport_info(struct opa_vnic_adapter *adapter,
struct opa_per_veswport_info *info)
{
struct __opa_per_veswport_info *dst = &adapter->info.vport;
dst->port_num = be32_to_cpu(info->port_num);
memcpy(dst->rsvd0, info->rsvd0, ARRAY_SIZE(info->rsvd0));
memcpy(dst->base_mac_addr, info->base_mac_addr,
ARRAY_SIZE(dst->base_mac_addr));
dst->config_state = info->config_state;
memcpy(dst->rsvd1, info->rsvd1, ARRAY_SIZE(info->rsvd1));
dst->encap_slid = be32_to_cpu(info->encap_slid);
memcpy(dst->pcp_to_sc_uc, info->pcp_to_sc_uc,
ARRAY_SIZE(dst->pcp_to_sc_uc));
memcpy(dst->pcp_to_vl_uc, info->pcp_to_vl_uc,
ARRAY_SIZE(dst->pcp_to_vl_uc));
memcpy(dst->pcp_to_sc_mc, info->pcp_to_sc_mc,
ARRAY_SIZE(dst->pcp_to_sc_mc));
memcpy(dst->pcp_to_vl_mc, info->pcp_to_vl_mc,
ARRAY_SIZE(dst->pcp_to_vl_mc));
dst->non_vlan_sc_uc = info->non_vlan_sc_uc;
dst->non_vlan_vl_uc = info->non_vlan_vl_uc;
dst->non_vlan_sc_mc = info->non_vlan_sc_mc;
dst->non_vlan_vl_mc = info->non_vlan_vl_mc;
memcpy(dst->rsvd2, info->rsvd2, ARRAY_SIZE(info->rsvd2));
memcpy(dst->rsvd3, info->rsvd3, ARRAY_SIZE(info->rsvd3));
}
/**
* opa_vnic_query_mcast_macs - query multicast mac list
* @adapter: vnic port adapter
* @macs: pointer to mac list
*
* This function populates the provided mac list with the configured
* multicast addresses in the adapter.
*/
void opa_vnic_query_mcast_macs(struct opa_vnic_adapter *adapter,
struct opa_veswport_iface_macs *macs)
{
u16 start_idx, num_macs, idx = 0, count = 0;
struct netdev_hw_addr *ha;
start_idx = be16_to_cpu(macs->start_idx);
num_macs = be16_to_cpu(macs->num_macs_in_msg);
netdev_for_each_mc_addr(ha, adapter->netdev) {
struct opa_vnic_iface_mac_entry *entry = &macs->entry[count];
if (start_idx > idx++)
continue;
else if (num_macs == count)
break;
memcpy(entry, ha->addr, sizeof(*entry));
count++;
}
macs->tot_macs_in_lst = cpu_to_be16(netdev_mc_count(adapter->netdev));
macs->num_macs_in_msg = cpu_to_be16(count);
macs->gen_count = cpu_to_be16(adapter->info.vport.mc_macs_gen_count);
}
/**
* opa_vnic_query_ucast_macs - query unicast mac list
* @adapter: vnic port adapter
* @macs: pointer to mac list
*
* This function populates the provided mac list with the configured
* unicast addresses in the adapter.
*/
void opa_vnic_query_ucast_macs(struct opa_vnic_adapter *adapter,
struct opa_veswport_iface_macs *macs)
{
u16 start_idx, tot_macs, num_macs, idx = 0, count = 0, em_macs = 0;
struct netdev_hw_addr *ha;
start_idx = be16_to_cpu(macs->start_idx);
num_macs = be16_to_cpu(macs->num_macs_in_msg);
/* loop through dev_addrs list first */
for_each_dev_addr(adapter->netdev, ha) {
struct opa_vnic_iface_mac_entry *entry = &macs->entry[count];
/* Do not include EM specified MAC address */
if (!memcmp(adapter->info.vport.base_mac_addr, ha->addr,
ARRAY_SIZE(adapter->info.vport.base_mac_addr))) {
em_macs++;
continue;
}
if (start_idx > idx++)
continue;
else if (num_macs == count)
break;
memcpy(entry, ha->addr, sizeof(*entry));
count++;
}
/* loop through uc list */
netdev_for_each_uc_addr(ha, adapter->netdev) {
struct opa_vnic_iface_mac_entry *entry = &macs->entry[count];
if (start_idx > idx++)
continue;
else if (num_macs == count)
break;
memcpy(entry, ha->addr, sizeof(*entry));
count++;
}
tot_macs = netdev_hw_addr_list_count(&adapter->netdev->dev_addrs) +
netdev_uc_count(adapter->netdev) - em_macs;
macs->tot_macs_in_lst = cpu_to_be16(tot_macs);
macs->num_macs_in_msg = cpu_to_be16(count);
macs->gen_count = cpu_to_be16(adapter->info.vport.uc_macs_gen_count);
}
| linux-master | drivers/infiniband/ulp/opa_vnic/opa_vnic_vema_iface.c |
/*
* Copyright(c) 2017 Intel Corporation.
* Copyright(c) 2021 Cornelis Networks.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* This file contains OPX Virtual Network Interface Controller (VNIC)
* Ethernet Management Agent (EMA) driver
*/
#include <linux/module.h>
#include <linux/xarray.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_verbs.h>
#include <rdma/opa_smi.h>
#include <rdma/opa_port_info.h>
#include "opa_vnic_internal.h"
char opa_vnic_driver_name[] = "opa_vnic";
/*
* The trap service level is kept in bits 3 to 7 in the trap_sl_rsvd
* field in the class port info MAD.
*/
#define GET_TRAP_SL_FROM_CLASS_PORT_INFO(x) (((x) >> 3) & 0x1f)
/* Cap trap bursts to a reasonable limit good for normal cases */
#define OPA_VNIC_TRAP_BURST_LIMIT 4
/*
* VNIC trap limit timeout.
* Inverse of cap2_mask response time out (1.0737 secs) = 0.9
* secs approx IB spec 13.4.6.2.1 PortInfoSubnetTimeout and
* 13.4.9 Traps.
*/
#define OPA_VNIC_TRAP_TIMEOUT ((4096 * (1UL << 18)) / 1000)
#define OPA_VNIC_UNSUP_ATTR \
cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB)
#define OPA_VNIC_INVAL_ATTR \
cpu_to_be16(IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE)
#define OPA_VNIC_CLASS_CAP_TRAP 0x1
/* Maximum number of VNIC ports supported */
#define OPA_VNIC_MAX_NUM_VPORT 255
/**
* struct opa_vnic_vema_port -- VNIC VEMA port details
* @cport: pointer to port
* @mad_agent: pointer to mad agent for port
* @class_port_info: Class port info information.
* @tid: Transaction id
* @port_num: OPA port number
* @vports: vnic ports
* @event_handler: ib event handler
* @lock: adapter interface lock
*/
struct opa_vnic_vema_port {
struct opa_vnic_ctrl_port *cport;
struct ib_mad_agent *mad_agent;
struct opa_class_port_info class_port_info;
u64 tid;
u8 port_num;
struct xarray vports;
struct ib_event_handler event_handler;
/* Lock to query/update network adapter */
struct mutex lock;
};
static int opa_vnic_vema_add_one(struct ib_device *device);
static void opa_vnic_vema_rem_one(struct ib_device *device,
void *client_data);
static struct ib_client opa_vnic_client = {
.name = opa_vnic_driver_name,
.add = opa_vnic_vema_add_one,
.remove = opa_vnic_vema_rem_one,
};
/**
* vema_get_vport_num -- Get the vnic from the mad
* @recvd_mad: Received mad
*
* Return: returns value of the vnic port number
*/
static inline u8 vema_get_vport_num(struct opa_vnic_vema_mad *recvd_mad)
{
return be32_to_cpu(recvd_mad->mad_hdr.attr_mod) & 0xff;
}
/**
* vema_get_vport_adapter -- Get vnic port adapter from recvd mad
* @recvd_mad: received mad
* @port: ptr to port struct on which MAD was recvd
*
* Return: vnic adapter
*/
static inline struct opa_vnic_adapter *
vema_get_vport_adapter(struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_port *port)
{
u8 vport_num = vema_get_vport_num(recvd_mad);
return xa_load(&port->vports, vport_num);
}
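/*
 * The vport number travels in the low byte of the MAD's attr_mod field (see
 * vema_get_vport_num() above) and doubles as the xarray index under which
 * vema_add_vport() stores the adapter.
 */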
/**
* vema_mac_tbl_req_ok -- Check if mac request has correct values
* @mac_tbl: mac table
*
* This function checks for the validity of the offset and number of
* entries required.
*
* Return: true if offset and num_entries are valid
*/
static inline bool vema_mac_tbl_req_ok(struct opa_veswport_mactable *mac_tbl)
{
u16 offset, num_entries;
u16 req_entries = ((OPA_VNIC_EMA_DATA - sizeof(*mac_tbl)) /
sizeof(mac_tbl->tbl_entries[0]));
offset = be16_to_cpu(mac_tbl->offset);
num_entries = be16_to_cpu(mac_tbl->num_entries);
return ((num_entries <= req_entries) &&
(offset + num_entries <= OPA_VNIC_MAC_TBL_MAX_ENTRIES));
}
/*
* Return the power on default values in the port info structure
* in big endian format as required by MAD.
*/
static inline void vema_get_pod_values(struct opa_veswport_info *port_info)
{
memset(port_info, 0, sizeof(*port_info));
port_info->vport.max_mac_tbl_ent =
cpu_to_be16(OPA_VNIC_MAC_TBL_MAX_ENTRIES);
port_info->vport.max_smac_ent =
cpu_to_be16(OPA_VNIC_MAX_SMAC_LIMIT);
port_info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
port_info->vport.config_state = OPA_VNIC_STATE_DROP_ALL;
port_info->vesw.eth_mtu = cpu_to_be16(ETH_DATA_LEN);
}
/**
* vema_add_vport -- Add a new vnic port
* @port: ptr to opa_vnic_vema_port struct
* @vport_num: vnic port number (to be added)
*
* Return a pointer to the vnic adapter structure
*/
static struct opa_vnic_adapter *vema_add_vport(struct opa_vnic_vema_port *port,
u8 vport_num)
{
struct opa_vnic_ctrl_port *cport = port->cport;
struct opa_vnic_adapter *adapter;
adapter = opa_vnic_add_netdev(cport->ibdev, port->port_num, vport_num);
if (!IS_ERR(adapter)) {
int rc;
adapter->cport = cport;
rc = xa_insert(&port->vports, vport_num, adapter, GFP_KERNEL);
if (rc < 0) {
opa_vnic_rem_netdev(adapter);
adapter = ERR_PTR(rc);
}
}
return adapter;
}
/**
* vema_get_class_port_info -- Get class info for port
* @port: Port on which MAD was received
* @recvd_mad: pointer to the received mad
* @rsp_mad: pointer to response mad
*
* This function copies the latest class port info value set for the
* port and stores it for generating traps
*/
static void vema_get_class_port_info(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad)
{
struct opa_class_port_info *port_info;
port_info = (struct opa_class_port_info *)rsp_mad->data;
memcpy(port_info, &port->class_port_info, sizeof(*port_info));
port_info->base_version = OPA_MGMT_BASE_VERSION;
port_info->class_version = OPA_EMA_CLASS_VERSION;
/*
* Set capability mask bit indicating agent generates traps,
* and set the maximum number of VNIC ports supported.
*/
port_info->cap_mask = cpu_to_be16((OPA_VNIC_CLASS_CAP_TRAP |
(OPA_VNIC_MAX_NUM_VPORT << 8)));
/*
* Since a get routine is always sent by the EM first we
* set the expected response time to
* 4.096 usec * 2^18 == 1.0737 sec here.
*/
port_info->cap_mask2_resp_time = cpu_to_be32(18);
}
/**
* vema_set_class_port_info -- Set class info for port
* @port: Port on which MAD was received
* @recvd_mad: pointer to the received mad
* @rsp_mad: pointer to response mad
*
* This function updates the port class info for the specific vnic
* and sets up the response mad data
*/
static void vema_set_class_port_info(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad)
{
memcpy(&port->class_port_info, recvd_mad->data,
sizeof(port->class_port_info));
vema_get_class_port_info(port, recvd_mad, rsp_mad);
}
/**
* vema_get_veswport_info -- Get veswport info
* @port: source port on which MAD was received
* @recvd_mad: pointer to the received mad
* @rsp_mad: pointer to response mad
*/
static void vema_get_veswport_info(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad)
{
struct opa_veswport_info *port_info =
(struct opa_veswport_info *)rsp_mad->data;
struct opa_vnic_adapter *adapter;
adapter = vema_get_vport_adapter(recvd_mad, port);
if (adapter) {
memset(port_info, 0, sizeof(*port_info));
opa_vnic_get_vesw_info(adapter, &port_info->vesw);
opa_vnic_get_per_veswport_info(adapter,
&port_info->vport);
} else {
vema_get_pod_values(port_info);
}
}
/**
* vema_set_veswport_info -- Set veswport info
* @port: source port on which MAD was received
* @recvd_mad: pointer to the received mad
* @rsp_mad: pointer to response mad
*
* This function updates the veswport info for the vnic and builds the response mad
*/
static void vema_set_veswport_info(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad)
{
struct opa_vnic_ctrl_port *cport = port->cport;
struct opa_veswport_info *port_info;
struct opa_vnic_adapter *adapter;
u8 vport_num;
vport_num = vema_get_vport_num(recvd_mad);
adapter = vema_get_vport_adapter(recvd_mad, port);
if (!adapter) {
adapter = vema_add_vport(port, vport_num);
if (IS_ERR(adapter)) {
c_err("failed to add vport %d: %ld\n",
vport_num, PTR_ERR(adapter));
goto err_exit;
}
}
port_info = (struct opa_veswport_info *)recvd_mad->data;
opa_vnic_set_vesw_info(adapter, &port_info->vesw);
opa_vnic_set_per_veswport_info(adapter, &port_info->vport);
/* Process the new config settings */
opa_vnic_process_vema_config(adapter);
vema_get_veswport_info(port, recvd_mad, rsp_mad);
return;
err_exit:
rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
}
/**
* vema_get_mac_entries -- Get MAC entries in VNIC MAC table
* @port: source port on which MAD was received
* @recvd_mad: pointer to the received mad
* @rsp_mad: pointer to response mad
*
* This function gets the MAC entries that are programmed into
* the VNIC MAC forwarding table. It checks for the validity of
* the index into the MAC table and the number of entries that
* are to be retrieved.
*/
static void vema_get_mac_entries(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad)
{
struct opa_veswport_mactable *mac_tbl_in, *mac_tbl_out;
struct opa_vnic_adapter *adapter;
adapter = vema_get_vport_adapter(recvd_mad, port);
if (!adapter) {
rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
return;
}
mac_tbl_in = (struct opa_veswport_mactable *)recvd_mad->data;
mac_tbl_out = (struct opa_veswport_mactable *)rsp_mad->data;
if (vema_mac_tbl_req_ok(mac_tbl_in)) {
mac_tbl_out->offset = mac_tbl_in->offset;
mac_tbl_out->num_entries = mac_tbl_in->num_entries;
opa_vnic_query_mac_tbl(adapter, mac_tbl_out);
} else {
rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
}
}
/**
* vema_set_mac_entries -- Set MAC entries in VNIC MAC table
* @port: source port on which MAD was received
* @recvd_mad: pointer to the received mad
* @rsp_mad: pointer to response mad
*
* This function sets the MAC entries in the VNIC forwarding table
* It checks for the validity of the index and the number of forwarding
* table entries to be programmed.
*/
static void vema_set_mac_entries(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad)
{
struct opa_veswport_mactable *mac_tbl;
struct opa_vnic_adapter *adapter;
adapter = vema_get_vport_adapter(recvd_mad, port);
if (!adapter) {
rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
return;
}
mac_tbl = (struct opa_veswport_mactable *)recvd_mad->data;
if (vema_mac_tbl_req_ok(mac_tbl)) {
if (opa_vnic_update_mac_tbl(adapter, mac_tbl))
rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
} else {
rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
}
vema_get_mac_entries(port, recvd_mad, rsp_mad);
}
/**
* vema_set_delete_vesw -- Reset VESW info to POD values
* @port: source port on which MAD was received
* @recvd_mad: pointer to the received mad
* @rsp_mad: pointer to response mad
*
* This function clears all the fields of veswport info for the requested vesw
* and sets them back to the power-on default values. It does not delete the
* vesw.
*/
static void vema_set_delete_vesw(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad)
{
struct opa_veswport_info *port_info =
(struct opa_veswport_info *)rsp_mad->data;
struct opa_vnic_adapter *adapter;
adapter = vema_get_vport_adapter(recvd_mad, port);
if (!adapter) {
rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
return;
}
vema_get_pod_values(port_info);
opa_vnic_set_vesw_info(adapter, &port_info->vesw);
opa_vnic_set_per_veswport_info(adapter, &port_info->vport);
/* Process the new config settings */
opa_vnic_process_vema_config(adapter);
opa_vnic_release_mac_tbl(adapter);
vema_get_veswport_info(port, recvd_mad, rsp_mad);
}
/**
* vema_get_mac_list -- Get the unicast/multicast macs.
* @port: source port on which MAD was received
* @recvd_mad: Received mad contains fields to set vnic parameters
* @rsp_mad: Response mad to be built
* @attr_id: Attribute ID indicating multicast or unicast mac list
*/
static void vema_get_mac_list(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad,
u16 attr_id)
{
struct opa_veswport_iface_macs *macs_in, *macs_out;
int max_entries = (OPA_VNIC_EMA_DATA - sizeof(*macs_out)) / ETH_ALEN;
struct opa_vnic_adapter *adapter;
adapter = vema_get_vport_adapter(recvd_mad, port);
if (!adapter) {
rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
return;
}
macs_in = (struct opa_veswport_iface_macs *)recvd_mad->data;
macs_out = (struct opa_veswport_iface_macs *)rsp_mad->data;
macs_out->start_idx = macs_in->start_idx;
if (macs_in->num_macs_in_msg)
macs_out->num_macs_in_msg = macs_in->num_macs_in_msg;
else
macs_out->num_macs_in_msg = cpu_to_be16(max_entries);
if (attr_id == OPA_EM_ATTR_IFACE_MCAST_MACS)
opa_vnic_query_mcast_macs(adapter, macs_out);
else
opa_vnic_query_ucast_macs(adapter, macs_out);
}
/**
* vema_get_summary_counters -- Gets summary counters.
* @port: source port on which MAD was received
* @recvd_mad: Received mad contains fields to set vnic parameters
* @rsp_mad: Response mad to be built
*/
static void vema_get_summary_counters(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad)
{
struct opa_veswport_summary_counters *cntrs;
struct opa_vnic_adapter *adapter;
adapter = vema_get_vport_adapter(recvd_mad, port);
if (adapter) {
cntrs = (struct opa_veswport_summary_counters *)rsp_mad->data;
opa_vnic_get_summary_counters(adapter, cntrs);
} else {
rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
}
}
/**
* vema_get_error_counters -- Gets error counters.
* @port: source port on which MAD was received
* @recvd_mad: Received mad contains fields to set vnic parameters
* @rsp_mad: Response mad to be built
*/
static void vema_get_error_counters(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad)
{
struct opa_veswport_error_counters *cntrs;
struct opa_vnic_adapter *adapter;
adapter = vema_get_vport_adapter(recvd_mad, port);
if (adapter) {
cntrs = (struct opa_veswport_error_counters *)rsp_mad->data;
opa_vnic_get_error_counters(adapter, cntrs);
} else {
rsp_mad->mad_hdr.status = OPA_VNIC_INVAL_ATTR;
}
}
/**
* vema_get -- Process received get MAD
* @port: source port on which MAD was received
* @recvd_mad: Received mad
* @rsp_mad: Response mad to be built
*/
static void vema_get(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad)
{
u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id);
switch (attr_id) {
case OPA_EM_ATTR_CLASS_PORT_INFO:
vema_get_class_port_info(port, recvd_mad, rsp_mad);
break;
case OPA_EM_ATTR_VESWPORT_INFO:
vema_get_veswport_info(port, recvd_mad, rsp_mad);
break;
case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES:
vema_get_mac_entries(port, recvd_mad, rsp_mad);
break;
case OPA_EM_ATTR_IFACE_UCAST_MACS:
case OPA_EM_ATTR_IFACE_MCAST_MACS:
vema_get_mac_list(port, recvd_mad, rsp_mad, attr_id);
break;
case OPA_EM_ATTR_VESWPORT_SUMMARY_COUNTERS:
vema_get_summary_counters(port, recvd_mad, rsp_mad);
break;
case OPA_EM_ATTR_VESWPORT_ERROR_COUNTERS:
vema_get_error_counters(port, recvd_mad, rsp_mad);
break;
default:
rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
break;
}
}
/**
* vema_set -- Process received set MAD
* @port: source port on which MAD was received
* @recvd_mad: Received mad contains fields to set vnic parameters
* @rsp_mad: Response mad to be built
*/
static void vema_set(struct opa_vnic_vema_port *port,
struct opa_vnic_vema_mad *recvd_mad,
struct opa_vnic_vema_mad *rsp_mad)
{
u16 attr_id = be16_to_cpu(recvd_mad->mad_hdr.attr_id);
switch (attr_id) {
case OPA_EM_ATTR_CLASS_PORT_INFO:
vema_set_class_port_info(port, recvd_mad, rsp_mad);
break;
case OPA_EM_ATTR_VESWPORT_INFO:
vema_set_veswport_info(port, recvd_mad, rsp_mad);
break;
case OPA_EM_ATTR_VESWPORT_MAC_ENTRIES:
vema_set_mac_entries(port, recvd_mad, rsp_mad);
break;
case OPA_EM_ATTR_DELETE_VESW:
vema_set_delete_vesw(port, recvd_mad, rsp_mad);
break;
default:
rsp_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
break;
}
}
/**
* vema_send -- Send handler for VEMA MAD agent
* @mad_agent: pointer to the mad agent
* @mad_wc: pointer to mad send work completion information
*
* Free all the data structures associated with the sent MAD
*/
static void vema_send(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_wc)
{
rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
ib_free_send_mad(mad_wc->send_buf);
}
/**
* vema_recv -- Recv handler for VEMA MAD agent
* @mad_agent: pointer to the mad agent
* @send_buf: Send buffer if found, else NULL
* @mad_wc: pointer to mad send work completion information
*
* Handle only set and get methods and respond to other methods
* as unsupported. Allocate response buffer and address handle
* for the response MAD.
*/
static void vema_recv(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf,
struct ib_mad_recv_wc *mad_wc)
{
struct opa_vnic_vema_port *port;
struct ib_ah *ah;
struct ib_mad_send_buf *rsp;
struct opa_vnic_vema_mad *vema_mad;
if (!mad_wc || !mad_wc->recv_buf.mad)
return;
port = mad_agent->context;
ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
mad_wc->recv_buf.grh, mad_agent->port_num);
if (IS_ERR(ah))
goto free_recv_mad;
rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
mad_wc->wc->pkey_index, 0,
IB_MGMT_VENDOR_HDR, OPA_VNIC_EMA_DATA,
GFP_KERNEL, OPA_MGMT_BASE_VERSION);
if (IS_ERR(rsp))
goto err_rsp;
rsp->ah = ah;
vema_mad = rsp->mad;
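/* Start the response by copying the vendor MAD header from the request */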
memcpy(vema_mad, mad_wc->recv_buf.mad, IB_MGMT_VENDOR_HDR);
vema_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
vema_mad->mad_hdr.status = 0;
/* Lock ensures network adapter is not removed */
mutex_lock(&port->lock);
switch (mad_wc->recv_buf.mad->mad_hdr.method) {
case IB_MGMT_METHOD_GET:
vema_get(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,
vema_mad);
break;
case IB_MGMT_METHOD_SET:
vema_set(port, (struct opa_vnic_vema_mad *)mad_wc->recv_buf.mad,
vema_mad);
break;
default:
vema_mad->mad_hdr.status = OPA_VNIC_UNSUP_ATTR;
break;
}
mutex_unlock(&port->lock);
if (!ib_post_send_mad(rsp, NULL)) {
/*
* with post send successful ah and send mad
* will be destroyed in send handler
*/
goto free_recv_mad;
}
ib_free_send_mad(rsp);
err_rsp:
rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
free_recv_mad:
ib_free_recv_mad(mad_wc);
}
/**
* vema_get_port -- Gets the opa_vnic_vema_port
* @cport: pointer to control dev
* @port_num: Port number
*
* This function returns the opa_vnic_vema_port structure that is
* associated with the OPA port number by indexing into the per-port
* array that follows the control port structure
*
* Return: ptr to requested opa_vnic_vema_port structure
* if success, NULL if not
*/
static struct opa_vnic_vema_port *
vema_get_port(struct opa_vnic_ctrl_port *cport, u8 port_num)
{
struct opa_vnic_vema_port *port = (void *)cport + sizeof(*cport);
if (port_num > cport->num_ports)
return NULL;
return port + (port_num - 1);
}
/**
* opa_vnic_vema_send_trap -- This function sends a trap to the EM
* @adapter: pointer to vnic adapter
* @data: pointer to trap data filled by calling function
* @lid: issuer's lid (encap_slid from vesw_port_info)
*
* This function is called from the VNIC driver to send a trap if there
* is something the EM should be notified about. These events currently
* are
* 1) UNICAST INTERFACE MACADDRESS changes
* 2) MULTICAST INTERFACE MACADDRESS changes
* 3) ETHERNET LINK STATUS changes
* While allocating the send mad, the remote side QPN used is 1
* as this is the well-known QP.
*
*/
void opa_vnic_vema_send_trap(struct opa_vnic_adapter *adapter,
struct __opa_veswport_trap *data, u32 lid)
{
struct opa_vnic_ctrl_port *cport = adapter->cport;
struct ib_mad_send_buf *send_buf;
struct opa_vnic_vema_port *port;
struct ib_device *ibp;
struct opa_vnic_vema_mad_trap *trap_mad;
struct opa_class_port_info *class;
struct rdma_ah_attr ah_attr;
struct ib_ah *ah;
struct opa_veswport_trap *trap;
u32 trap_lid;
u16 pkey_idx;
if (!cport)
goto err_exit;
ibp = cport->ibdev;
port = vema_get_port(cport, data->opaportnum);
if (!port || !port->mad_agent)
goto err_exit;
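/* Rate limit traps: allow at most OPA_VNIC_TRAP_BURST_LIMIT traps within the timeout window */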
if (time_before(jiffies, adapter->trap_timeout)) {
if (adapter->trap_count == OPA_VNIC_TRAP_BURST_LIMIT) {
v_warn("Trap rate exceeded\n");
goto err_exit;
} else {
adapter->trap_count++;
}
} else {
adapter->trap_count = 0;
}
class = &port->class_port_info;
/* Set up address handle */
memset(&ah_attr, 0, sizeof(ah_attr));
ah_attr.type = rdma_ah_find_type(ibp, port->port_num);
rdma_ah_set_sl(&ah_attr,
GET_TRAP_SL_FROM_CLASS_PORT_INFO(class->trap_sl_rsvd));
rdma_ah_set_port_num(&ah_attr, port->port_num);
trap_lid = be32_to_cpu(class->trap_lid);
/*
* check for trap lid validity, must not be zero
* The trap sink could change after we fashion the MAD but since traps
* are not guaranteed we won't use a lock as anyway the change will take
* place even with locking.
*/
if (!trap_lid) {
c_err("%s: Invalid dlid\n", __func__);
goto err_exit;
}
rdma_ah_set_dlid(&ah_attr, trap_lid);
ah = rdma_create_ah(port->mad_agent->qp->pd, &ah_attr, 0);
if (IS_ERR(ah)) {
c_err("%s:Couldn't create new AH = %p\n", __func__, ah);
c_err("%s:dlid = %d, sl = %d, port = %d\n", __func__,
rdma_ah_get_dlid(&ah_attr), rdma_ah_get_sl(&ah_attr),
rdma_ah_get_port_num(&ah_attr));
goto err_exit;
}
if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_FULL,
&pkey_idx) < 0) {
c_err("%s:full key not found, defaulting to partial\n",
__func__);
if (ib_find_pkey(ibp, data->opaportnum, IB_DEFAULT_PKEY_PARTIAL,
&pkey_idx) < 0)
pkey_idx = 1;
}
send_buf = ib_create_send_mad(port->mad_agent, 1, pkey_idx, 0,
IB_MGMT_VENDOR_HDR, IB_MGMT_MAD_DATA,
GFP_ATOMIC, OPA_MGMT_BASE_VERSION);
if (IS_ERR(send_buf)) {
c_err("%s:Couldn't allocate send buf\n", __func__);
goto err_sndbuf;
}
send_buf->ah = ah;
/* Set up common MAD hdr */
trap_mad = send_buf->mad;
trap_mad->mad_hdr.base_version = OPA_MGMT_BASE_VERSION;
trap_mad->mad_hdr.mgmt_class = OPA_MGMT_CLASS_INTEL_EMA;
trap_mad->mad_hdr.class_version = OPA_EMA_CLASS_VERSION;
trap_mad->mad_hdr.method = IB_MGMT_METHOD_TRAP;
port->tid++;
trap_mad->mad_hdr.tid = cpu_to_be64(port->tid);
trap_mad->mad_hdr.attr_id = IB_SMP_ATTR_NOTICE;
/* Set up vendor OUI */
trap_mad->oui[0] = INTEL_OUI_1;
trap_mad->oui[1] = INTEL_OUI_2;
trap_mad->oui[2] = INTEL_OUI_3;
/* Setup notice attribute portion */
trap_mad->notice.gen_type = OPA_INTEL_EMA_NOTICE_TYPE_INFO << 1;
trap_mad->notice.oui_1 = INTEL_OUI_1;
trap_mad->notice.oui_2 = INTEL_OUI_2;
trap_mad->notice.oui_3 = INTEL_OUI_3;
trap_mad->notice.issuer_lid = cpu_to_be32(lid);
/* copy the actual trap data */
trap = (struct opa_veswport_trap *)trap_mad->notice.raw_data;
trap->fabric_id = cpu_to_be16(data->fabric_id);
trap->veswid = cpu_to_be16(data->veswid);
trap->veswportnum = cpu_to_be32(data->veswportnum);
trap->opaportnum = cpu_to_be16(data->opaportnum);
trap->veswportindex = data->veswportindex;
trap->opcode = data->opcode;
/* If successful send set up rate limit timeout else bail */
if (ib_post_send_mad(send_buf, NULL)) {
ib_free_send_mad(send_buf);
} else {
if (adapter->trap_count)
return;
adapter->trap_timeout = jiffies +
usecs_to_jiffies(OPA_VNIC_TRAP_TIMEOUT);
return;
}
err_sndbuf:
rdma_destroy_ah(ah, 0);
err_exit:
v_err("Aborting trap\n");
}
static void opa_vnic_event(struct ib_event_handler *handler,
struct ib_event *record)
{
struct opa_vnic_vema_port *port =
container_of(handler, struct opa_vnic_vema_port, event_handler);
struct opa_vnic_ctrl_port *cport = port->cport;
struct opa_vnic_adapter *adapter;
unsigned long index;
if (record->element.port_num != port->port_num)
return;
c_dbg("OPA_VNIC received event %d on device %s port %d\n",
record->event, dev_name(&record->device->dev),
record->element.port_num);
if (record->event != IB_EVENT_PORT_ERR &&
record->event != IB_EVENT_PORT_ACTIVE)
return;
xa_for_each(&port->vports, index, adapter) {
if (record->event == IB_EVENT_PORT_ACTIVE)
netif_carrier_on(adapter->netdev);
else
netif_carrier_off(adapter->netdev);
}
}
/**
* vema_unregister -- Unregisters agent
* @cport: pointer to control port
*
* This deletes the registration by VEMA for MADs
*/
static void vema_unregister(struct opa_vnic_ctrl_port *cport)
{
struct opa_vnic_adapter *adapter;
unsigned long index;
int i;
for (i = 1; i <= cport->num_ports; i++) {
struct opa_vnic_vema_port *port = vema_get_port(cport, i);
if (!port->mad_agent)
continue;
/* Lock ensures no MAD is being processed */
mutex_lock(&port->lock);
xa_for_each(&port->vports, index, adapter)
opa_vnic_rem_netdev(adapter);
mutex_unlock(&port->lock);
ib_unregister_mad_agent(port->mad_agent);
port->mad_agent = NULL;
mutex_destroy(&port->lock);
xa_destroy(&port->vports);
ib_unregister_event_handler(&port->event_handler);
}
}
/**
* vema_register -- Registers agent
* @cport: pointer to control port
*
* This function registers the handlers for the VEMA MADs
*
* Return: 0 on success, non-zero otherwise
*/
static int vema_register(struct opa_vnic_ctrl_port *cport)
{
struct ib_mad_reg_req reg_req = {
.mgmt_class = OPA_MGMT_CLASS_INTEL_EMA,
.mgmt_class_version = OPA_MGMT_BASE_VERSION,
.oui = { INTEL_OUI_1, INTEL_OUI_2, INTEL_OUI_3 }
};
int i;
set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
/* register ib event handler and mad agent for each port on dev */
for (i = 1; i <= cport->num_ports; i++) {
struct opa_vnic_vema_port *port = vema_get_port(cport, i);
int ret;
port->cport = cport;
port->port_num = i;
INIT_IB_EVENT_HANDLER(&port->event_handler,
cport->ibdev, opa_vnic_event);
ib_register_event_handler(&port->event_handler);
xa_init(&port->vports);
mutex_init(&port->lock);
port->mad_agent = ib_register_mad_agent(cport->ibdev, i,
IB_QPT_GSI, &reg_req,
IB_MGMT_RMPP_VERSION,
vema_send, vema_recv,
port, 0);
if (IS_ERR(port->mad_agent)) {
ret = PTR_ERR(port->mad_agent);
port->mad_agent = NULL;
mutex_destroy(&port->lock);
vema_unregister(cport);
return ret;
}
}
return 0;
}
/**
* opa_vnic_ctrl_config_dev -- This function sends a trap to the EM
* by way of ib_modify_port to indicate support for ethernet on the
* fabric.
* @cport: pointer to control port
* @en: enable or disable ethernet on fabric support
*/
static void opa_vnic_ctrl_config_dev(struct opa_vnic_ctrl_port *cport, bool en)
{
struct ib_port_modify pm = { 0 };
int i;
if (en)
pm.set_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;
else
pm.clr_port_cap_mask = OPA_CAP_MASK3_IsEthOnFabricSupported;
for (i = 1; i <= cport->num_ports; i++)
ib_modify_port(cport->ibdev, i, IB_PORT_OPA_MASK_CHG, &pm);
}
/**
* opa_vnic_vema_add_one -- Handle new ib device
* @device: ib device pointer
*
* Allocate the vnic control port and initialize it.
*/
static int opa_vnic_vema_add_one(struct ib_device *device)
{
struct opa_vnic_ctrl_port *cport;
int rc, size = sizeof(*cport);
if (!rdma_cap_opa_vnic(device))
return -EOPNOTSUPP;
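/* Allocate the per-port VEMA structures contiguously after the control port */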
size += device->phys_port_cnt * sizeof(struct opa_vnic_vema_port);
cport = kzalloc(size, GFP_KERNEL);
if (!cport)
return -ENOMEM;
cport->num_ports = device->phys_port_cnt;
cport->ibdev = device;
/* Initialize opa vnic management agent (vema) */
rc = vema_register(cport);
if (!rc)
c_info("VNIC client initialized\n");
ib_set_client_data(device, &opa_vnic_client, cport);
opa_vnic_ctrl_config_dev(cport, true);
return 0;
}
/**
* opa_vnic_vema_rem_one -- Handle ib device removal
* @device: ib device pointer
* @client_data: ib client data
*
* Uninitialize and free the vnic control port.
*/
static void opa_vnic_vema_rem_one(struct ib_device *device,
void *client_data)
{
struct opa_vnic_ctrl_port *cport = client_data;
c_info("removing VNIC client\n");
opa_vnic_ctrl_config_dev(cport, false);
vema_unregister(cport);
kfree(cport);
}
static int __init opa_vnic_init(void)
{
int rc;
rc = ib_register_client(&opa_vnic_client);
if (rc)
pr_err("VNIC driver register failed %d\n", rc);
return rc;
}
module_init(opa_vnic_init);
static void opa_vnic_deinit(void)
{
ib_unregister_client(&opa_vnic_client);
}
module_exit(opa_vnic_deinit);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Cornelis Networks");
MODULE_DESCRIPTION("Cornelis OPX Virtual Network driver");
| linux-master | drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c |
/*
* Copyright(c) 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* This file contains OPA VNIC encapsulation/decapsulation function.
*/
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include "opa_vnic_internal.h"
/* OPA 16B Header fields */
#define OPA_16B_LID_MASK 0xFFFFFull
#define OPA_16B_SLID_HIGH_SHFT 8
#define OPA_16B_SLID_MASK 0xF00ull
#define OPA_16B_DLID_MASK 0xF000ull
#define OPA_16B_DLID_HIGH_SHFT 12
#define OPA_16B_LEN_SHFT 20
#define OPA_16B_SC_SHFT 20
#define OPA_16B_RC_SHFT 25
#define OPA_16B_PKEY_SHFT 16
#define OPA_VNIC_L4_HDR_SHFT 16
/* L2+L4 hdr len is 20 bytes (5 quad words) */
#define OPA_VNIC_HDR_QW_LEN 5
static inline void opa_vnic_make_header(u8 *hdr, u32 slid, u32 dlid, u16 len,
u16 pkey, u16 entropy, u8 sc, u8 rc,
u8 l4_type, u16 l4_hdr)
{
/* h[1]: LT=1, 16B L2=10 */
u32 h[OPA_VNIC_HDR_QW_LEN] = {0, 0xc0000000, 0, 0, 0};
h[2] = l4_type;
h[3] = entropy;
h[4] = l4_hdr << OPA_VNIC_L4_HDR_SHFT;
/* Extract and set 4 upper bits and 20 lower bits of the lids */
h[0] |= (slid & OPA_16B_LID_MASK);
h[2] |= ((slid >> (20 - OPA_16B_SLID_HIGH_SHFT)) & OPA_16B_SLID_MASK);
h[1] |= (dlid & OPA_16B_LID_MASK);
h[2] |= ((dlid >> (20 - OPA_16B_DLID_HIGH_SHFT)) & OPA_16B_DLID_MASK);
h[0] |= (len << OPA_16B_LEN_SHFT);
h[1] |= (rc << OPA_16B_RC_SHFT);
h[1] |= (sc << OPA_16B_SC_SHFT);
h[2] |= ((u32)pkey << OPA_16B_PKEY_SHFT);
memcpy(hdr, h, OPA_VNIC_HDR_LEN);
}
/*
* Using a simple hash table for mac table implementation with the last octet
* of mac address as a key.
*/
static void opa_vnic_free_mac_tbl(struct hlist_head *mactbl)
{
struct opa_vnic_mac_tbl_node *node;
struct hlist_node *tmp;
int bkt;
if (!mactbl)
return;
vnic_hash_for_each_safe(mactbl, bkt, tmp, node, hlist) {
hash_del(&node->hlist);
kfree(node);
}
kfree(mactbl);
}
static struct hlist_head *opa_vnic_alloc_mac_tbl(void)
{
u32 size = sizeof(struct hlist_head) * OPA_VNIC_MAC_TBL_SIZE;
struct hlist_head *mactbl;
mactbl = kzalloc(size, GFP_KERNEL);
if (!mactbl)
return ERR_PTR(-ENOMEM);
vnic_hash_init(mactbl);
return mactbl;
}
/* opa_vnic_release_mac_tbl - empty and free the mac table */
void opa_vnic_release_mac_tbl(struct opa_vnic_adapter *adapter)
{
struct hlist_head *mactbl;
mutex_lock(&adapter->mactbl_lock);
mactbl = rcu_access_pointer(adapter->mactbl);
rcu_assign_pointer(adapter->mactbl, NULL);
synchronize_rcu();
opa_vnic_free_mac_tbl(mactbl);
adapter->info.vport.mac_tbl_digest = 0;
mutex_unlock(&adapter->mactbl_lock);
}
/*
* opa_vnic_query_mac_tbl - query the mac table for a section
*
* This function implements query of specific function of the mac table.
* The function also expects the requested range to be valid.
*/
void opa_vnic_query_mac_tbl(struct opa_vnic_adapter *adapter,
struct opa_veswport_mactable *tbl)
{
struct opa_vnic_mac_tbl_node *node;
struct hlist_head *mactbl;
int bkt;
u16 loffset, lnum_entries;
rcu_read_lock();
mactbl = rcu_dereference(adapter->mactbl);
if (!mactbl)
goto get_mac_done;
loffset = be16_to_cpu(tbl->offset);
lnum_entries = be16_to_cpu(tbl->num_entries);
vnic_hash_for_each(mactbl, bkt, node, hlist) {
struct __opa_vnic_mactable_entry *nentry = &node->entry;
struct opa_veswport_mactable_entry *entry;
if ((node->index < loffset) ||
(node->index >= (loffset + lnum_entries)))
continue;
/* populate entry in the tbl corresponding to the index */
entry = &tbl->tbl_entries[node->index - loffset];
memcpy(entry->mac_addr, nentry->mac_addr,
ARRAY_SIZE(entry->mac_addr));
memcpy(entry->mac_addr_mask, nentry->mac_addr_mask,
ARRAY_SIZE(entry->mac_addr_mask));
entry->dlid_sd = cpu_to_be32(nentry->dlid_sd);
}
tbl->mac_tbl_digest = cpu_to_be32(adapter->info.vport.mac_tbl_digest);
get_mac_done:
rcu_read_unlock();
}
/*
* opa_vnic_update_mac_tbl - update mac table section
*
* This function updates the specified section of the mac table.
* The procedure includes following steps.
* - Allocate a new mac (hash) table.
* - Add the specified entries to the new table.
* (except the ones that are requested to be deleted).
* - Add all the other entries from the old mac table.
* - If there is a failure, free the new table and return.
* - Switch to the new table.
* - Free the old table and return.
*
* The function also expects the requested range to be valid.
*/
int opa_vnic_update_mac_tbl(struct opa_vnic_adapter *adapter,
struct opa_veswport_mactable *tbl)
{
struct opa_vnic_mac_tbl_node *node, *new_node;
struct hlist_head *new_mactbl, *old_mactbl;
int i, bkt, rc = 0;
u8 key;
u16 loffset, lnum_entries;
mutex_lock(&adapter->mactbl_lock);
/* allocate new mac table */
new_mactbl = opa_vnic_alloc_mac_tbl();
if (IS_ERR(new_mactbl)) {
mutex_unlock(&adapter->mactbl_lock);
return PTR_ERR(new_mactbl);
}
loffset = be16_to_cpu(tbl->offset);
lnum_entries = be16_to_cpu(tbl->num_entries);
/* add updated entries to the new mac table */
for (i = 0; i < lnum_entries; i++) {
struct __opa_vnic_mactable_entry *nentry;
struct opa_veswport_mactable_entry *entry =
&tbl->tbl_entries[i];
u8 *mac_addr = entry->mac_addr;
u8 empty_mac[ETH_ALEN] = { 0 };
v_dbg("new mac entry %4d: %02x:%02x:%02x:%02x:%02x:%02x %x\n",
loffset + i, mac_addr[0], mac_addr[1], mac_addr[2],
mac_addr[3], mac_addr[4], mac_addr[5],
entry->dlid_sd);
/* if the entry is being removed, do not add it */
if (!memcmp(mac_addr, empty_mac, ARRAY_SIZE(empty_mac)))
continue;
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (!node) {
rc = -ENOMEM;
goto updt_done;
}
node->index = loffset + i;
nentry = &node->entry;
memcpy(nentry->mac_addr, entry->mac_addr,
ARRAY_SIZE(nentry->mac_addr));
memcpy(nentry->mac_addr_mask, entry->mac_addr_mask,
ARRAY_SIZE(nentry->mac_addr_mask));
nentry->dlid_sd = be32_to_cpu(entry->dlid_sd);
key = node->entry.mac_addr[OPA_VNIC_MAC_HASH_IDX];
vnic_hash_add(new_mactbl, &node->hlist, key);
}
/* add other entries from current mac table to new mac table */
old_mactbl = rcu_access_pointer(adapter->mactbl);
if (!old_mactbl)
goto switch_tbl;
vnic_hash_for_each(old_mactbl, bkt, node, hlist) {
if ((node->index >= loffset) &&
(node->index < (loffset + lnum_entries)))
continue;
new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
if (!new_node) {
rc = -ENOMEM;
goto updt_done;
}
new_node->index = node->index;
memcpy(&new_node->entry, &node->entry, sizeof(node->entry));
key = new_node->entry.mac_addr[OPA_VNIC_MAC_HASH_IDX];
vnic_hash_add(new_mactbl, &new_node->hlist, key);
}
switch_tbl:
/* switch to new table */
rcu_assign_pointer(adapter->mactbl, new_mactbl);
synchronize_rcu();
adapter->info.vport.mac_tbl_digest = be32_to_cpu(tbl->mac_tbl_digest);
updt_done:
/* upon failure, free the new table; otherwise, free the old table */
if (rc)
opa_vnic_free_mac_tbl(new_mactbl);
else
opa_vnic_free_mac_tbl(old_mactbl);
mutex_unlock(&adapter->mactbl_lock);
return rc;
}
/* opa_vnic_chk_mac_tbl - check mac table for dlid */
static uint32_t opa_vnic_chk_mac_tbl(struct opa_vnic_adapter *adapter,
struct ethhdr *mac_hdr)
{
struct opa_vnic_mac_tbl_node *node;
struct hlist_head *mactbl;
u32 dlid = 0;
u8 key;
rcu_read_lock();
mactbl = rcu_dereference(adapter->mactbl);
if (unlikely(!mactbl))
goto chk_done;
key = mac_hdr->h_dest[OPA_VNIC_MAC_HASH_IDX];
vnic_hash_for_each_possible(mactbl, node, hlist, key) {
struct __opa_vnic_mactable_entry *entry = &node->entry;
/* if related to source mac, skip */
if (unlikely(OPA_VNIC_DLID_SD_IS_SRC_MAC(entry->dlid_sd)))
continue;
if (!memcmp(node->entry.mac_addr, mac_hdr->h_dest,
ARRAY_SIZE(node->entry.mac_addr))) {
/* mac address found */
dlid = OPA_VNIC_DLID_SD_GET_DLID(node->entry.dlid_sd);
break;
}
}
chk_done:
rcu_read_unlock();
return dlid;
}
/* opa_vnic_get_dlid - find and return the DLID */
static uint32_t opa_vnic_get_dlid(struct opa_vnic_adapter *adapter,
struct sk_buff *skb, u8 def_port)
{
struct __opa_veswport_info *info = &adapter->info;
struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
u32 dlid;
dlid = opa_vnic_chk_mac_tbl(adapter, mac_hdr);
if (dlid)
return dlid;
if (is_multicast_ether_addr(mac_hdr->h_dest)) {
dlid = info->vesw.u_mcast_dlid;
} else {
if (is_local_ether_addr(mac_hdr->h_dest)) {
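/* Locally administered MAC: the DLID is encoded in the lower three address octets */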
dlid = ((uint32_t)mac_hdr->h_dest[5] << 16) |
((uint32_t)mac_hdr->h_dest[4] << 8) |
mac_hdr->h_dest[3];
if (unlikely(!dlid))
v_warn("Null dlid in MAC address\n");
} else if (def_port != OPA_VNIC_INVALID_PORT) {
if (def_port < OPA_VESW_MAX_NUM_DEF_PORT)
dlid = info->vesw.u_ucast_dlid[def_port];
}
}
return dlid;
}
/* opa_vnic_get_sc - return the service class */
static u8 opa_vnic_get_sc(struct __opa_veswport_info *info,
struct sk_buff *skb)
{
struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
u16 vlan_tci;
u8 sc;
if (!__vlan_get_tag(skb, &vlan_tci)) {
u8 pcp = OPA_VNIC_VLAN_PCP(vlan_tci);
if (is_multicast_ether_addr(mac_hdr->h_dest))
sc = info->vport.pcp_to_sc_mc[pcp];
else
sc = info->vport.pcp_to_sc_uc[pcp];
} else {
if (is_multicast_ether_addr(mac_hdr->h_dest))
sc = info->vport.non_vlan_sc_mc;
else
sc = info->vport.non_vlan_sc_uc;
}
return sc;
}
u8 opa_vnic_get_vl(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
{
struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
struct __opa_veswport_info *info = &adapter->info;
u8 vl;
if (skb_vlan_tag_present(skb)) {
u8 pcp = skb_vlan_tag_get(skb) >> VLAN_PRIO_SHIFT;
if (is_multicast_ether_addr(mac_hdr->h_dest))
vl = info->vport.pcp_to_vl_mc[pcp];
else
vl = info->vport.pcp_to_vl_uc[pcp];
} else {
if (is_multicast_ether_addr(mac_hdr->h_dest))
vl = info->vport.non_vlan_vl_mc;
else
vl = info->vport.non_vlan_vl_uc;
}
return vl;
}
/* opa_vnic_get_rc - return the routing control */
static u8 opa_vnic_get_rc(struct __opa_veswport_info *info,
struct sk_buff *skb)
{
u8 proto, rout_ctrl;
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IPV6):
proto = ipv6_hdr(skb)->nexthdr;
if (proto == IPPROTO_TCP)
rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
IPV6_TCP);
else if (proto == IPPROTO_UDP)
rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
IPV6_UDP);
else
rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV6);
break;
case htons(ETH_P_IP):
proto = ip_hdr(skb)->protocol;
if (proto == IPPROTO_TCP)
rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
IPV4_TCP);
else if (proto == IPPROTO_UDP)
rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc,
IPV4_UDP);
else
rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, IPV4);
break;
default:
rout_ctrl = OPA_VNIC_ENCAP_RC_EXT(info->vesw.rc, DEFAULT);
}
return rout_ctrl;
}
/* opa_vnic_calc_entropy - calculate the packet entropy */
u8 opa_vnic_calc_entropy(struct sk_buff *skb)
{
u32 hash = skb_get_hash(skb);
/* store XOR of all bytes in lower 8 bits */
hash ^= hash >> 8;
hash ^= hash >> 16;
/* return lower 8 bits as entropy */
return (u8)(hash & 0xFF);
}
/* opa_vnic_get_def_port - get default port based on entropy */
static inline u8 opa_vnic_get_def_port(struct opa_vnic_adapter *adapter,
u8 entropy)
{
u8 flow_id;
/* Add the upper and lower 4-bits of entropy to get the flow id */
flow_id = ((entropy & 0xf) + (entropy >> 4));
return adapter->flow_tbl[flow_id & (OPA_VNIC_FLOW_TBL_SIZE - 1)];
}
/* Calculate packet length including OPA header, crc and padding */
static inline int opa_vnic_wire_length(struct sk_buff *skb)
{
u32 pad_len;
/* padding for 8 bytes size alignment */
pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7;
pad_len += OPA_VNIC_ICRC_TAIL_LEN;
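/* Return length in 8-byte (quadword) units, as consumed by the 16B header length field */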
return (skb->len + pad_len) >> 3;
}
/* opa_vnic_encap_skb - encapsulate skb packet with OPA header and meta data */
void opa_vnic_encap_skb(struct opa_vnic_adapter *adapter, struct sk_buff *skb)
{
struct __opa_veswport_info *info = &adapter->info;
struct opa_vnic_skb_mdata *mdata;
u8 def_port, sc, rc, entropy, *hdr;
u16 len, l4_hdr;
u32 dlid;
hdr = skb_push(skb, OPA_VNIC_HDR_LEN);
entropy = opa_vnic_calc_entropy(skb);
def_port = opa_vnic_get_def_port(adapter, entropy);
len = opa_vnic_wire_length(skb);
dlid = opa_vnic_get_dlid(adapter, skb, def_port);
sc = opa_vnic_get_sc(info, skb);
rc = opa_vnic_get_rc(info, skb);
l4_hdr = info->vesw.vesw_id;
mdata = skb_push(skb, sizeof(*mdata));
mdata->vl = opa_vnic_get_vl(adapter, skb);
mdata->entropy = entropy;
mdata->flags = 0;
if (unlikely(!dlid)) {
mdata->flags = OPA_VNIC_SKB_MDATA_ENCAP_ERR;
return;
}
opa_vnic_make_header(hdr, info->vport.encap_slid, dlid, len,
info->vesw.pkey, entropy, sc, rc,
OPA_VNIC_L4_ETHR, l4_hdr);
}
| linux-master | drivers/infiniband/ulp/opa_vnic/opa_vnic_encap.c |
/*
* Copyright(c) 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* This file contains OPA VNIC ethtool functions
*/
#include <linux/ethtool.h>
#include "opa_vnic_internal.h"
enum {NETDEV_STATS, VNIC_STATS};
struct vnic_stats {
char stat_string[ETH_GSTRING_LEN];
struct {
int sizeof_stat;
int stat_offset;
};
};
#define VNIC_STAT(m) { sizeof_field(struct opa_vnic_stats, m), \
offsetof(struct opa_vnic_stats, m) }
static struct vnic_stats vnic_gstrings_stats[] = {
/* NETDEV stats */
{"rx_packets", VNIC_STAT(netstats.rx_packets)},
{"tx_packets", VNIC_STAT(netstats.tx_packets)},
{"rx_bytes", VNIC_STAT(netstats.rx_bytes)},
{"tx_bytes", VNIC_STAT(netstats.tx_bytes)},
{"rx_errors", VNIC_STAT(netstats.rx_errors)},
{"tx_errors", VNIC_STAT(netstats.tx_errors)},
{"rx_dropped", VNIC_STAT(netstats.rx_dropped)},
{"tx_dropped", VNIC_STAT(netstats.tx_dropped)},
/* SUMMARY counters */
{"tx_unicast", VNIC_STAT(tx_grp.unicast)},
{"tx_mcastbcast", VNIC_STAT(tx_grp.mcastbcast)},
{"tx_untagged", VNIC_STAT(tx_grp.untagged)},
{"tx_vlan", VNIC_STAT(tx_grp.vlan)},
{"tx_64_size", VNIC_STAT(tx_grp.s_64)},
{"tx_65_127", VNIC_STAT(tx_grp.s_65_127)},
{"tx_128_255", VNIC_STAT(tx_grp.s_128_255)},
{"tx_256_511", VNIC_STAT(tx_grp.s_256_511)},
{"tx_512_1023", VNIC_STAT(tx_grp.s_512_1023)},
{"tx_1024_1518", VNIC_STAT(tx_grp.s_1024_1518)},
{"tx_1519_max", VNIC_STAT(tx_grp.s_1519_max)},
{"rx_unicast", VNIC_STAT(rx_grp.unicast)},
{"rx_mcastbcast", VNIC_STAT(rx_grp.mcastbcast)},
{"rx_untagged", VNIC_STAT(rx_grp.untagged)},
{"rx_vlan", VNIC_STAT(rx_grp.vlan)},
{"rx_64_size", VNIC_STAT(rx_grp.s_64)},
{"rx_65_127", VNIC_STAT(rx_grp.s_65_127)},
{"rx_128_255", VNIC_STAT(rx_grp.s_128_255)},
{"rx_256_511", VNIC_STAT(rx_grp.s_256_511)},
{"rx_512_1023", VNIC_STAT(rx_grp.s_512_1023)},
{"rx_1024_1518", VNIC_STAT(rx_grp.s_1024_1518)},
{"rx_1519_max", VNIC_STAT(rx_grp.s_1519_max)},
/* ERROR counters */
{"rx_fifo_errors", VNIC_STAT(netstats.rx_fifo_errors)},
{"rx_length_errors", VNIC_STAT(netstats.rx_length_errors)},
{"tx_fifo_errors", VNIC_STAT(netstats.tx_fifo_errors)},
{"tx_carrier_errors", VNIC_STAT(netstats.tx_carrier_errors)},
{"tx_dlid_zero", VNIC_STAT(tx_dlid_zero)},
{"tx_drop_state", VNIC_STAT(tx_drop_state)},
{"rx_drop_state", VNIC_STAT(rx_drop_state)},
{"rx_oversize", VNIC_STAT(rx_oversize)},
{"rx_runt", VNIC_STAT(rx_runt)},
};
#define VNIC_STATS_LEN ARRAY_SIZE(vnic_gstrings_stats)
/* vnic_get_drvinfo - get driver info */
static void vnic_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
strscpy(drvinfo->driver, opa_vnic_driver_name, sizeof(drvinfo->driver));
strscpy(drvinfo->bus_info, dev_name(netdev->dev.parent),
sizeof(drvinfo->bus_info));
}
/* vnic_get_sset_count - get string set count */
static int vnic_get_sset_count(struct net_device *netdev, int sset)
{
return (sset == ETH_SS_STATS) ? VNIC_STATS_LEN : -EOPNOTSUPP;
}
/* vnic_get_ethtool_stats - get statistics */
static void vnic_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
struct opa_vnic_stats vstats;
int i;
memset(&vstats, 0, sizeof(vstats));
spin_lock(&adapter->stats_lock);
adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats);
spin_unlock(&adapter->stats_lock);
for (i = 0; i < VNIC_STATS_LEN; i++) {
char *p = (char *)&vstats + vnic_gstrings_stats[i].stat_offset;
data[i] = (vnic_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
/* vnic_get_strings - get strings */
static void vnic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
int i;
if (stringset != ETH_SS_STATS)
return;
for (i = 0; i < VNIC_STATS_LEN; i++)
memcpy(data + i * ETH_GSTRING_LEN,
vnic_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
}
/* ethtool ops */
static const struct ethtool_ops opa_vnic_ethtool_ops = {
.get_drvinfo = vnic_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = vnic_get_strings,
.get_sset_count = vnic_get_sset_count,
.get_ethtool_stats = vnic_get_ethtool_stats,
};
/* opa_vnic_set_ethtool_ops - set ethtool ops */
void opa_vnic_set_ethtool_ops(struct net_device *netdev)
{
netdev->ethtool_ops = &opa_vnic_ethtool_ops;
}
| linux-master | drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c |
/*
* Copyright(c) 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* - Neither the name of Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
/*
* This file contains OPA Virtual Network Interface Controller (VNIC) driver
* netdev functionality.
*/
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include "opa_vnic_internal.h"
#define OPA_TX_TIMEOUT_MS 1000
#define OPA_VNIC_SKB_HEADROOM \
ALIGN((OPA_VNIC_HDR_LEN + OPA_VNIC_SKB_MDATA_LEN), 8)
/* This function is overloaded for opa_vnic specific implementation */
static void opa_vnic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
struct opa_vnic_stats vstats;
memset(&vstats, 0, sizeof(vstats));
spin_lock(&adapter->stats_lock);
adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats);
spin_unlock(&adapter->stats_lock);
memcpy(stats, &vstats.netstats, sizeof(*stats));
}
/* opa_netdev_start_xmit - transmit function */
static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
v_dbg("xmit: queue %d skb len %d\n", skb->queue_mapping, skb->len);
/* pad to ensure minimum ethernet packet length */
if (unlikely(skb->len < ETH_ZLEN)) {
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
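/* skb_padto() zero-fills the tail but does not update skb->len; extend it here */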
skb_put(skb, ETH_ZLEN - skb->len);
}
opa_vnic_encap_skb(adapter, skb);
return adapter->rn_ops->ndo_start_xmit(skb, netdev);
}
static u16 opa_vnic_select_queue(struct net_device *netdev, struct sk_buff *skb,
struct net_device *sb_dev)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
struct opa_vnic_skb_mdata *mdata;
int rc;
/* pass entropy and vl as metadata in skb */
mdata = skb_push(skb, sizeof(*mdata));
mdata->entropy = opa_vnic_calc_entropy(skb);
mdata->vl = opa_vnic_get_vl(adapter, skb);
rc = adapter->rn_ops->ndo_select_queue(netdev, skb, sb_dev);
skb_pull(skb, sizeof(*mdata));
return rc;
}
static void opa_vnic_update_state(struct opa_vnic_adapter *adapter, bool up)
{
struct __opa_veswport_info *info = &adapter->info;
mutex_lock(&adapter->lock);
/* Operational state can only be DROP_ALL or FORWARDING */
if ((info->vport.config_state == OPA_VNIC_STATE_FORWARDING) && up) {
info->vport.oper_state = OPA_VNIC_STATE_FORWARDING;
info->vport.eth_link_status = OPA_VNIC_ETH_LINK_UP;
} else {
info->vport.oper_state = OPA_VNIC_STATE_DROP_ALL;
info->vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
}
if (info->vport.config_state == OPA_VNIC_STATE_FORWARDING)
netif_dormant_off(adapter->netdev);
else
netif_dormant_on(adapter->netdev);
mutex_unlock(&adapter->lock);
}
/* opa_vnic_process_vema_config - process vema configuration updates */
void opa_vnic_process_vema_config(struct opa_vnic_adapter *adapter)
{
struct __opa_veswport_info *info = &adapter->info;
struct rdma_netdev *rn = netdev_priv(adapter->netdev);
u8 port_num[OPA_VESW_MAX_NUM_DEF_PORT] = { 0 };
struct net_device *netdev = adapter->netdev;
u8 i, port_count = 0;
u16 port_mask;
/* If the base_mac_addr is changed, update the interface mac address */
if (memcmp(info->vport.base_mac_addr, adapter->vema_mac_addr,
ARRAY_SIZE(info->vport.base_mac_addr))) {
struct sockaddr saddr;
memcpy(saddr.sa_data, info->vport.base_mac_addr,
ARRAY_SIZE(info->vport.base_mac_addr));
mutex_lock(&adapter->lock);
eth_commit_mac_addr_change(netdev, &saddr);
memcpy(adapter->vema_mac_addr,
info->vport.base_mac_addr, ETH_ALEN);
mutex_unlock(&adapter->lock);
}
rn->set_id(netdev, info->vesw.vesw_id);
/* Handle MTU limit change */
rtnl_lock();
netdev->max_mtu = max_t(unsigned int, info->vesw.eth_mtu,
netdev->min_mtu);
if (netdev->mtu > netdev->max_mtu)
dev_set_mtu(netdev, netdev->max_mtu);
rtnl_unlock();
/* Update flow to default port redirection table */
port_mask = info->vesw.def_port_mask;
for (i = 0; i < OPA_VESW_MAX_NUM_DEF_PORT; i++) {
if (port_mask & 1)
port_num[port_count++] = i;
port_mask >>= 1;
}
/*
* Build the flow table. Flow table is required when destination LID
* is not available. Up to OPA_VNIC_FLOW_TBL_SIZE flows supported.
* Each flow need a default port number to get its dlid from the
* u_ucast_dlid array.
*/
for (i = 0; i < OPA_VNIC_FLOW_TBL_SIZE; i++)
adapter->flow_tbl[i] = port_count ? port_num[i % port_count] :
OPA_VNIC_INVALID_PORT;
/* update state */
opa_vnic_update_state(adapter, !!(netdev->flags & IFF_UP));
}
/*
* Set the power on default values in adapter's vema interface structure.
*/
static inline void opa_vnic_set_pod_values(struct opa_vnic_adapter *adapter)
{
adapter->info.vport.max_mac_tbl_ent = OPA_VNIC_MAC_TBL_MAX_ENTRIES;
adapter->info.vport.max_smac_ent = OPA_VNIC_MAX_SMAC_LIMIT;
adapter->info.vport.config_state = OPA_VNIC_STATE_DROP_ALL;
adapter->info.vport.eth_link_status = OPA_VNIC_ETH_LINK_DOWN;
adapter->info.vesw.eth_mtu = ETH_DATA_LEN;
}
/* opa_vnic_set_mac_addr - change mac address */
static int opa_vnic_set_mac_addr(struct net_device *netdev, void *addr)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
struct sockaddr *sa = addr;
int rc;
if (!memcmp(netdev->dev_addr, sa->sa_data, ETH_ALEN))
return 0;
mutex_lock(&adapter->lock);
rc = eth_mac_addr(netdev, addr);
mutex_unlock(&adapter->lock);
if (rc)
return rc;
adapter->info.vport.uc_macs_gen_count++;
opa_vnic_vema_report_event(adapter,
OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE);
return 0;
}
/*
* opa_vnic_mac_send_event - post event on possible mac list exchange
* Send trap when digest from uc/mc mac list differs from previous run.
* Digest is evaluated similar to how cksum does.
*/
static void opa_vnic_mac_send_event(struct net_device *netdev, u8 event)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
struct netdev_hw_addr *ha;
struct netdev_hw_addr_list *hw_list;
u32 *ref_crc;
u32 l, crc = 0;
switch (event) {
case OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE:
hw_list = &netdev->uc;
adapter->info.vport.uc_macs_gen_count++;
ref_crc = &adapter->umac_hash;
break;
case OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE:
hw_list = &netdev->mc;
adapter->info.vport.mc_macs_gen_count++;
ref_crc = &adapter->mmac_hash;
break;
default:
return;
}
netdev_hw_addr_list_for_each(ha, hw_list) {
crc = crc32_le(crc, ha->addr, ETH_ALEN);
}
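/* Fold the total list length into the digest and finalize with a bitwise NOT */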
l = netdev_hw_addr_list_count(hw_list) * ETH_ALEN;
crc = ~crc32_le(crc, (void *)&l, sizeof(l));
if (crc != *ref_crc) {
*ref_crc = crc;
opa_vnic_vema_report_event(adapter, event);
}
}
/* opa_vnic_set_rx_mode - handle uc/mc mac list change */
static void opa_vnic_set_rx_mode(struct net_device *netdev)
{
opa_vnic_mac_send_event(netdev,
OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE);
opa_vnic_mac_send_event(netdev,
OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE);
}
/* opa_netdev_open - activate network interface */
static int opa_netdev_open(struct net_device *netdev)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
int rc;
rc = adapter->rn_ops->ndo_open(adapter->netdev);
if (rc) {
v_dbg("open failed %d\n", rc);
return rc;
}
/* Update status and send trap */
opa_vnic_update_state(adapter, true);
opa_vnic_vema_report_event(adapter,
OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
return 0;
}
/* opa_netdev_close - disable network interface */
static int opa_netdev_close(struct net_device *netdev)
{
struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
int rc;
rc = adapter->rn_ops->ndo_stop(adapter->netdev);
if (rc) {
v_dbg("close failed %d\n", rc);
return rc;
}
/* Update status and send trap */
opa_vnic_update_state(adapter, false);
opa_vnic_vema_report_event(adapter,
OPA_VESWPORT_TRAP_ETH_LINK_STATUS_CHANGE);
return 0;
}
/* netdev ops */
static const struct net_device_ops opa_netdev_ops = {
.ndo_open = opa_netdev_open,
.ndo_stop = opa_netdev_close,
.ndo_start_xmit = opa_netdev_start_xmit,
.ndo_get_stats64 = opa_vnic_get_stats64,
.ndo_set_rx_mode = opa_vnic_set_rx_mode,
.ndo_select_queue = opa_vnic_select_queue,
.ndo_set_mac_address = opa_vnic_set_mac_addr,
};
/* opa_vnic_add_netdev - create vnic netdev interface */
struct opa_vnic_adapter *opa_vnic_add_netdev(struct ib_device *ibdev,
u8 port_num, u8 vport_num)
{
struct opa_vnic_adapter *adapter;
struct net_device *netdev;
struct rdma_netdev *rn;
int rc;
netdev = ibdev->ops.alloc_rdma_netdev(ibdev, port_num,
RDMA_NETDEV_OPA_VNIC,
"veth%d", NET_NAME_UNKNOWN,
ether_setup);
if (!netdev)
return ERR_PTR(-ENOMEM);
else if (IS_ERR(netdev))
return ERR_CAST(netdev);
rn = netdev_priv(netdev);
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
rc = -ENOMEM;
goto adapter_err;
}
rn->clnt_priv = adapter;
rn->hca = ibdev;
rn->port_num = port_num;
adapter->netdev = netdev;
adapter->ibdev = ibdev;
adapter->port_num = port_num;
adapter->vport_num = vport_num;
adapter->rn_ops = netdev->netdev_ops;
netdev->netdev_ops = &opa_netdev_ops;
netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
netdev->hard_header_len += OPA_VNIC_SKB_HEADROOM;
mutex_init(&adapter->lock);
mutex_init(&adapter->mactbl_lock);
spin_lock_init(&adapter->stats_lock);
SET_NETDEV_DEV(netdev, ibdev->dev.parent);
opa_vnic_set_ethtool_ops(netdev);
opa_vnic_set_pod_values(adapter);
rc = register_netdev(netdev);
if (rc)
goto netdev_err;
netif_carrier_off(netdev);
netif_dormant_on(netdev);
v_info("initialized\n");
return adapter;
netdev_err:
mutex_destroy(&adapter->lock);
mutex_destroy(&adapter->mactbl_lock);
kfree(adapter);
adapter_err:
rn->free_rdma_netdev(netdev);
return ERR_PTR(rc);
}
/* opa_vnic_rem_netdev - remove vnic netdev interface */
void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct rdma_netdev *rn = netdev_priv(netdev);
v_info("removing\n");
unregister_netdev(netdev);
opa_vnic_release_mac_tbl(adapter);
mutex_destroy(&adapter->lock);
mutex_destroy(&adapter->mactbl_lock);
kfree(adapter);
rn->free_rdma_netdev(netdev);
}
| linux-master | drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c |
/*
* Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
* Copyright (C) 2008 - 2011 Bart Van Assche <[email protected]>.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/kthread.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "ib_srpt.h"
/* Name of this kernel module. */
#define DRV_NAME "ib_srpt"
#define SRPT_ID_STRING "Linux SRP target"
#undef pr_fmt
#define pr_fmt(fmt) DRV_NAME " " fmt
MODULE_AUTHOR("Vu Pham and Bart Van Assche");
MODULE_DESCRIPTION("SCSI RDMA Protocol target driver");
MODULE_LICENSE("Dual BSD/GPL");
/*
* Global Variables
*/
static u64 srpt_service_guid;
static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */
static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */
static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
module_param(srp_max_req_size, int, 0444);
MODULE_PARM_DESC(srp_max_req_size,
"Maximum size of SRP request messages in bytes.");
static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
module_param(srpt_srq_size, int, 0444);
MODULE_PARM_DESC(srpt_srq_size,
"Shared receive queue (SRQ) size.");
static int srpt_get_u64_x(char *buffer, const struct kernel_param *kp)
{
return sprintf(buffer, "0x%016llx\n", *(u64 *)kp->arg);
}
module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
0444);
MODULE_PARM_DESC(srpt_service_guid,
"Using this value for ioc_guid, id_ext, and cm_listen_id instead of using the node_guid of the first HCA.");
static struct ib_client srpt_client;
/* Protects both rdma_cm_port and rdma_cm_id. */
static DEFINE_MUTEX(rdma_cm_mutex);
/* Port number RDMA/CM will bind to. */
static u16 rdma_cm_port;
static struct rdma_cm_id *rdma_cm_id;
static void srpt_release_cmd(struct se_cmd *se_cmd);
static void srpt_free_ch(struct kref *kref);
static int srpt_queue_status(struct se_cmd *cmd);
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
/*
* The only allowed channel state changes are those that change the channel
* state into a state with a higher numerical value. Hence the new > prev test.
*/
static bool srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new)
{
unsigned long flags;
enum rdma_ch_state prev;
bool changed = false;
spin_lock_irqsave(&ch->spinlock, flags);
prev = ch->state;
if (new > prev) {
ch->state = new;
changed = true;
}
spin_unlock_irqrestore(&ch->spinlock, flags);
return changed;
}
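/*
 * Illustrative note (added, not in the original source): assuming the enum
 * ordering implied by this state machine (CH_CONNECTING < CH_LIVE <
 * CH_DISCONNECTING < CH_DRAINING < CH_DISCONNECTED), a forward transition
 * such as CH_CONNECTING -> CH_LIVE is accepted and the function returns
 * true, while a backward transition such as CH_DRAINING -> CH_LIVE leaves
 * the state untouched and returns false.
 */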
/**
* srpt_event_handler - asynchronous IB event callback function
* @handler: IB event handler registered by ib_register_event_handler().
* @event: Description of the event that occurred.
*
* Callback function called by the InfiniBand core when an asynchronous IB
* event occurs. This callback may occur in interrupt context. See also
* section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
* Architecture Specification.
*/
static void srpt_event_handler(struct ib_event_handler *handler,
struct ib_event *event)
{
struct srpt_device *sdev =
container_of(handler, struct srpt_device, event_handler);
struct srpt_port *sport;
u8 port_num;
pr_debug("ASYNC event= %d on device= %s\n", event->event,
dev_name(&sdev->device->dev));
switch (event->event) {
case IB_EVENT_PORT_ERR:
port_num = event->element.port_num - 1;
if (port_num < sdev->device->phys_port_cnt) {
sport = &sdev->port[port_num];
sport->lid = 0;
sport->sm_lid = 0;
} else {
WARN(true, "event %d: port_num %d out of range 1..%d\n",
event->event, port_num + 1,
sdev->device->phys_port_cnt);
}
break;
case IB_EVENT_PORT_ACTIVE:
case IB_EVENT_LID_CHANGE:
case IB_EVENT_PKEY_CHANGE:
case IB_EVENT_SM_CHANGE:
case IB_EVENT_CLIENT_REREGISTER:
case IB_EVENT_GID_CHANGE:
/* Refresh port data asynchronously. */
port_num = event->element.port_num - 1;
if (port_num < sdev->device->phys_port_cnt) {
sport = &sdev->port[port_num];
if (!sport->lid && !sport->sm_lid)
schedule_work(&sport->work);
} else {
WARN(true, "event %d: port_num %d out of range 1..%d\n",
event->event, port_num + 1,
sdev->device->phys_port_cnt);
}
break;
default:
pr_err("received unrecognized IB event %d\n", event->event);
break;
}
}
/**
* srpt_srq_event - SRQ event callback function
* @event: Description of the event that occurred.
* @ctx: Context pointer specified at SRQ creation time.
*/
static void srpt_srq_event(struct ib_event *event, void *ctx)
{
pr_debug("SRQ event %d\n", event->event);
}
static const char *get_ch_state_name(enum rdma_ch_state s)
{
switch (s) {
case CH_CONNECTING:
return "connecting";
case CH_LIVE:
return "live";
case CH_DISCONNECTING:
return "disconnecting";
case CH_DRAINING:
return "draining";
case CH_DISCONNECTED:
return "disconnected";
}
return "???";
}
/**
* srpt_qp_event - QP event callback function
* @event: Description of the event that occurred.
* @ch: SRPT RDMA channel.
*/
static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
{
pr_debug("QP event %d on ch=%p sess_name=%s-%d state=%s\n",
event->event, ch, ch->sess_name, ch->qp->qp_num,
get_ch_state_name(ch->state));
switch (event->event) {
case IB_EVENT_COMM_EST:
if (ch->using_rdma_cm)
rdma_notify(ch->rdma_cm.cm_id, event->event);
else
ib_cm_notify(ch->ib_cm.cm_id, event->event);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
pr_debug("%s-%d, state %s: received Last WQE event.\n",
ch->sess_name, ch->qp->qp_num,
get_ch_state_name(ch->state));
break;
default:
pr_err("received unrecognized IB QP event %d\n", event->event);
break;
}
}
/**
 * srpt_set_ioc - initialize an IOUnitInfo structure
* @c_list: controller list.
* @slot: one-based slot number.
* @value: four-bit value.
*
 * Copies the lowest four bits of value into element slot of the array of
 * four-bit elements called c_list (controller list). The slot index is one-based.
*/
static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
{
u16 id;
u8 tmp;
id = (slot - 1) / 2;
if (slot & 0x1) {
tmp = c_list[id] & 0xf;
c_list[id] = (value << 4) | tmp;
} else {
tmp = c_list[id] & 0xf0;
c_list[id] = (value & 0xf) | tmp;
}
}
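/*
 * Worked example (illustrative, added by the editor): with the one-based
 * slot numbering above, slot 1 maps to the high nibble of c_list[0], slot 2
 * to the low nibble of c_list[0], slot 3 to the high nibble of c_list[1],
 * and so on. Assuming c_list[0] starts out as zero:
 *
 *   srpt_set_ioc(c_list, 1, 1);     c_list[0] == 0x10
 *   srpt_set_ioc(c_list, 2, 0);     c_list[0] == 0x10 (low nibble stays 0)
 *
 * srpt_get_iou() below uses exactly this pattern to mark slot 1 as present
 * and slots 2..16 as empty.
 */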
/**
* srpt_get_class_port_info - copy ClassPortInfo to a management datagram
* @mad: Datagram that will be sent as response to DM_ATTR_CLASS_PORT_INFO.
*
* See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
* Specification.
*/
static void srpt_get_class_port_info(struct ib_dm_mad *mad)
{
struct ib_class_port_info *cif;
cif = (struct ib_class_port_info *)mad->data;
memset(cif, 0, sizeof(*cif));
cif->base_version = 1;
cif->class_version = 1;
ib_set_cpi_resp_time(cif, 20);
mad->mad_hdr.status = 0;
}
/**
* srpt_get_iou - write IOUnitInfo to a management datagram
* @mad: Datagram that will be sent as response to DM_ATTR_IOU_INFO.
*
* See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
* Specification. See also section B.7, table B.6 in the SRP r16a document.
*/
static void srpt_get_iou(struct ib_dm_mad *mad)
{
struct ib_dm_iou_info *ioui;
u8 slot;
int i;
ioui = (struct ib_dm_iou_info *)mad->data;
ioui->change_id = cpu_to_be16(1);
ioui->max_controllers = 16;
/* set present for slot 1 and empty for the rest */
srpt_set_ioc(ioui->controller_list, 1, 1);
for (i = 1, slot = 2; i < 16; i++, slot++)
srpt_set_ioc(ioui->controller_list, slot, 0);
mad->mad_hdr.status = 0;
}
/**
* srpt_get_ioc - write IOControllerprofile to a management datagram
* @sport: HCA port through which the MAD has been received.
* @slot: Slot number specified in DM_ATTR_IOC_PROFILE query.
* @mad: Datagram that will be sent as response to DM_ATTR_IOC_PROFILE.
*
* See also section 16.3.3.4 IOControllerProfile in the InfiniBand
* Architecture Specification. See also section B.7, table B.7 in the SRP
* r16a document.
*/
static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
struct ib_dm_mad *mad)
{
struct srpt_device *sdev = sport->sdev;
struct ib_dm_ioc_profile *iocp;
int send_queue_depth;
iocp = (struct ib_dm_ioc_profile *)mad->data;
if (!slot || slot > 16) {
mad->mad_hdr.status
= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
return;
}
if (slot > 2) {
mad->mad_hdr.status
= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
return;
}
if (sdev->use_srq)
send_queue_depth = sdev->srq_size;
else
send_queue_depth = min(MAX_SRPT_RQ_SIZE,
sdev->device->attrs.max_qp_wr);
memset(iocp, 0, sizeof(*iocp));
strcpy(iocp->id_string, SRPT_ID_STRING);
iocp->guid = cpu_to_be64(srpt_service_guid);
iocp->vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
iocp->device_id = cpu_to_be32(sdev->device->attrs.vendor_part_id);
iocp->device_version = cpu_to_be16(sdev->device->attrs.hw_ver);
iocp->subsys_vendor_id = cpu_to_be32(sdev->device->attrs.vendor_id);
iocp->subsys_device_id = 0x0;
iocp->io_class = cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
iocp->io_subclass = cpu_to_be16(SRP_IO_SUBCLASS);
iocp->protocol = cpu_to_be16(SRP_PROTOCOL);
iocp->protocol_version = cpu_to_be16(SRP_PROTOCOL_VERSION);
iocp->send_queue_depth = cpu_to_be16(send_queue_depth);
iocp->rdma_read_depth = 4;
iocp->send_size = cpu_to_be32(srp_max_req_size);
iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
1U << 24));
iocp->num_svc_entries = 1;
iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
mad->mad_hdr.status = 0;
}
/**
* srpt_get_svc_entries - write ServiceEntries to a management datagram
* @ioc_guid: I/O controller GUID to use in reply.
* @slot: I/O controller number.
* @hi: End of the range of service entries to be specified in the reply.
 * @lo: Start of the range of service entries to be specified in the reply.
* @mad: Datagram that will be sent as response to DM_ATTR_SVC_ENTRIES.
*
* See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
* Specification. See also section B.7, table B.8 in the SRP r16a document.
*/
static void srpt_get_svc_entries(u64 ioc_guid,
u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
{
struct ib_dm_svc_entries *svc_entries;
WARN_ON(!ioc_guid);
if (!slot || slot > 16) {
mad->mad_hdr.status
= cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
return;
}
if (slot > 2 || lo > hi || hi > 1) {
mad->mad_hdr.status
= cpu_to_be16(DM_MAD_STATUS_NO_IOC);
return;
}
svc_entries = (struct ib_dm_svc_entries *)mad->data;
memset(svc_entries, 0, sizeof(*svc_entries));
svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
snprintf(svc_entries->service_entries[0].name,
sizeof(svc_entries->service_entries[0].name),
"%s%016llx",
SRP_SERVICE_NAME_PREFIX,
ioc_guid);
mad->mad_hdr.status = 0;
}
/**
* srpt_mgmt_method_get - process a received management datagram
* @sp: HCA port through which the MAD has been received.
* @rq_mad: received MAD.
* @rsp_mad: response MAD.
*/
static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
struct ib_dm_mad *rsp_mad)
{
u16 attr_id;
u32 slot;
u8 hi, lo;
attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
switch (attr_id) {
case DM_ATTR_CLASS_PORT_INFO:
srpt_get_class_port_info(rsp_mad);
break;
case DM_ATTR_IOU_INFO:
srpt_get_iou(rsp_mad);
break;
case DM_ATTR_IOC_PROFILE:
slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
srpt_get_ioc(sp, slot, rsp_mad);
break;
case DM_ATTR_SVC_ENTRIES:
slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
hi = (u8) ((slot >> 8) & 0xff);
lo = (u8) (slot & 0xff);
slot = (u16) ((slot >> 16) & 0xffff);
srpt_get_svc_entries(srpt_service_guid,
slot, hi, lo, rsp_mad);
break;
default:
rsp_mad->mad_hdr.status =
cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
break;
}
}
/**
* srpt_mad_send_handler - MAD send completion callback
* @mad_agent: Return value of ib_register_mad_agent().
* @mad_wc: Work completion reporting that the MAD has been sent.
*/
static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_wc)
{
rdma_destroy_ah(mad_wc->send_buf->ah, RDMA_DESTROY_AH_SLEEPABLE);
ib_free_send_mad(mad_wc->send_buf);
}
/**
* srpt_mad_recv_handler - MAD reception callback function
* @mad_agent: Return value of ib_register_mad_agent().
* @send_buf: Not used.
* @mad_wc: Work completion reporting that a MAD has been received.
*/
static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf,
struct ib_mad_recv_wc *mad_wc)
{
struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
struct ib_ah *ah;
struct ib_mad_send_buf *rsp;
struct ib_dm_mad *dm_mad;
if (!mad_wc || !mad_wc->recv_buf.mad)
return;
ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
mad_wc->recv_buf.grh, mad_agent->port_num);
if (IS_ERR(ah))
goto err;
BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
mad_wc->wc->pkey_index, 0,
IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
GFP_KERNEL,
IB_MGMT_BASE_VERSION);
if (IS_ERR(rsp))
goto err_rsp;
rsp->ah = ah;
dm_mad = rsp->mad;
memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof(*dm_mad));
dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
dm_mad->mad_hdr.status = 0;
switch (mad_wc->recv_buf.mad->mad_hdr.method) {
case IB_MGMT_METHOD_GET:
srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
break;
case IB_MGMT_METHOD_SET:
dm_mad->mad_hdr.status =
cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
break;
default:
dm_mad->mad_hdr.status =
cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
break;
}
if (!ib_post_send_mad(rsp, NULL)) {
ib_free_recv_mad(mad_wc);
/* will destroy_ah & free_send_mad in send completion */
return;
}
ib_free_send_mad(rsp);
err_rsp:
rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
err:
ib_free_recv_mad(mad_wc);
}
static int srpt_format_guid(char *buf, unsigned int size, const __be64 *guid)
{
const __be16 *g = (const __be16 *)guid;
return snprintf(buf, size, "%04x:%04x:%04x:%04x",
be16_to_cpu(g[0]), be16_to_cpu(g[1]),
be16_to_cpu(g[2]), be16_to_cpu(g[3]));
}
/**
* srpt_refresh_port - configure a HCA port
* @sport: SRPT HCA port.
*
* Enable InfiniBand management datagram processing, update the cached sm_lid,
* lid and gid values, and register a callback function for processing MADs
* on the specified port.
*
* Note: It is safe to call this function more than once for the same port.
*/
static int srpt_refresh_port(struct srpt_port *sport)
{
struct ib_mad_agent *mad_agent;
struct ib_mad_reg_req reg_req;
struct ib_port_modify port_modify;
struct ib_port_attr port_attr;
int ret;
ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
if (ret)
return ret;
sport->sm_lid = port_attr.sm_lid;
sport->lid = port_attr.lid;
ret = rdma_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
if (ret)
return ret;
srpt_format_guid(sport->guid_name, ARRAY_SIZE(sport->guid_name),
&sport->gid.global.interface_id);
snprintf(sport->gid_name, ARRAY_SIZE(sport->gid_name),
"0x%016llx%016llx",
be64_to_cpu(sport->gid.global.subnet_prefix),
be64_to_cpu(sport->gid.global.interface_id));
if (rdma_protocol_iwarp(sport->sdev->device, sport->port))
return 0;
memset(&port_modify, 0, sizeof(port_modify));
port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
port_modify.clr_port_cap_mask = 0;
ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
if (ret) {
pr_warn("%s-%d: enabling device management failed (%d). Note: this is expected if SR-IOV is enabled.\n",
dev_name(&sport->sdev->device->dev), sport->port, ret);
return 0;
}
if (!sport->mad_agent) {
memset(&reg_req, 0, sizeof(reg_req));
reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
mad_agent = ib_register_mad_agent(sport->sdev->device,
sport->port,
IB_QPT_GSI,
&reg_req, 0,
srpt_mad_send_handler,
srpt_mad_recv_handler,
sport, 0);
if (IS_ERR(mad_agent)) {
pr_err("%s-%d: MAD agent registration failed (%ld). Note: this is expected if SR-IOV is enabled.\n",
dev_name(&sport->sdev->device->dev), sport->port,
PTR_ERR(mad_agent));
sport->mad_agent = NULL;
memset(&port_modify, 0, sizeof(port_modify));
port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
ib_modify_port(sport->sdev->device, sport->port, 0,
&port_modify);
return 0;
}
sport->mad_agent = mad_agent;
}
return 0;
}
/**
* srpt_unregister_mad_agent - unregister MAD callback functions
* @sdev: SRPT HCA pointer.
* @port_cnt: number of ports with registered MAD
*
* Note: It is safe to call this function more than once for the same device.
*/
static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt)
{
struct ib_port_modify port_modify = {
.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
};
struct srpt_port *sport;
int i;
for (i = 1; i <= port_cnt; i++) {
sport = &sdev->port[i - 1];
WARN_ON(sport->port != i);
if (sport->mad_agent) {
ib_modify_port(sdev->device, i, 0, &port_modify);
ib_unregister_mad_agent(sport->mad_agent);
sport->mad_agent = NULL;
}
}
}
/**
* srpt_alloc_ioctx - allocate a SRPT I/O context structure
* @sdev: SRPT HCA pointer.
* @ioctx_size: I/O context size.
* @buf_cache: I/O buffer cache.
* @dir: DMA data direction.
*/
static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
int ioctx_size,
struct kmem_cache *buf_cache,
enum dma_data_direction dir)
{
struct srpt_ioctx *ioctx;
ioctx = kzalloc(ioctx_size, GFP_KERNEL);
if (!ioctx)
goto err;
ioctx->buf = kmem_cache_alloc(buf_cache, GFP_KERNEL);
if (!ioctx->buf)
goto err_free_ioctx;
ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf,
kmem_cache_size(buf_cache), dir);
if (ib_dma_mapping_error(sdev->device, ioctx->dma))
goto err_free_buf;
return ioctx;
err_free_buf:
kmem_cache_free(buf_cache, ioctx->buf);
err_free_ioctx:
kfree(ioctx);
err:
return NULL;
}
/**
* srpt_free_ioctx - free a SRPT I/O context structure
* @sdev: SRPT HCA pointer.
* @ioctx: I/O context pointer.
* @buf_cache: I/O buffer cache.
* @dir: DMA data direction.
*/
static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
struct kmem_cache *buf_cache,
enum dma_data_direction dir)
{
if (!ioctx)
return;
ib_dma_unmap_single(sdev->device, ioctx->dma,
kmem_cache_size(buf_cache), dir);
kmem_cache_free(buf_cache, ioctx->buf);
kfree(ioctx);
}
/**
* srpt_alloc_ioctx_ring - allocate a ring of SRPT I/O context structures
* @sdev: Device to allocate the I/O context ring for.
* @ring_size: Number of elements in the I/O context ring.
* @ioctx_size: I/O context size.
* @buf_cache: I/O buffer cache.
* @alignment_offset: Offset in each ring buffer at which the SRP information
* unit starts.
* @dir: DMA data direction.
*/
static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
int ring_size, int ioctx_size,
struct kmem_cache *buf_cache,
int alignment_offset,
enum dma_data_direction dir)
{
struct srpt_ioctx **ring;
int i;
WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx) &&
ioctx_size != sizeof(struct srpt_send_ioctx));
ring = kvmalloc_array(ring_size, sizeof(ring[0]), GFP_KERNEL);
if (!ring)
goto out;
for (i = 0; i < ring_size; ++i) {
ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, buf_cache, dir);
if (!ring[i])
goto err;
ring[i]->index = i;
ring[i]->offset = alignment_offset;
}
goto out;
err:
while (--i >= 0)
srpt_free_ioctx(sdev, ring[i], buf_cache, dir);
kvfree(ring);
ring = NULL;
out:
return ring;
}
/**
* srpt_free_ioctx_ring - free the ring of SRPT I/O context structures
* @ioctx_ring: I/O context ring to be freed.
* @sdev: SRPT HCA pointer.
* @ring_size: Number of ring elements.
* @buf_cache: I/O buffer cache.
* @dir: DMA data direction.
*/
static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
struct srpt_device *sdev, int ring_size,
struct kmem_cache *buf_cache,
enum dma_data_direction dir)
{
int i;
if (!ioctx_ring)
return;
for (i = 0; i < ring_size; ++i)
srpt_free_ioctx(sdev, ioctx_ring[i], buf_cache, dir);
kvfree(ioctx_ring);
}
/**
* srpt_set_cmd_state - set the state of a SCSI command
* @ioctx: Send I/O context.
* @new: New I/O context state.
*
* Does not modify the state of aborted commands. Returns the previous command
* state.
*/
static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
enum srpt_command_state new)
{
enum srpt_command_state previous;
previous = ioctx->state;
if (previous != SRPT_STATE_DONE)
ioctx->state = new;
return previous;
}
/**
* srpt_test_and_set_cmd_state - test and set the state of a command
* @ioctx: Send I/O context.
* @old: Current I/O context state.
* @new: New I/O context state.
*
* Returns true if and only if the previous command state was equal to 'old'.
*/
static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
enum srpt_command_state old,
enum srpt_command_state new)
{
enum srpt_command_state previous;
WARN_ON(!ioctx);
WARN_ON(old == SRPT_STATE_DONE);
WARN_ON(new == SRPT_STATE_NEW);
previous = ioctx->state;
if (previous == old)
ioctx->state = new;
return previous == old;
}
/**
* srpt_post_recv - post an IB receive request
* @sdev: SRPT HCA pointer.
* @ch: SRPT RDMA channel.
* @ioctx: Receive I/O context pointer.
*/
static int srpt_post_recv(struct srpt_device *sdev, struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *ioctx)
{
struct ib_sge list;
struct ib_recv_wr wr;
BUG_ON(!sdev);
list.addr = ioctx->ioctx.dma + ioctx->ioctx.offset;
list.length = srp_max_req_size;
list.lkey = sdev->lkey;
ioctx->ioctx.cqe.done = srpt_recv_done;
wr.wr_cqe = &ioctx->ioctx.cqe;
wr.next = NULL;
wr.sg_list = &list;
wr.num_sge = 1;
if (sdev->use_srq)
return ib_post_srq_recv(sdev->srq, &wr, NULL);
else
return ib_post_recv(ch->qp, &wr, NULL);
}
/**
* srpt_zerolength_write - perform a zero-length RDMA write
* @ch: SRPT RDMA channel.
*
* A quote from the InfiniBand specification: C9-88: For an HCA responder
* using Reliable Connection service, for each zero-length RDMA READ or WRITE
* request, the R_Key shall not be validated, even if the request includes
* Immediate data.
*/
static int srpt_zerolength_write(struct srpt_rdma_ch *ch)
{
struct ib_rdma_wr wr = {
.wr = {
.next = NULL,
{ .wr_cqe = &ch->zw_cqe, },
.opcode = IB_WR_RDMA_WRITE,
.send_flags = IB_SEND_SIGNALED,
}
};
pr_debug("%s-%d: queued zerolength write\n", ch->sess_name,
ch->qp->qp_num);
return ib_post_send(ch->qp, &wr.wr, NULL);
}
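/*
 * Note added for clarity (not in the original source): this zero-length
 * write appears to serve two purposes. When posted on a healthy queue pair
 * its completion runs srpt_zerolength_write_done() below with IB_WC_SUCCESS
 * and kicks srpt_process_wait_list(). When posted after srpt_ch_qp_err()
 * has moved the QP into the error state (see srpt_close_ch()), it completes
 * with an error status, which signals that the send queue has drained and
 * schedules the channel release work.
 */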
static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct srpt_rdma_ch *ch = wc->qp->qp_context;
pr_debug("%s-%d wc->status %d\n", ch->sess_name, ch->qp->qp_num,
wc->status);
if (wc->status == IB_WC_SUCCESS) {
srpt_process_wait_list(ch);
} else {
if (srpt_set_ch_state(ch, CH_DISCONNECTED))
schedule_work(&ch->release_work);
else
pr_debug("%s-%d: already disconnected.\n",
ch->sess_name, ch->qp->qp_num);
}
}
static int srpt_alloc_rw_ctxs(struct srpt_send_ioctx *ioctx,
struct srp_direct_buf *db, int nbufs, struct scatterlist **sg,
unsigned *sg_cnt)
{
enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
struct scatterlist *prev = NULL;
unsigned prev_nents;
int ret, i;
if (nbufs == 1) {
ioctx->rw_ctxs = &ioctx->s_rw_ctx;
} else {
ioctx->rw_ctxs = kmalloc_array(nbufs, sizeof(*ioctx->rw_ctxs),
GFP_KERNEL);
if (!ioctx->rw_ctxs)
return -ENOMEM;
}
for (i = ioctx->n_rw_ctx; i < nbufs; i++, db++) {
struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
u64 remote_addr = be64_to_cpu(db->va);
u32 size = be32_to_cpu(db->len);
u32 rkey = be32_to_cpu(db->key);
ret = target_alloc_sgl(&ctx->sg, &ctx->nents, size, false,
i < nbufs - 1);
if (ret)
goto unwind;
ret = rdma_rw_ctx_init(&ctx->rw, ch->qp, ch->sport->port,
ctx->sg, ctx->nents, 0, remote_addr, rkey, dir);
if (ret < 0) {
target_free_sgl(ctx->sg, ctx->nents);
goto unwind;
}
ioctx->n_rdma += ret;
ioctx->n_rw_ctx++;
if (prev) {
sg_unmark_end(&prev[prev_nents - 1]);
sg_chain(prev, prev_nents + 1, ctx->sg);
} else {
*sg = ctx->sg;
}
prev = ctx->sg;
prev_nents = ctx->nents;
*sg_cnt += ctx->nents;
}
return 0;
unwind:
while (--i >= 0) {
struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
ctx->sg, ctx->nents, dir);
target_free_sgl(ctx->sg, ctx->nents);
}
if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
kfree(ioctx->rw_ctxs);
return ret;
}
static void srpt_free_rw_ctxs(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx)
{
enum dma_data_direction dir = target_reverse_dma_direction(&ioctx->cmd);
int i;
for (i = 0; i < ioctx->n_rw_ctx; i++) {
struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
rdma_rw_ctx_destroy(&ctx->rw, ch->qp, ch->sport->port,
ctx->sg, ctx->nents, dir);
target_free_sgl(ctx->sg, ctx->nents);
}
if (ioctx->rw_ctxs != &ioctx->s_rw_ctx)
kfree(ioctx->rw_ctxs);
}
static inline void *srpt_get_desc_buf(struct srp_cmd *srp_cmd)
{
/*
* The pointer computations below will only be compiled correctly
* if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
* whether srp_cmd::add_data has been declared as a byte pointer.
*/
BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) &&
!__same_type(srp_cmd->add_data[0], (u8)0));
/*
* According to the SRP spec, the lower two bits of the 'ADDITIONAL
* CDB LENGTH' field are reserved and the size in bytes of this field
* is four times the value specified in bits 3..7. Hence the "& ~3".
*/
return srp_cmd->add_data + (srp_cmd->add_cdb_len & ~3);
}
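/*
 * Worked example (illustrative, added by the editor): if add_cdb_len reads
 * 0x13, masking with ~3 yields 0x10, so the descriptor is taken to start 16
 * bytes into add_data, i.e. right after a 16-byte additional CDB. When
 * add_cdb_len is zero - the common case for CDBs of 16 bytes or less - the
 * descriptor starts at the very beginning of add_data.
 */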
/**
* srpt_get_desc_tbl - parse the data descriptors of a SRP_CMD request
* @recv_ioctx: I/O context associated with the received command @srp_cmd.
* @ioctx: I/O context that will be used for responding to the initiator.
* @srp_cmd: Pointer to the SRP_CMD request data.
* @dir: Pointer to the variable to which the transfer direction will be
* written.
* @sg: [out] scatterlist for the parsed SRP_CMD.
* @sg_cnt: [out] length of @sg.
* @data_len: Pointer to the variable to which the total data length of all
* descriptors in the SRP_CMD request will be written.
* @imm_data_offset: [in] Offset in SRP_CMD requests at which immediate data
* starts.
*
 * This function sets up ioctx->rw_ctxs and the scatterlist used for the data
 * transfer.
*
* Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
* -ENOMEM when memory allocation fails and zero upon success.
*/
static int srpt_get_desc_tbl(struct srpt_recv_ioctx *recv_ioctx,
struct srpt_send_ioctx *ioctx,
struct srp_cmd *srp_cmd, enum dma_data_direction *dir,
struct scatterlist **sg, unsigned int *sg_cnt, u64 *data_len,
u16 imm_data_offset)
{
BUG_ON(!dir);
BUG_ON(!data_len);
/*
* The lower four bits of the buffer format field contain the DATA-IN
* buffer descriptor format, and the highest four bits contain the
* DATA-OUT buffer descriptor format.
*/
if (srp_cmd->buf_fmt & 0xf)
/* DATA-IN: transfer data from target to initiator (read). */
*dir = DMA_FROM_DEVICE;
else if (srp_cmd->buf_fmt >> 4)
/* DATA-OUT: transfer data from initiator to target (write). */
*dir = DMA_TO_DEVICE;
else
*dir = DMA_NONE;
/* initialize data_direction early as srpt_alloc_rw_ctxs needs it */
ioctx->cmd.data_direction = *dir;
if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
struct srp_direct_buf *db = srpt_get_desc_buf(srp_cmd);
*data_len = be32_to_cpu(db->len);
return srpt_alloc_rw_ctxs(ioctx, db, 1, sg, sg_cnt);
} else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
struct srp_indirect_buf *idb = srpt_get_desc_buf(srp_cmd);
int nbufs = be32_to_cpu(idb->table_desc.len) /
sizeof(struct srp_direct_buf);
if (nbufs >
(srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
pr_err("received unsupported SRP_CMD request type (%u out + %u in != %u / %zu)\n",
srp_cmd->data_out_desc_cnt,
srp_cmd->data_in_desc_cnt,
be32_to_cpu(idb->table_desc.len),
sizeof(struct srp_direct_buf));
return -EINVAL;
}
*data_len = be32_to_cpu(idb->len);
return srpt_alloc_rw_ctxs(ioctx, idb->desc_list, nbufs,
sg, sg_cnt);
} else if ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_IMM) {
struct srp_imm_buf *imm_buf = srpt_get_desc_buf(srp_cmd);
void *data = (void *)srp_cmd + imm_data_offset;
uint32_t len = be32_to_cpu(imm_buf->len);
uint32_t req_size = imm_data_offset + len;
if (req_size > srp_max_req_size) {
pr_err("Immediate data (length %d + %d) exceeds request size %d\n",
imm_data_offset, len, srp_max_req_size);
return -EINVAL;
}
if (recv_ioctx->byte_len < req_size) {
pr_err("Received too few data - %d < %d\n",
recv_ioctx->byte_len, req_size);
return -EIO;
}
/*
* The immediate data buffer descriptor must occur before the
* immediate data itself.
*/
if ((void *)(imm_buf + 1) > (void *)data) {
pr_err("Received invalid write request\n");
return -EINVAL;
}
*data_len = len;
ioctx->recv_ioctx = recv_ioctx;
if ((uintptr_t)data & 511) {
pr_warn_once("Internal error - the receive buffers are not aligned properly.\n");
return -EINVAL;
}
sg_init_one(&ioctx->imm_sg, data, len);
*sg = &ioctx->imm_sg;
*sg_cnt = 1;
return 0;
} else {
*data_len = 0;
return 0;
}
}
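/*
 * Decoding example (illustrative, added for clarity): a buf_fmt whose low
 * nibble equals SRP_DATA_DESC_DIRECT and whose high nibble is zero describes
 * a DATA-IN transfer (read) with a single direct buffer, so the branch above
 * calls srpt_alloc_rw_ctxs() with nbufs == 1. A buf_fmt whose high nibble
 * equals SRP_DATA_DESC_INDIRECT describes a DATA-OUT transfer (write) whose
 * buffers sit in an indirect descriptor table, and nbufs is derived from
 * table_desc.len. A buf_fmt of zero means no data transfer at all.
 */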
/**
* srpt_init_ch_qp - initialize queue pair attributes
* @ch: SRPT RDMA channel.
* @qp: Queue pair pointer.
*
 * Initializes the attributes of queue pair 'qp' by allowing local write
 * access. Also transitions 'qp' to state IB_QPS_INIT.
*/
static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
struct ib_qp_attr *attr;
int ret;
WARN_ON_ONCE(ch->using_rdma_cm);
attr = kzalloc(sizeof(*attr), GFP_KERNEL);
if (!attr)
return -ENOMEM;
attr->qp_state = IB_QPS_INIT;
attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
attr->port_num = ch->sport->port;
ret = ib_find_cached_pkey(ch->sport->sdev->device, ch->sport->port,
ch->pkey, &attr->pkey_index);
if (ret < 0)
pr_err("Translating pkey %#x failed (%d) - using index 0\n",
ch->pkey, ret);
ret = ib_modify_qp(qp, attr,
IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
IB_QP_PKEY_INDEX);
kfree(attr);
return ret;
}
/**
* srpt_ch_qp_rtr - change the state of a channel to 'ready to receive' (RTR)
* @ch: channel of the queue pair.
* @qp: queue pair to change the state of.
*
* Returns zero upon success and a negative value upon failure.
*
* Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
* If this structure ever becomes larger, it might be necessary to allocate
* it dynamically instead of on the stack.
*/
static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
struct ib_qp_attr qp_attr;
int attr_mask;
int ret;
WARN_ON_ONCE(ch->using_rdma_cm);
qp_attr.qp_state = IB_QPS_RTR;
ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
if (ret)
goto out;
qp_attr.max_dest_rd_atomic = 4;
ret = ib_modify_qp(qp, &qp_attr, attr_mask);
out:
return ret;
}
/**
* srpt_ch_qp_rts - change the state of a channel to 'ready to send' (RTS)
* @ch: channel of the queue pair.
* @qp: queue pair to change the state of.
*
* Returns zero upon success and a negative value upon failure.
*
* Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
* If this structure ever becomes larger, it might be necessary to allocate
* it dynamically instead of on the stack.
*/
static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
{
struct ib_qp_attr qp_attr;
int attr_mask;
int ret;
qp_attr.qp_state = IB_QPS_RTS;
ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);
if (ret)
goto out;
qp_attr.max_rd_atomic = 4;
ret = ib_modify_qp(qp, &qp_attr, attr_mask);
out:
return ret;
}
/**
* srpt_ch_qp_err - set the channel queue pair state to 'error'
* @ch: SRPT RDMA channel.
*/
static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
{
struct ib_qp_attr qp_attr;
qp_attr.qp_state = IB_QPS_ERR;
return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
}
/**
* srpt_get_send_ioctx - obtain an I/O context for sending to the initiator
* @ch: SRPT RDMA channel.
*/
static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
{
struct srpt_send_ioctx *ioctx;
int tag, cpu;
BUG_ON(!ch);
tag = sbitmap_queue_get(&ch->sess->sess_tag_pool, &cpu);
if (tag < 0)
return NULL;
ioctx = ch->ioctx_ring[tag];
BUG_ON(ioctx->ch != ch);
ioctx->state = SRPT_STATE_NEW;
WARN_ON_ONCE(ioctx->recv_ioctx);
ioctx->n_rdma = 0;
ioctx->n_rw_ctx = 0;
ioctx->queue_status_only = false;
/*
* transport_init_se_cmd() does not initialize all fields, so do it
* here.
*/
memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
ioctx->cmd.map_tag = tag;
ioctx->cmd.map_cpu = cpu;
return ioctx;
}
/**
* srpt_abort_cmd - abort a SCSI command
* @ioctx: I/O context associated with the SCSI command.
*/
static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
{
enum srpt_command_state state;
BUG_ON(!ioctx);
/*
* If the command is in a state where the target core is waiting for
* the ib_srpt driver, change the state to the next state.
*/
state = ioctx->state;
switch (state) {
case SRPT_STATE_NEED_DATA:
ioctx->state = SRPT_STATE_DATA_IN;
break;
case SRPT_STATE_CMD_RSP_SENT:
case SRPT_STATE_MGMT_RSP_SENT:
ioctx->state = SRPT_STATE_DONE;
break;
default:
WARN_ONCE(true, "%s: unexpected I/O context state %d\n",
__func__, state);
break;
}
pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state,
ioctx->state, ioctx->cmd.tag);
switch (state) {
case SRPT_STATE_NEW:
case SRPT_STATE_DATA_IN:
case SRPT_STATE_MGMT:
case SRPT_STATE_DONE:
/*
* Do nothing - defer abort processing until
* srpt_queue_response() is invoked.
*/
break;
case SRPT_STATE_NEED_DATA:
pr_debug("tag %#llx: RDMA read error\n", ioctx->cmd.tag);
transport_generic_request_failure(&ioctx->cmd,
TCM_CHECK_CONDITION_ABORT_CMD);
break;
case SRPT_STATE_CMD_RSP_SENT:
/*
* SRP_RSP sending failed or the SRP_RSP send completion has
* not been received in time.
*/
transport_generic_free_cmd(&ioctx->cmd, 0);
break;
case SRPT_STATE_MGMT_RSP_SENT:
transport_generic_free_cmd(&ioctx->cmd, 0);
break;
default:
WARN(1, "Unexpected command state (%d)", state);
break;
}
return state;
}
/**
* srpt_rdma_read_done - RDMA read completion callback
* @cq: Completion queue.
* @wc: Work completion.
*
 * XXX: target_execute_cmd() used to be asynchronous, so unmapping the data
 * that had been transferred via IB RDMA had to be postponed until the
 * check_stop_free() callback. None of this is necessary anymore and should
 * be cleaned up.
*/
static void srpt_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct srpt_rdma_ch *ch = wc->qp->qp_context;
struct srpt_send_ioctx *ioctx =
container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);
WARN_ON(ioctx->n_rdma <= 0);
atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
ioctx->n_rdma = 0;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
pr_info("RDMA_READ for ioctx 0x%p failed with status %d\n",
ioctx, wc->status);
srpt_abort_cmd(ioctx);
return;
}
if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
SRPT_STATE_DATA_IN))
target_execute_cmd(&ioctx->cmd);
else
pr_err("%s[%d]: wrong state = %d\n", __func__,
__LINE__, ioctx->state);
}
/**
* srpt_build_cmd_rsp - build a SRP_RSP response
* @ch: RDMA channel through which the request has been received.
* @ioctx: I/O context associated with the SRP_CMD request. The response will
* be built in the buffer ioctx->buf points at and hence this function will
* overwrite the request data.
* @tag: tag of the request for which this response is being generated.
* @status: value for the STATUS field of the SRP_RSP information unit.
*
* Returns the size in bytes of the SRP_RSP response.
*
* An SRP_RSP response contains a SCSI status or service response. See also
* section 6.9 in the SRP r16a document for the format of an SRP_RSP
* response. See also SPC-2 for more information about sense data.
*/
static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx, u64 tag,
int status)
{
struct se_cmd *cmd = &ioctx->cmd;
struct srp_rsp *srp_rsp;
const u8 *sense_data;
int sense_data_len, max_sense_len;
u32 resid = cmd->residual_count;
/*
* The lowest bit of all SAM-3 status codes is zero (see also
* paragraph 5.3 in SAM-3).
*/
WARN_ON(status & 1);
srp_rsp = ioctx->ioctx.buf;
BUG_ON(!srp_rsp);
sense_data = ioctx->sense_data;
sense_data_len = ioctx->cmd.scsi_sense_length;
WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
memset(srp_rsp, 0, sizeof(*srp_rsp));
srp_rsp->opcode = SRP_RSP;
srp_rsp->req_lim_delta =
cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
srp_rsp->tag = tag;
srp_rsp->status = status;
if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
if (cmd->data_direction == DMA_TO_DEVICE) {
/* residual data from an underflow write */
srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
} else if (cmd->data_direction == DMA_FROM_DEVICE) {
/* residual data from an underflow read */
srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
}
} else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
if (cmd->data_direction == DMA_TO_DEVICE) {
/* residual data from an overflow write */
srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
} else if (cmd->data_direction == DMA_FROM_DEVICE) {
/* residual data from an overflow read */
srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
}
}
if (sense_data_len) {
BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
if (sense_data_len > max_sense_len) {
pr_warn("truncated sense data from %d to %d bytes\n",
sense_data_len, max_sense_len);
sense_data_len = max_sense_len;
}
srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
memcpy(srp_rsp->data, sense_data, sense_data_len);
}
return sizeof(*srp_rsp) + sense_data_len;
}
/**
* srpt_build_tskmgmt_rsp - build a task management response
* @ch: RDMA channel through which the request has been received.
* @ioctx: I/O context in which the SRP_RSP response will be built.
* @rsp_code: RSP_CODE that will be stored in the response.
* @tag: Tag of the request for which this response is being generated.
*
* Returns the size in bytes of the SRP_RSP response.
*
* An SRP_RSP response contains a SCSI status or service response. See also
* section 6.9 in the SRP r16a document for the format of an SRP_RSP
* response.
*/
static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
struct srpt_send_ioctx *ioctx,
u8 rsp_code, u64 tag)
{
struct srp_rsp *srp_rsp;
int resp_data_len;
int resp_len;
resp_data_len = 4;
resp_len = sizeof(*srp_rsp) + resp_data_len;
srp_rsp = ioctx->ioctx.buf;
BUG_ON(!srp_rsp);
memset(srp_rsp, 0, sizeof(*srp_rsp));
srp_rsp->opcode = SRP_RSP;
srp_rsp->req_lim_delta =
cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
srp_rsp->tag = tag;
srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
srp_rsp->data[3] = rsp_code;
return resp_len;
}
static int srpt_check_stop_free(struct se_cmd *cmd)
{
struct srpt_send_ioctx *ioctx = container_of(cmd,
struct srpt_send_ioctx, cmd);
return target_put_sess_cmd(&ioctx->cmd);
}
/**
* srpt_handle_cmd - process a SRP_CMD information unit
* @ch: SRPT RDMA channel.
* @recv_ioctx: Receive I/O context.
* @send_ioctx: Send I/O context.
*/
static void srpt_handle_cmd(struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *recv_ioctx,
struct srpt_send_ioctx *send_ioctx)
{
struct se_cmd *cmd;
struct srp_cmd *srp_cmd;
struct scatterlist *sg = NULL;
unsigned sg_cnt = 0;
u64 data_len;
enum dma_data_direction dir;
int rc;
BUG_ON(!send_ioctx);
srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
cmd = &send_ioctx->cmd;
cmd->tag = srp_cmd->tag;
switch (srp_cmd->task_attr) {
case SRP_CMD_SIMPLE_Q:
cmd->sam_task_attr = TCM_SIMPLE_TAG;
break;
case SRP_CMD_ORDERED_Q:
default:
cmd->sam_task_attr = TCM_ORDERED_TAG;
break;
case SRP_CMD_HEAD_OF_Q:
cmd->sam_task_attr = TCM_HEAD_TAG;
break;
case SRP_CMD_ACA:
cmd->sam_task_attr = TCM_ACA_TAG;
break;
}
rc = srpt_get_desc_tbl(recv_ioctx, send_ioctx, srp_cmd, &dir,
&sg, &sg_cnt, &data_len, ch->imm_data_offset);
if (rc) {
if (rc != -EAGAIN) {
pr_err("0x%llx: parsing SRP descriptor table failed.\n",
srp_cmd->tag);
}
goto busy;
}
rc = target_init_cmd(cmd, ch->sess, &send_ioctx->sense_data[0],
scsilun_to_int(&srp_cmd->lun), data_len,
TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
if (rc != 0) {
pr_debug("target_submit_cmd() returned %d for tag %#llx\n", rc,
srp_cmd->tag);
goto busy;
}
if (target_submit_prep(cmd, srp_cmd->cdb, sg, sg_cnt, NULL, 0, NULL, 0,
GFP_KERNEL))
return;
target_submit(cmd);
return;
busy:
target_send_busy(cmd);
}
static int srp_tmr_to_tcm(int fn)
{
switch (fn) {
case SRP_TSK_ABORT_TASK:
return TMR_ABORT_TASK;
case SRP_TSK_ABORT_TASK_SET:
return TMR_ABORT_TASK_SET;
case SRP_TSK_CLEAR_TASK_SET:
return TMR_CLEAR_TASK_SET;
case SRP_TSK_LUN_RESET:
return TMR_LUN_RESET;
case SRP_TSK_CLEAR_ACA:
return TMR_CLEAR_ACA;
default:
return -1;
}
}
/**
* srpt_handle_tsk_mgmt - process a SRP_TSK_MGMT information unit
* @ch: SRPT RDMA channel.
* @recv_ioctx: Receive I/O context.
* @send_ioctx: Send I/O context.
*
 * The response is sent to the initiator via the fabric queue_tm_rsp() callback.
*
* For more information about SRP_TSK_MGMT information units, see also section
* 6.7 in the SRP r16a document.
*/
static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
struct srpt_recv_ioctx *recv_ioctx,
struct srpt_send_ioctx *send_ioctx)
{
struct srp_tsk_mgmt *srp_tsk;
struct se_cmd *cmd;
struct se_session *sess = ch->sess;
int tcm_tmr;
int rc;
BUG_ON(!send_ioctx);
srp_tsk = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
cmd = &send_ioctx->cmd;
pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld ch %p sess %p\n",
srp_tsk->tsk_mgmt_func, srp_tsk->task_tag, srp_tsk->tag, ch,
ch->sess);
srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
send_ioctx->cmd.tag = srp_tsk->tag;
tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
rc = target_submit_tmr(&send_ioctx->cmd, sess, NULL,
scsilun_to_int(&srp_tsk->lun), srp_tsk, tcm_tmr,
GFP_KERNEL, srp_tsk->task_tag,
TARGET_SCF_ACK_KREF);
if (rc != 0) {
send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
cmd->se_tfo->queue_tm_rsp(cmd);
}
return;
}
/**
* srpt_handle_new_iu - process a newly received information unit
* @ch: RDMA channel through which the information unit has been received.
* @recv_ioctx: Receive I/O context associated with the information unit.
*/
static bool
srpt_handle_new_iu(struct srpt_rdma_ch *ch, struct srpt_recv_ioctx *recv_ioctx)
{
struct srpt_send_ioctx *send_ioctx = NULL;
struct srp_cmd *srp_cmd;
bool res = false;
u8 opcode;
BUG_ON(!ch);
BUG_ON(!recv_ioctx);
if (unlikely(ch->state == CH_CONNECTING))
goto push;
ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
recv_ioctx->ioctx.dma,
recv_ioctx->ioctx.offset + srp_max_req_size,
DMA_FROM_DEVICE);
srp_cmd = recv_ioctx->ioctx.buf + recv_ioctx->ioctx.offset;
opcode = srp_cmd->opcode;
if (opcode == SRP_CMD || opcode == SRP_TSK_MGMT) {
send_ioctx = srpt_get_send_ioctx(ch);
if (unlikely(!send_ioctx))
goto push;
}
if (!list_empty(&recv_ioctx->wait_list)) {
WARN_ON_ONCE(!ch->processing_wait_list);
list_del_init(&recv_ioctx->wait_list);
}
switch (opcode) {
case SRP_CMD:
srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
break;
case SRP_TSK_MGMT:
srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
break;
case SRP_I_LOGOUT:
pr_err("Not yet implemented: SRP_I_LOGOUT\n");
break;
case SRP_CRED_RSP:
pr_debug("received SRP_CRED_RSP\n");
break;
case SRP_AER_RSP:
pr_debug("received SRP_AER_RSP\n");
break;
case SRP_RSP:
pr_err("Received SRP_RSP\n");
break;
default:
pr_err("received IU with unknown opcode 0x%x\n", opcode);
break;
}
if (!send_ioctx || !send_ioctx->recv_ioctx)
srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
res = true;
out:
return res;
push:
if (list_empty(&recv_ioctx->wait_list)) {
WARN_ON_ONCE(ch->processing_wait_list);
list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
}
goto out;
}
static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct srpt_rdma_ch *ch = wc->qp->qp_context;
struct srpt_recv_ioctx *ioctx =
container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);
if (wc->status == IB_WC_SUCCESS) {
int req_lim;
req_lim = atomic_dec_return(&ch->req_lim);
if (unlikely(req_lim < 0))
pr_err("req_lim = %d < 0\n", req_lim);
ioctx->byte_len = wc->byte_len;
srpt_handle_new_iu(ch, ioctx);
} else {
pr_info_ratelimited("receiving failed for ioctx %p with status %d\n",
ioctx, wc->status);
}
}
/*
* This function must be called from the context in which RDMA completions are
* processed because it accesses the wait list without protection against
* access from other threads.
*/
static void srpt_process_wait_list(struct srpt_rdma_ch *ch)
{
struct srpt_recv_ioctx *recv_ioctx, *tmp;
WARN_ON_ONCE(ch->state == CH_CONNECTING);
if (list_empty(&ch->cmd_wait_list))
return;
WARN_ON_ONCE(ch->processing_wait_list);
ch->processing_wait_list = true;
list_for_each_entry_safe(recv_ioctx, tmp, &ch->cmd_wait_list,
wait_list) {
if (!srpt_handle_new_iu(ch, recv_ioctx))
break;
}
ch->processing_wait_list = false;
}
/**
* srpt_send_done - send completion callback
* @cq: Completion queue.
* @wc: Work completion.
*
* Note: Although this has not yet been observed during tests, at least in
* theory it is possible that the srpt_get_send_ioctx() call invoked by
* srpt_handle_new_iu() fails. This is possible because the req_lim_delta
* value in each response is set to one, and it is possible that this response
* makes the initiator send a new request before the send completion for that
* response has been processed. This could e.g. happen if the call to
 * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
* if IB retransmission causes generation of the send completion to be
* delayed. Incoming information units for which srpt_get_send_ioctx() fails
* are queued on cmd_wait_list. The code below processes these delayed
* requests one at a time.
*/
static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct srpt_rdma_ch *ch = wc->qp->qp_context;
struct srpt_send_ioctx *ioctx =
container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);
enum srpt_command_state state;
state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
WARN_ON(state != SRPT_STATE_CMD_RSP_SENT &&
state != SRPT_STATE_MGMT_RSP_SENT);
atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
if (wc->status != IB_WC_SUCCESS)
pr_info("sending response for ioctx 0x%p failed with status %d\n",
ioctx, wc->status);
if (state != SRPT_STATE_DONE) {
transport_generic_free_cmd(&ioctx->cmd, 0);
} else {
pr_err("IB completion has been received too late for wr_id = %u.\n",
ioctx->ioctx.index);
}
srpt_process_wait_list(ch);
}
/**
* srpt_create_ch_ib - create receive and send completion queues
* @ch: SRPT RDMA channel.
*/
static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
{
struct ib_qp_init_attr *qp_init;
struct srpt_port *sport = ch->sport;
struct srpt_device *sdev = sport->sdev;
const struct ib_device_attr *attrs = &sdev->device->attrs;
int sq_size = sport->port_attrib.srp_sq_size;
int i, ret;
WARN_ON(ch->rq_size < 1);
ret = -ENOMEM;
qp_init = kzalloc(sizeof(*qp_init), GFP_KERNEL);
if (!qp_init)
goto out;
retry:
ch->cq = ib_cq_pool_get(sdev->device, ch->rq_size + sq_size, -1,
IB_POLL_WORKQUEUE);
if (IS_ERR(ch->cq)) {
ret = PTR_ERR(ch->cq);
pr_err("failed to create CQ cqe= %d ret= %d\n",
ch->rq_size + sq_size, ret);
goto out;
}
ch->cq_size = ch->rq_size + sq_size;
qp_init->qp_context = (void *)ch;
qp_init->event_handler
= (void(*)(struct ib_event *, void*))srpt_qp_event;
qp_init->send_cq = ch->cq;
qp_init->recv_cq = ch->cq;
qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
qp_init->qp_type = IB_QPT_RC;
/*
* We divide up our send queue size into half SEND WRs to send the
* completions, and half R/W contexts to actually do the RDMA
* READ/WRITE transfers. Note that we need to allocate CQ slots for
 * both, as RDMA contexts will also post completions for the
* RDMA READ case.
*/
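/*
 * Sizing example (illustrative, not from the original source): with a
 * hypothetical srp_sq_size of 4096 and an rq_size of 4096, the CQ above is
 * sized for 8192 entries, max_send_wr becomes 2048 (further capped by the
 * device's max_qp_wr) and max_rdma_ctxs becomes 2048 as well.
 */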
qp_init->cap.max_send_wr = min(sq_size / 2, attrs->max_qp_wr);
qp_init->cap.max_rdma_ctxs = sq_size / 2;
qp_init->cap.max_send_sge = attrs->max_send_sge;
qp_init->cap.max_recv_sge = 1;
qp_init->port_num = ch->sport->port;
if (sdev->use_srq)
qp_init->srq = sdev->srq;
else
qp_init->cap.max_recv_wr = ch->rq_size;
if (ch->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);
ch->qp = ch->rdma_cm.cm_id->qp;
} else {
ch->qp = ib_create_qp(sdev->pd, qp_init);
if (!IS_ERR(ch->qp)) {
ret = srpt_init_ch_qp(ch, ch->qp);
if (ret)
ib_destroy_qp(ch->qp);
} else {
ret = PTR_ERR(ch->qp);
}
}
if (ret) {
bool retry = sq_size > MIN_SRPT_SQ_SIZE;
if (retry) {
pr_debug("failed to create queue pair with sq_size = %d (%d) - retrying\n",
sq_size, ret);
ib_cq_pool_put(ch->cq, ch->cq_size);
sq_size = max(sq_size / 2, MIN_SRPT_SQ_SIZE);
goto retry;
} else {
pr_err("failed to create queue pair with sq_size = %d (%d)\n",
sq_size, ret);
goto err_destroy_cq;
}
}
atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d ch= %p\n",
__func__, ch->cq->cqe, qp_init->cap.max_send_sge,
qp_init->cap.max_send_wr, ch);
if (!sdev->use_srq)
for (i = 0; i < ch->rq_size; i++)
srpt_post_recv(sdev, ch, ch->ioctx_recv_ring[i]);
out:
kfree(qp_init);
return ret;
err_destroy_cq:
ch->qp = NULL;
ib_cq_pool_put(ch->cq, ch->cq_size);
goto out;
}
static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
{
ib_destroy_qp(ch->qp);
ib_cq_pool_put(ch->cq, ch->cq_size);
}
/**
* srpt_close_ch - close a RDMA channel
* @ch: SRPT RDMA channel.
*
* Make sure all resources associated with the channel will be deallocated at
* an appropriate time.
*
* Returns true if and only if the channel state has been modified into
* CH_DRAINING.
*/
static bool srpt_close_ch(struct srpt_rdma_ch *ch)
{
int ret;
if (!srpt_set_ch_state(ch, CH_DRAINING)) {
pr_debug("%s: already closed\n", ch->sess_name);
return false;
}
kref_get(&ch->kref);
ret = srpt_ch_qp_err(ch);
if (ret < 0)
pr_err("%s-%d: changing queue pair into error state failed: %d\n",
ch->sess_name, ch->qp->qp_num, ret);
ret = srpt_zerolength_write(ch);
if (ret < 0) {
pr_err("%s-%d: queuing zero-length write failed: %d\n",
ch->sess_name, ch->qp->qp_num, ret);
if (srpt_set_ch_state(ch, CH_DISCONNECTED))
schedule_work(&ch->release_work);
else
WARN_ON_ONCE(true);
}
kref_put(&ch->kref, srpt_free_ch);
return true;
}
/*
* Change the channel state into CH_DISCONNECTING. If a channel has not yet
* reached the connected state, close it. If a channel is in the connected
* state, send a DREQ. If a DREQ has been received, send a DREP. Note: it is
* the responsibility of the caller to ensure that this function is not
* invoked concurrently with the code that accepts a connection. This means
* that this function must either be invoked from inside a CM callback
* function or that it must be invoked with the srpt_port.mutex held.
*/
static int srpt_disconnect_ch(struct srpt_rdma_ch *ch)
{
int ret;
if (!srpt_set_ch_state(ch, CH_DISCONNECTING))
return -ENOTCONN;
if (ch->using_rdma_cm) {
ret = rdma_disconnect(ch->rdma_cm.cm_id);
} else {
ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);
if (ret < 0)
ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);
}
if (ret < 0 && srpt_close_ch(ch))
ret = 0;
return ret;
}
/* Send DREQ and wait for DREP. */
static void srpt_disconnect_ch_sync(struct srpt_rdma_ch *ch)
{
DECLARE_COMPLETION_ONSTACK(closed);
struct srpt_port *sport = ch->sport;
pr_debug("ch %s-%d state %d\n", ch->sess_name, ch->qp->qp_num,
ch->state);
ch->closed = &closed;
mutex_lock(&sport->mutex);
srpt_disconnect_ch(ch);
mutex_unlock(&sport->mutex);
while (wait_for_completion_timeout(&closed, 5 * HZ) == 0)
pr_info("%s(%s-%d state %d): still waiting ...\n", __func__,
ch->sess_name, ch->qp->qp_num, ch->state);
}
static void __srpt_close_all_ch(struct srpt_port *sport)
{
struct srpt_nexus *nexus;
struct srpt_rdma_ch *ch;
lockdep_assert_held(&sport->mutex);
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
if (srpt_disconnect_ch(ch) >= 0)
pr_info("Closing channel %s-%d because target %s_%d has been disabled\n",
ch->sess_name, ch->qp->qp_num,
dev_name(&sport->sdev->device->dev),
sport->port);
srpt_close_ch(ch);
}
}
}
/*
* Look up (i_port_id, t_port_id) in sport->nexus_list. Create an entry if
* it does not yet exist.
*/
static struct srpt_nexus *srpt_get_nexus(struct srpt_port *sport,
const u8 i_port_id[16],
const u8 t_port_id[16])
{
struct srpt_nexus *nexus = NULL, *tmp_nexus = NULL, *n;
for (;;) {
mutex_lock(&sport->mutex);
list_for_each_entry(n, &sport->nexus_list, entry) {
if (memcmp(n->i_port_id, i_port_id, 16) == 0 &&
memcmp(n->t_port_id, t_port_id, 16) == 0) {
nexus = n;
break;
}
}
if (!nexus && tmp_nexus) {
list_add_tail_rcu(&tmp_nexus->entry,
&sport->nexus_list);
swap(nexus, tmp_nexus);
}
mutex_unlock(&sport->mutex);
if (nexus)
break;
tmp_nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
if (!tmp_nexus) {
nexus = ERR_PTR(-ENOMEM);
break;
}
INIT_LIST_HEAD(&tmp_nexus->ch_list);
memcpy(tmp_nexus->i_port_id, i_port_id, 16);
memcpy(tmp_nexus->t_port_id, t_port_id, 16);
}
kfree(tmp_nexus);
return nexus;
}
static void srpt_set_enabled(struct srpt_port *sport, bool enabled)
__must_hold(&sport->mutex)
{
lockdep_assert_held(&sport->mutex);
if (sport->enabled == enabled)
return;
sport->enabled = enabled;
if (!enabled)
__srpt_close_all_ch(sport);
}
static void srpt_drop_sport_ref(struct srpt_port *sport)
{
if (atomic_dec_return(&sport->refcount) == 0 && sport->freed_channels)
complete(sport->freed_channels);
}
static void srpt_free_ch(struct kref *kref)
{
struct srpt_rdma_ch *ch = container_of(kref, struct srpt_rdma_ch, kref);
srpt_drop_sport_ref(ch->sport);
kfree_rcu(ch, rcu);
}
/*
* Shut down the SCSI target session, tell the connection manager to
* disconnect the associated RDMA channel, transition the QP to the error
* state and remove the channel from the channel list. This function is
* typically called from inside srpt_zerolength_write_done(). Concurrent
* srpt_zerolength_write() calls from inside srpt_close_ch() are possible
* as long as the channel is on sport->nexus_list.
*/
static void srpt_release_channel_work(struct work_struct *w)
{
struct srpt_rdma_ch *ch;
struct srpt_device *sdev;
struct srpt_port *sport;
struct se_session *se_sess;
ch = container_of(w, struct srpt_rdma_ch, release_work);
pr_debug("%s-%d\n", ch->sess_name, ch->qp->qp_num);
sdev = ch->sport->sdev;
BUG_ON(!sdev);
se_sess = ch->sess;
BUG_ON(!se_sess);
target_stop_session(se_sess);
target_wait_for_sess_cmds(se_sess);
target_remove_session(se_sess);
ch->sess = NULL;
if (ch->using_rdma_cm)
rdma_destroy_id(ch->rdma_cm.cm_id);
else
ib_destroy_cm_id(ch->ib_cm.cm_id);
sport = ch->sport;
mutex_lock(&sport->mutex);
list_del_rcu(&ch->list);
mutex_unlock(&sport->mutex);
if (ch->closed)
complete(ch->closed);
srpt_destroy_ch_ib(ch);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
ch->rsp_buf_cache, DMA_TO_DEVICE);
kmem_cache_destroy(ch->rsp_buf_cache);
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
sdev, ch->rq_size,
ch->req_buf_cache, DMA_FROM_DEVICE);
kmem_cache_destroy(ch->req_buf_cache);
kref_put(&ch->kref, srpt_free_ch);
}
/**
* srpt_cm_req_recv - process the event IB_CM_REQ_RECEIVED
* @sdev: HCA through which the login request was received.
* @ib_cm_id: IB/CM connection identifier in case of IB/CM.
* @rdma_cm_id: RDMA/CM connection identifier in case of RDMA/CM.
* @port_num: Port through which the REQ message was received.
* @pkey: P_Key of the incoming connection.
* @req: SRP login request.
* @src_addr: GID (IB/CM) or IP address (RDMA/CM) of the port that submitted
* the login request.
*
* Ownership of the cm_id is transferred to the target session if this
* function returns zero. Otherwise the caller remains the owner of cm_id.
*/
static int srpt_cm_req_recv(struct srpt_device *const sdev,
struct ib_cm_id *ib_cm_id,
struct rdma_cm_id *rdma_cm_id,
u8 port_num, __be16 pkey,
const struct srp_login_req *req,
const char *src_addr)
{
struct srpt_port *sport = &sdev->port[port_num - 1];
struct srpt_nexus *nexus;
struct srp_login_rsp *rsp = NULL;
struct srp_login_rej *rej = NULL;
union {
struct rdma_conn_param rdma_cm;
struct ib_cm_rep_param ib_cm;
} *rep_param = NULL;
struct srpt_rdma_ch *ch = NULL;
char i_port_id[36];
u32 it_iu_len;
int i, tag_num, tag_size, ret;
struct srpt_tpg *stpg;
WARN_ON_ONCE(irqs_disabled());
it_iu_len = be32_to_cpu(req->req_it_iu_len);
pr_info("Received SRP_LOGIN_REQ with i_port_id %pI6, t_port_id %pI6 and it_iu_len %d on port %d (guid=%pI6); pkey %#04x\n",
req->initiator_port_id, req->target_port_id, it_iu_len,
port_num, &sport->gid, be16_to_cpu(pkey));
nexus = srpt_get_nexus(sport, req->initiator_port_id,
req->target_port_id);
if (IS_ERR(nexus)) {
ret = PTR_ERR(nexus);
goto out;
}
ret = -ENOMEM;
rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
rej = kzalloc(sizeof(*rej), GFP_KERNEL);
rep_param = kzalloc(sizeof(*rep_param), GFP_KERNEL);
if (!rsp || !rej || !rep_param)
goto out;
ret = -EINVAL;
if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
rej->reason = cpu_to_be32(
SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
pr_err("rejected SRP_LOGIN_REQ because its length (%d bytes) is out of range (%d .. %d)\n",
it_iu_len, 64, srp_max_req_size);
goto reject;
}
if (!sport->enabled) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_info("rejected SRP_LOGIN_REQ because target port %s_%d has not yet been enabled\n",
dev_name(&sport->sdev->device->dev), port_num);
goto reject;
}
if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
|| *(__be64 *)(req->target_port_id + 8) !=
cpu_to_be64(srpt_service_guid)) {
rej->reason = cpu_to_be32(
SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
pr_err("rejected SRP_LOGIN_REQ because it has an invalid target port identifier.\n");
goto reject;
}
ret = -ENOMEM;
ch = kzalloc(sizeof(*ch), GFP_KERNEL);
if (!ch) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because out of memory.\n");
goto reject;
}
kref_init(&ch->kref);
ch->pkey = be16_to_cpu(pkey);
ch->nexus = nexus;
ch->zw_cqe.done = srpt_zerolength_write_done;
INIT_WORK(&ch->release_work, srpt_release_channel_work);
ch->sport = sport;
if (rdma_cm_id) {
ch->using_rdma_cm = true;
ch->rdma_cm.cm_id = rdma_cm_id;
rdma_cm_id->context = ch;
} else {
ch->ib_cm.cm_id = ib_cm_id;
ib_cm_id->context = ch;
}
/*
* ch->rq_size should be at least as large as the initiator queue
	 * depth so that the initiator driver does not have to report
	 * QUEUE_FULL to the SCSI mid-layer.
*/
ch->rq_size = min(MAX_SRPT_RQ_SIZE, sdev->device->attrs.max_qp_wr);
spin_lock_init(&ch->spinlock);
ch->state = CH_CONNECTING;
INIT_LIST_HEAD(&ch->cmd_wait_list);
ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
512, 0, NULL);
if (!ch->rsp_buf_cache)
goto free_ch;
ch->ioctx_ring = (struct srpt_send_ioctx **)
srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
sizeof(*ch->ioctx_ring[0]),
ch->rsp_buf_cache, 0, DMA_TO_DEVICE);
if (!ch->ioctx_ring) {
pr_err("rejected SRP_LOGIN_REQ because creating a new QP SQ ring failed.\n");
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
goto free_rsp_cache;
}
for (i = 0; i < ch->rq_size; i++)
ch->ioctx_ring[i]->ch = ch;
if (!sdev->use_srq) {
u16 imm_data_offset = req->req_flags & SRP_IMMED_REQUESTED ?
be16_to_cpu(req->imm_data_offset) : 0;
u16 alignment_offset;
u32 req_sz;
if (req->req_flags & SRP_IMMED_REQUESTED)
pr_debug("imm_data_offset = %d\n",
be16_to_cpu(req->imm_data_offset));
if (imm_data_offset >= sizeof(struct srp_cmd)) {
ch->imm_data_offset = imm_data_offset;
rsp->rsp_flags |= SRP_LOGIN_RSP_IMMED_SUPP;
} else {
ch->imm_data_offset = 0;
}
alignment_offset = round_up(imm_data_offset, 512) -
imm_data_offset;
req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
512, 0, NULL);
if (!ch->req_buf_cache)
goto free_rsp_ring;
ch->ioctx_recv_ring = (struct srpt_recv_ioctx **)
srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
sizeof(*ch->ioctx_recv_ring[0]),
ch->req_buf_cache,
alignment_offset,
DMA_FROM_DEVICE);
if (!ch->ioctx_recv_ring) {
pr_err("rejected SRP_LOGIN_REQ because creating a new QP RQ ring failed.\n");
rej->reason =
cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
goto free_recv_cache;
}
for (i = 0; i < ch->rq_size; i++)
INIT_LIST_HEAD(&ch->ioctx_recv_ring[i]->wait_list);
}
ret = srpt_create_ch_ib(ch);
if (ret) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because creating a new RDMA channel failed.\n");
goto free_recv_ring;
}
strscpy(ch->sess_name, src_addr, sizeof(ch->sess_name));
snprintf(i_port_id, sizeof(i_port_id), "0x%016llx%016llx",
be64_to_cpu(*(__be64 *)nexus->i_port_id),
be64_to_cpu(*(__be64 *)(nexus->i_port_id + 8)));
pr_debug("registering src addr %s or i_port_id %s\n", ch->sess_name,
i_port_id);
tag_num = ch->rq_size;
tag_size = 1; /* ib_srpt does not use se_sess->sess_cmd_map */
if (sport->guid_id) {
mutex_lock(&sport->guid_id->mutex);
list_for_each_entry(stpg, &sport->guid_id->tpg_list, entry) {
if (!IS_ERR_OR_NULL(ch->sess))
break;
ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
ch->sess_name, ch, NULL);
}
mutex_unlock(&sport->guid_id->mutex);
}
if (sport->gid_id) {
mutex_lock(&sport->gid_id->mutex);
list_for_each_entry(stpg, &sport->gid_id->tpg_list, entry) {
if (!IS_ERR_OR_NULL(ch->sess))
break;
ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL, i_port_id,
ch, NULL);
if (!IS_ERR_OR_NULL(ch->sess))
break;
/* Retry without leading "0x" */
ch->sess = target_setup_session(&stpg->tpg, tag_num,
tag_size, TARGET_PROT_NORMAL,
i_port_id + 2, ch, NULL);
}
mutex_unlock(&sport->gid_id->mutex);
}
if (IS_ERR_OR_NULL(ch->sess)) {
WARN_ON_ONCE(ch->sess == NULL);
ret = PTR_ERR(ch->sess);
ch->sess = NULL;
pr_info("Rejected login for initiator %s: ret = %d.\n",
ch->sess_name, ret);
rej->reason = cpu_to_be32(ret == -ENOMEM ?
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES :
SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
goto destroy_ib;
}
/*
* Once a session has been created destruction of srpt_rdma_ch objects
* will decrement sport->refcount. Hence increment sport->refcount now.
*/
atomic_inc(&sport->refcount);
mutex_lock(&sport->mutex);
if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
struct srpt_rdma_ch *ch2;
list_for_each_entry(ch2, &nexus->ch_list, list) {
if (srpt_disconnect_ch(ch2) < 0)
continue;
pr_info("Relogin - closed existing channel %s\n",
ch2->sess_name);
rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
}
} else {
rsp->rsp_flags |= SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
}
list_add_tail_rcu(&ch->list, &nexus->ch_list);
if (!sport->enabled) {
rej->reason = cpu_to_be32(
SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_info("rejected SRP_LOGIN_REQ because target %s_%d is not enabled\n",
dev_name(&sdev->device->dev), port_num);
mutex_unlock(&sport->mutex);
ret = -EINVAL;
goto reject;
}
mutex_unlock(&sport->mutex);
ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rtr(ch, ch->qp);
if (ret) {
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("rejected SRP_LOGIN_REQ because enabling RTR failed (error code = %d)\n",
ret);
goto reject;
}
pr_debug("Establish connection sess=%p name=%s ch=%p\n", ch->sess,
ch->sess_name, ch);
/* create srp_login_response */
rsp->opcode = SRP_LOGIN_RSP;
rsp->tag = req->tag;
rsp->max_it_iu_len = cpu_to_be32(srp_max_req_size);
rsp->max_ti_iu_len = req->req_it_iu_len;
ch->max_ti_iu_len = it_iu_len;
rsp->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
atomic_set(&ch->req_lim, ch->rq_size);
atomic_set(&ch->req_lim_delta, 0);
/* create cm reply */
if (ch->using_rdma_cm) {
rep_param->rdma_cm.private_data = (void *)rsp;
rep_param->rdma_cm.private_data_len = sizeof(*rsp);
rep_param->rdma_cm.rnr_retry_count = 7;
rep_param->rdma_cm.flow_control = 1;
rep_param->rdma_cm.responder_resources = 4;
rep_param->rdma_cm.initiator_depth = 4;
} else {
rep_param->ib_cm.qp_num = ch->qp->qp_num;
rep_param->ib_cm.private_data = (void *)rsp;
rep_param->ib_cm.private_data_len = sizeof(*rsp);
rep_param->ib_cm.rnr_retry_count = 7;
rep_param->ib_cm.flow_control = 1;
rep_param->ib_cm.failover_accepted = 0;
rep_param->ib_cm.srq = 1;
rep_param->ib_cm.responder_resources = 4;
rep_param->ib_cm.initiator_depth = 4;
}
/*
	 * Hold the sport mutex while accepting a connection to prevent
	 * srpt_disconnect_ch() from being invoked concurrently with this code.
*/
mutex_lock(&sport->mutex);
if (sport->enabled && ch->state == CH_CONNECTING) {
if (ch->using_rdma_cm)
ret = rdma_accept(rdma_cm_id, &rep_param->rdma_cm);
else
ret = ib_send_cm_rep(ib_cm_id, &rep_param->ib_cm);
} else {
ret = -EINVAL;
}
mutex_unlock(&sport->mutex);
switch (ret) {
case 0:
break;
case -EINVAL:
goto reject;
default:
rej->reason = cpu_to_be32(SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
pr_err("sending SRP_LOGIN_REQ response failed (error code = %d)\n",
ret);
goto reject;
}
goto out;
destroy_ib:
srpt_destroy_ch_ib(ch);
free_recv_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
ch->sport->sdev, ch->rq_size,
ch->req_buf_cache, DMA_FROM_DEVICE);
free_recv_cache:
kmem_cache_destroy(ch->req_buf_cache);
free_rsp_ring:
srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
ch->sport->sdev, ch->rq_size,
ch->rsp_buf_cache, DMA_TO_DEVICE);
free_rsp_cache:
kmem_cache_destroy(ch->rsp_buf_cache);
free_ch:
if (rdma_cm_id)
rdma_cm_id->context = NULL;
else
ib_cm_id->context = NULL;
kfree(ch);
ch = NULL;
WARN_ON_ONCE(ret == 0);
reject:
pr_info("Rejecting login with reason %#x\n", be32_to_cpu(rej->reason));
rej->opcode = SRP_LOGIN_REJ;
rej->tag = req->tag;
rej->buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
if (rdma_cm_id)
rdma_reject(rdma_cm_id, rej, sizeof(*rej),
IB_CM_REJ_CONSUMER_DEFINED);
else
ib_send_cm_rej(ib_cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
rej, sizeof(*rej));
if (ch && ch->sess) {
srpt_close_ch(ch);
/*
* Tell the caller not to free cm_id since
* srpt_release_channel_work() will do that.
*/
ret = 0;
}
out:
kfree(rep_param);
kfree(rsp);
kfree(rej);
return ret;
}
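/*
 * srpt_ib_cm_req_recv - process an IB/CM connection request
 *
 * Formats the initiator GUID from the primary path and forwards the
 * SRP_LOGIN_REQ to srpt_cm_req_recv().
 */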
static int srpt_ib_cm_req_recv(struct ib_cm_id *cm_id,
const struct ib_cm_req_event_param *param,
void *private_data)
{
char sguid[40];
srpt_format_guid(sguid, sizeof(sguid),
¶m->primary_path->dgid.global.interface_id);
return srpt_cm_req_recv(cm_id->context, cm_id, NULL, param->port,
param->primary_path->pkey,
private_data, sguid);
}
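/*
 * srpt_rdma_cm_req_recv - process an RDMA/CM connection request
 *
 * Converts the RDMA/CM login private data (struct srp_login_req_rdma) into a
 * struct srp_login_req and forwards it to srpt_cm_req_recv().
 */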
static int srpt_rdma_cm_req_recv(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
struct srpt_device *sdev;
struct srp_login_req req;
const struct srp_login_req_rdma *req_rdma;
struct sa_path_rec *path_rec = cm_id->route.path_rec;
char src_addr[40];
sdev = ib_get_client_data(cm_id->device, &srpt_client);
if (!sdev)
return -ECONNREFUSED;
if (event->param.conn.private_data_len < sizeof(*req_rdma))
return -EINVAL;
/* Transform srp_login_req_rdma into srp_login_req. */
req_rdma = event->param.conn.private_data;
memset(&req, 0, sizeof(req));
req.opcode = req_rdma->opcode;
req.tag = req_rdma->tag;
req.req_it_iu_len = req_rdma->req_it_iu_len;
req.req_buf_fmt = req_rdma->req_buf_fmt;
req.req_flags = req_rdma->req_flags;
memcpy(req.initiator_port_id, req_rdma->initiator_port_id, 16);
memcpy(req.target_port_id, req_rdma->target_port_id, 16);
req.imm_data_offset = req_rdma->imm_data_offset;
snprintf(src_addr, sizeof(src_addr), "%pIS",
&cm_id->route.addr.src_addr);
return srpt_cm_req_recv(sdev, NULL, cm_id, cm_id->port_num,
path_rec ? path_rec->pkey : 0, &req, src_addr);
}
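/*
 * srpt_cm_rej_recv - log a received connection manager REJ message
 *
 * Formats the private data of the REJ message, if any, as a hex string and
 * logs the rejection reason.
 */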
static void srpt_cm_rej_recv(struct srpt_rdma_ch *ch,
enum ib_cm_rej_reason reason,
const u8 *private_data,
u8 private_data_len)
{
char *priv = NULL;
int i;
if (private_data_len && (priv = kmalloc(private_data_len * 3 + 1,
GFP_KERNEL))) {
for (i = 0; i < private_data_len; i++)
sprintf(priv + 3 * i, " %02x", private_data[i]);
}
pr_info("Received CM REJ for ch %s-%d; reason %d%s%s.\n",
ch->sess_name, ch->qp->qp_num, reason, private_data_len ?
"; private data" : "", priv ? priv : " (?)");
kfree(priv);
}
/**
* srpt_cm_rtu_recv - process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event
* @ch: SRPT RDMA channel.
*
* An RTU (ready to use) message indicates that the connection has been
* established and that the recipient may begin transmitting.
*/
static void srpt_cm_rtu_recv(struct srpt_rdma_ch *ch)
{
int ret;
ret = ch->using_rdma_cm ? 0 : srpt_ch_qp_rts(ch, ch->qp);
if (ret < 0) {
pr_err("%s-%d: QP transition to RTS failed\n", ch->sess_name,
ch->qp->qp_num);
srpt_close_ch(ch);
return;
}
/*
* Note: calling srpt_close_ch() if the transition to the LIVE state
	 * fails is not necessary since that means that srpt_close_ch() has
* already been invoked from another thread.
*/
if (!srpt_set_ch_state(ch, CH_LIVE)) {
pr_err("%s-%d: channel transition to LIVE state failed\n",
ch->sess_name, ch->qp->qp_num);
return;
}
/* Trigger wait list processing. */
ret = srpt_zerolength_write(ch);
WARN_ONCE(ret < 0, "%d\n", ret);
}
/**
* srpt_cm_handler - IB connection manager callback function
* @cm_id: IB/CM connection identifier.
* @event: IB/CM event.
*
 * A non-zero return value will cause the caller to destroy the CM ID.
*
* Note: srpt_cm_handler() must only return a non-zero value when transferring
* ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
* a non-zero value in any other case will trigger a race with the
* ib_destroy_cm_id() call in srpt_release_channel().
*/
static int srpt_cm_handler(struct ib_cm_id *cm_id,
const struct ib_cm_event *event)
{
struct srpt_rdma_ch *ch = cm_id->context;
int ret;
ret = 0;
switch (event->event) {
case IB_CM_REQ_RECEIVED:
ret = srpt_ib_cm_req_recv(cm_id, &event->param.req_rcvd,
event->private_data);
break;
case IB_CM_REJ_RECEIVED:
srpt_cm_rej_recv(ch, event->param.rej_rcvd.reason,
event->private_data,
IB_CM_REJ_PRIVATE_DATA_SIZE);
break;
case IB_CM_RTU_RECEIVED:
case IB_CM_USER_ESTABLISHED:
srpt_cm_rtu_recv(ch);
break;
case IB_CM_DREQ_RECEIVED:
srpt_disconnect_ch(ch);
break;
case IB_CM_DREP_RECEIVED:
pr_info("Received CM DREP message for ch %s-%d.\n",
ch->sess_name, ch->qp->qp_num);
srpt_close_ch(ch);
break;
case IB_CM_TIMEWAIT_EXIT:
pr_info("Received CM TimeWait exit for ch %s-%d.\n",
ch->sess_name, ch->qp->qp_num);
srpt_close_ch(ch);
break;
case IB_CM_REP_ERROR:
pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
ch->qp->qp_num);
break;
case IB_CM_DREQ_ERROR:
pr_info("Received CM DREQ ERROR event.\n");
break;
case IB_CM_MRA_RECEIVED:
pr_info("Received CM MRA event\n");
break;
default:
pr_err("received unrecognized CM event %d\n", event->event);
break;
}
return ret;
}
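/*
 * srpt_rdma_cm_handler - RDMA/CM callback function
 *
 * RDMA/CM counterpart of srpt_cm_handler(). Only returns a non-zero value
 * when processing an RDMA_CM_EVENT_CONNECT_REQUEST event fails.
 */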
static int srpt_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
struct srpt_rdma_ch *ch = cm_id->context;
int ret = 0;
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = srpt_rdma_cm_req_recv(cm_id, event);
break;
case RDMA_CM_EVENT_REJECTED:
srpt_cm_rej_recv(ch, event->status,
event->param.conn.private_data,
event->param.conn.private_data_len);
break;
case RDMA_CM_EVENT_ESTABLISHED:
srpt_cm_rtu_recv(ch);
break;
case RDMA_CM_EVENT_DISCONNECTED:
if (ch->state < CH_DISCONNECTING)
srpt_disconnect_ch(ch);
else
srpt_close_ch(ch);
break;
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
srpt_close_ch(ch);
break;
case RDMA_CM_EVENT_UNREACHABLE:
pr_info("Received CM REP error for ch %s-%d.\n", ch->sess_name,
ch->qp->qp_num);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_ADDR_CHANGE:
break;
default:
pr_err("received unrecognized RDMA CM event %d\n",
event->event);
break;
}
return ret;
}
/*
* srpt_write_pending - Start data transfer from initiator to target (write).
*/
static int srpt_write_pending(struct se_cmd *se_cmd)
{
struct srpt_send_ioctx *ioctx =
container_of(se_cmd, struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
struct ib_send_wr *first_wr = NULL;
struct ib_cqe *cqe = &ioctx->rdma_cqe;
enum srpt_command_state new_state;
int ret, i;
if (ioctx->recv_ioctx) {
srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
target_execute_cmd(&ioctx->cmd);
return 0;
}
new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
WARN_ON(new_state == SRPT_STATE_DONE);
if (atomic_sub_return(ioctx->n_rdma, &ch->sq_wr_avail) < 0) {
pr_warn("%s: IB send queue full (needed %d)\n",
__func__, ioctx->n_rdma);
ret = -ENOMEM;
goto out_undo;
}
cqe->done = srpt_rdma_read_done;
for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp, ch->sport->port,
cqe, first_wr);
cqe = NULL;
}
ret = ib_post_send(ch->qp, first_wr, NULL);
if (ret) {
pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
__func__, ret, ioctx->n_rdma,
atomic_read(&ch->sq_wr_avail));
goto out_undo;
}
return 0;
out_undo:
atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
return ret;
}
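/*
 * tcm_to_srp_tsk_mgmt_status - translate a TCM task management status into
 * the corresponding SRP task management response code
 */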
static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
{
switch (tcm_mgmt_status) {
case TMR_FUNCTION_COMPLETE:
return SRP_TSK_MGMT_SUCCESS;
case TMR_FUNCTION_REJECTED:
return SRP_TSK_MGMT_FUNC_NOT_SUPP;
}
return SRP_TSK_MGMT_FAILED;
}
/**
* srpt_queue_response - transmit the response to a SCSI command
* @cmd: SCSI target command.
*
* Callback function called by the TCM core. Must not block since it can be
 * invoked in the context of the IB completion handler.
*/
static void srpt_queue_response(struct se_cmd *cmd)
{
struct srpt_send_ioctx *ioctx =
container_of(cmd, struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
struct srpt_device *sdev = ch->sport->sdev;
struct ib_send_wr send_wr, *first_wr = &send_wr;
struct ib_sge sge;
enum srpt_command_state state;
int resp_len, ret, i;
u8 srp_tm_status;
state = ioctx->state;
switch (state) {
case SRPT_STATE_NEW:
case SRPT_STATE_DATA_IN:
ioctx->state = SRPT_STATE_CMD_RSP_SENT;
break;
case SRPT_STATE_MGMT:
ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
break;
default:
WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
ch, ioctx->ioctx.index, ioctx->state);
break;
}
if (WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))
return;
/* For read commands, transfer the data to the initiator. */
if (ioctx->cmd.data_direction == DMA_FROM_DEVICE &&
ioctx->cmd.data_length &&
!ioctx->queue_status_only) {
for (i = ioctx->n_rw_ctx - 1; i >= 0; i--) {
struct srpt_rw_ctx *ctx = &ioctx->rw_ctxs[i];
first_wr = rdma_rw_ctx_wrs(&ctx->rw, ch->qp,
ch->sport->port, NULL, first_wr);
}
}
if (state != SRPT_STATE_MGMT)
resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->cmd.tag,
cmd->scsi_status);
else {
srp_tm_status
= tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
ioctx->cmd.tag);
}
atomic_inc(&ch->req_lim);
if (unlikely(atomic_sub_return(1 + ioctx->n_rdma,
&ch->sq_wr_avail) < 0)) {
pr_warn("%s: IB send queue full (needed %d)\n",
__func__, ioctx->n_rdma);
goto out;
}
ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, resp_len,
DMA_TO_DEVICE);
sge.addr = ioctx->ioctx.dma;
sge.length = resp_len;
sge.lkey = sdev->lkey;
ioctx->ioctx.cqe.done = srpt_send_done;
send_wr.next = NULL;
send_wr.wr_cqe = &ioctx->ioctx.cqe;
send_wr.sg_list = &sge;
send_wr.num_sge = 1;
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
ret = ib_post_send(ch->qp, first_wr, NULL);
if (ret < 0) {
pr_err("%s: sending cmd response failed for tag %llu (%d)\n",
__func__, ioctx->cmd.tag, ret);
goto out;
}
return;
out:
atomic_add(1 + ioctx->n_rdma, &ch->sq_wr_avail);
atomic_dec(&ch->req_lim);
srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
target_put_sess_cmd(&ioctx->cmd);
}
static int srpt_queue_data_in(struct se_cmd *cmd)
{
srpt_queue_response(cmd);
return 0;
}
static void srpt_queue_tm_rsp(struct se_cmd *cmd)
{
srpt_queue_response(cmd);
}
/*
* This function is called for aborted commands if no response is sent to the
* initiator. Make sure that the credits freed by aborting a command are
* returned to the initiator the next time a response is sent by incrementing
* ch->req_lim_delta.
*/
static void srpt_aborted_task(struct se_cmd *cmd)
{
struct srpt_send_ioctx *ioctx = container_of(cmd,
struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
atomic_inc(&ch->req_lim_delta);
}
static int srpt_queue_status(struct se_cmd *cmd)
{
struct srpt_send_ioctx *ioctx;
ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
BUG_ON(ioctx->sense_data != cmd->sense_buffer);
if (cmd->se_cmd_flags &
(SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
ioctx->queue_status_only = true;
srpt_queue_response(cmd);
return 0;
}
static void srpt_refresh_port_work(struct work_struct *work)
{
struct srpt_port *sport = container_of(work, struct srpt_port, work);
srpt_refresh_port(sport);
}
/**
* srpt_release_sport - disable login and wait for associated channels
* @sport: SRPT HCA port.
*/
static int srpt_release_sport(struct srpt_port *sport)
{
DECLARE_COMPLETION_ONSTACK(c);
struct srpt_nexus *nexus, *next_n;
struct srpt_rdma_ch *ch;
WARN_ON_ONCE(irqs_disabled());
sport->freed_channels = &c;
mutex_lock(&sport->mutex);
srpt_set_enabled(sport, false);
mutex_unlock(&sport->mutex);
while (atomic_read(&sport->refcount) > 0 &&
wait_for_completion_timeout(&c, 5 * HZ) <= 0) {
pr_info("%s_%d: waiting for unregistration of %d sessions ...\n",
dev_name(&sport->sdev->device->dev), sport->port,
atomic_read(&sport->refcount));
rcu_read_lock();
list_for_each_entry(nexus, &sport->nexus_list, entry) {
list_for_each_entry(ch, &nexus->ch_list, list) {
pr_info("%s-%d: state %s\n",
ch->sess_name, ch->qp->qp_num,
get_ch_state_name(ch->state));
}
}
rcu_read_unlock();
}
mutex_lock(&sport->mutex);
list_for_each_entry_safe(nexus, next_n, &sport->nexus_list, entry) {
list_del(&nexus->entry);
kfree_rcu(nexus, rcu);
}
mutex_unlock(&sport->mutex);
return 0;
}
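/*
 * struct port_and_port_id - return type of __srpt_lookup_port()
 * @sport: SRPT HCA port, or NULL if no port matched.
 * @port_id: Address of either sport->guid_id or sport->gid_id, depending on
 *	which port name matched.
 */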
struct port_and_port_id {
struct srpt_port *sport;
struct srpt_port_id **port_id;
};
static struct port_and_port_id __srpt_lookup_port(const char *name)
{
struct ib_device *dev;
struct srpt_device *sdev;
struct srpt_port *sport;
int i;
list_for_each_entry(sdev, &srpt_dev_list, list) {
dev = sdev->device;
if (!dev)
continue;
for (i = 0; i < dev->phys_port_cnt; i++) {
sport = &sdev->port[i];
if (strcmp(sport->guid_name, name) == 0) {
kref_get(&sdev->refcnt);
return (struct port_and_port_id){
sport, &sport->guid_id};
}
if (strcmp(sport->gid_name, name) == 0) {
kref_get(&sdev->refcnt);
return (struct port_and_port_id){
sport, &sport->gid_id};
}
}
}
return (struct port_and_port_id){};
}
/**
* srpt_lookup_port() - Look up an RDMA port by name
* @name: ASCII port name
*
 * Increments the HCA reference count if an RDMA port pointer is returned.
 * The caller must drop that reference by calling srpt_sdev_put().
*/
static struct port_and_port_id srpt_lookup_port(const char *name)
{
struct port_and_port_id papi;
spin_lock(&srpt_dev_lock);
papi = __srpt_lookup_port(name);
spin_unlock(&srpt_dev_lock);
return papi;
}
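/* Destroy the SRQ, free its receive ring and destroy the request buffer cache. */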
static void srpt_free_srq(struct srpt_device *sdev)
{
if (!sdev->srq)
return;
ib_destroy_srq(sdev->srq);
srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
sdev->srq_size, sdev->req_buf_cache,
DMA_FROM_DEVICE);
kmem_cache_destroy(sdev->req_buf_cache);
sdev->srq = NULL;
}
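/*
 * srpt_alloc_srq - create a shared receive queue (SRQ), allocate its receive
 * ring and post the receive buffers
 */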
static int srpt_alloc_srq(struct srpt_device *sdev)
{
struct ib_srq_init_attr srq_attr = {
.event_handler = srpt_srq_event,
.srq_context = (void *)sdev,
.attr.max_wr = sdev->srq_size,
.attr.max_sge = 1,
.srq_type = IB_SRQT_BASIC,
};
struct ib_device *device = sdev->device;
struct ib_srq *srq;
int i;
WARN_ON_ONCE(sdev->srq);
srq = ib_create_srq(sdev->pd, &srq_attr);
if (IS_ERR(srq)) {
pr_debug("ib_create_srq() failed: %ld\n", PTR_ERR(srq));
return PTR_ERR(srq);
}
pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
srp_max_req_size, 0, 0, NULL);
if (!sdev->req_buf_cache)
goto free_srq;
sdev->ioctx_ring = (struct srpt_recv_ioctx **)
srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
sizeof(*sdev->ioctx_ring[0]),
sdev->req_buf_cache, 0, DMA_FROM_DEVICE);
if (!sdev->ioctx_ring)
goto free_cache;
sdev->use_srq = true;
sdev->srq = srq;
for (i = 0; i < sdev->srq_size; ++i) {
INIT_LIST_HEAD(&sdev->ioctx_ring[i]->wait_list);
srpt_post_recv(sdev, NULL, sdev->ioctx_ring[i]);
}
return 0;
free_cache:
kmem_cache_destroy(sdev->req_buf_cache);
free_srq:
ib_destroy_srq(srq);
return -ENOMEM;
}
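/*
 * srpt_use_srq - switch an HCA between SRQ mode and per-channel receive queues
 * @sdev: SRPT HCA.
 * @use_srq: Whether or not to use an SRQ.
 */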
static int srpt_use_srq(struct srpt_device *sdev, bool use_srq)
{
struct ib_device *device = sdev->device;
int ret = 0;
if (!use_srq) {
srpt_free_srq(sdev);
sdev->use_srq = false;
} else if (use_srq && !sdev->srq) {
ret = srpt_alloc_srq(sdev);
}
pr_debug("%s(%s): use_srq = %d; ret = %d\n", __func__,
dev_name(&device->dev), sdev->use_srq, ret);
return ret;
}
static void srpt_free_sdev(struct kref *refcnt)
{
struct srpt_device *sdev = container_of(refcnt, typeof(*sdev), refcnt);
kfree(sdev);
}
static void srpt_sdev_put(struct srpt_device *sdev)
{
kref_put(&sdev->refcnt, srpt_free_sdev);
}
/**
* srpt_add_one - InfiniBand device addition callback function
* @device: Describes a HCA.
*/
static int srpt_add_one(struct ib_device *device)
{
struct srpt_device *sdev;
struct srpt_port *sport;
int ret;
u32 i;
pr_debug("device = %p\n", device);
sdev = kzalloc(struct_size(sdev, port, device->phys_port_cnt),
GFP_KERNEL);
if (!sdev)
return -ENOMEM;
kref_init(&sdev->refcnt);
sdev->device = device;
mutex_init(&sdev->sdev_mutex);
sdev->pd = ib_alloc_pd(device, 0);
if (IS_ERR(sdev->pd)) {
ret = PTR_ERR(sdev->pd);
goto free_dev;
}
sdev->lkey = sdev->pd->local_dma_lkey;
sdev->srq_size = min(srpt_srq_size, sdev->device->attrs.max_srq_wr);
srpt_use_srq(sdev, sdev->port[0].port_attrib.use_srq);
if (!srpt_service_guid)
srpt_service_guid = be64_to_cpu(device->node_guid);
if (rdma_port_get_link_layer(device, 1) == IB_LINK_LAYER_INFINIBAND)
sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
if (IS_ERR(sdev->cm_id)) {
pr_info("ib_create_cm_id() failed: %ld\n",
PTR_ERR(sdev->cm_id));
ret = PTR_ERR(sdev->cm_id);
sdev->cm_id = NULL;
if (!rdma_cm_id)
goto err_ring;
}
/* print out target login information */
pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,pkey=ffff,service_id=%016llx\n",
srpt_service_guid, srpt_service_guid, srpt_service_guid);
/*
	 * We do not have a consistent service_id (i.e. also the id_ext of the
	 * target_id) to identify this target. We currently use the GUID of the
	 * first HCA in the system as the service_id; therefore, the target_id
	 * will change if this HCA goes bad and is replaced by a different HCA.
*/
ret = sdev->cm_id ?
ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid)) :
0;
if (ret < 0) {
pr_err("ib_cm_listen() failed: %d (cm_id state = %d)\n", ret,
sdev->cm_id->state);
goto err_cm;
}
INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
srpt_event_handler);
ib_register_event_handler(&sdev->event_handler);
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
INIT_LIST_HEAD(&sport->nexus_list);
mutex_init(&sport->mutex);
sport->sdev = sdev;
sport->port = i;
sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
sport->port_attrib.use_srq = false;
INIT_WORK(&sport->work, srpt_refresh_port_work);
ret = srpt_refresh_port(sport);
if (ret) {
pr_err("MAD registration failed for %s-%d.\n",
dev_name(&sdev->device->dev), i);
i--;
goto err_port;
}
}
spin_lock(&srpt_dev_lock);
list_add_tail(&sdev->list, &srpt_dev_list);
spin_unlock(&srpt_dev_lock);
ib_set_client_data(device, &srpt_client, sdev);
pr_debug("added %s.\n", dev_name(&device->dev));
return 0;
err_port:
srpt_unregister_mad_agent(sdev, i);
ib_unregister_event_handler(&sdev->event_handler);
err_cm:
if (sdev->cm_id)
ib_destroy_cm_id(sdev->cm_id);
err_ring:
srpt_free_srq(sdev);
ib_dealloc_pd(sdev->pd);
free_dev:
srpt_sdev_put(sdev);
pr_info("%s(%s) failed.\n", __func__, dev_name(&device->dev));
return ret;
}
/**
* srpt_remove_one - InfiniBand device removal callback function
* @device: Describes a HCA.
* @client_data: The value passed as the third argument to ib_set_client_data().
*/
static void srpt_remove_one(struct ib_device *device, void *client_data)
{
struct srpt_device *sdev = client_data;
int i;
srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt);
ib_unregister_event_handler(&sdev->event_handler);
/* Cancel any work queued by the just unregistered IB event handler. */
for (i = 0; i < sdev->device->phys_port_cnt; i++)
cancel_work_sync(&sdev->port[i].work);
if (sdev->cm_id)
ib_destroy_cm_id(sdev->cm_id);
ib_set_client_data(device, &srpt_client, NULL);
/*
* Unregistering a target must happen after destroying sdev->cm_id
* such that no new SRP_LOGIN_REQ information units can arrive while
* destroying the target.
*/
spin_lock(&srpt_dev_lock);
list_del(&sdev->list);
spin_unlock(&srpt_dev_lock);
for (i = 0; i < sdev->device->phys_port_cnt; i++)
srpt_release_sport(&sdev->port[i]);
srpt_free_srq(sdev);
ib_dealloc_pd(sdev->pd);
srpt_sdev_put(sdev);
}
static struct ib_client srpt_client = {
.name = DRV_NAME,
.add = srpt_add_one,
.remove = srpt_remove_one
};
static int srpt_check_true(struct se_portal_group *se_tpg)
{
return 1;
}
static struct srpt_port *srpt_tpg_to_sport(struct se_portal_group *tpg)
{
return tpg->se_tpg_wwn->priv;
}
static struct srpt_port_id *srpt_wwn_to_sport_id(struct se_wwn *wwn)
{
struct srpt_port *sport = wwn->priv;
if (sport->guid_id && &sport->guid_id->wwn == wwn)
return sport->guid_id;
if (sport->gid_id && &sport->gid_id->wwn == wwn)
return sport->gid_id;
WARN_ON_ONCE(true);
return NULL;
}
static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
{
struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
return stpg->sport_id->name;
}
static u16 srpt_get_tag(struct se_portal_group *tpg)
{
return 1;
}
static void srpt_release_cmd(struct se_cmd *se_cmd)
{
struct srpt_send_ioctx *ioctx = container_of(se_cmd,
struct srpt_send_ioctx, cmd);
struct srpt_rdma_ch *ch = ioctx->ch;
struct srpt_recv_ioctx *recv_ioctx = ioctx->recv_ioctx;
WARN_ON_ONCE(ioctx->state != SRPT_STATE_DONE &&
!(ioctx->cmd.transport_state & CMD_T_ABORTED));
if (recv_ioctx) {
WARN_ON_ONCE(!list_empty(&recv_ioctx->wait_list));
ioctx->recv_ioctx = NULL;
srpt_post_recv(ch->sport->sdev, ch, recv_ioctx);
}
if (ioctx->n_rw_ctx) {
srpt_free_rw_ctxs(ch, ioctx);
ioctx->n_rw_ctx = 0;
}
target_free_tag(se_cmd->se_sess, se_cmd);
}
/**
* srpt_close_session - forcibly close a session
* @se_sess: SCSI target session.
*
* Callback function invoked by the TCM core to clean up sessions associated
* with a node ACL when the user invokes
* rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
*/
static void srpt_close_session(struct se_session *se_sess)
{
struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr;
srpt_disconnect_ch_sync(ch);
}
/* Note: only used from inside debug printk's by the TCM core. */
static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
{
struct srpt_send_ioctx *ioctx;
ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
return ioctx->state;
}
static int srpt_parse_guid(u64 *guid, const char *name)
{
u16 w[4];
int ret = -EINVAL;
if (sscanf(name, "%hx:%hx:%hx:%hx", &w[0], &w[1], &w[2], &w[3]) != 4)
goto out;
*guid = get_unaligned_be64(w);
ret = 0;
out:
return ret;
}
/**
* srpt_parse_i_port_id - parse an initiator port ID
* @name: ASCII representation of a 128-bit initiator port ID.
* @i_port_id: Binary 128-bit port ID.
*/
static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
{
const char *p;
unsigned len, count, leading_zero_bytes;
int ret;
p = name;
if (strncasecmp(p, "0x", 2) == 0)
p += 2;
ret = -EINVAL;
len = strlen(p);
if (len % 2)
goto out;
count = min(len / 2, 16U);
leading_zero_bytes = 16 - count;
memset(i_port_id, 0, leading_zero_bytes);
ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
out:
return ret;
}
/*
* configfs callback function invoked for mkdir
* /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
*
* i_port_id must be an initiator port GUID, GID or IP address. See also the
 * target_setup_session() calls in this driver. Examples of valid initiator
* port IDs:
* 0x0000000000000000505400fffe4a0b7b
* 0000000000000000505400fffe4a0b7b
* 5054:00ff:fe4a:0b7b
* 192.168.122.76
*/
static int srpt_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
struct sockaddr_storage sa;
u64 guid;
u8 i_port_id[16];
int ret;
ret = srpt_parse_guid(&guid, name);
if (ret < 0)
ret = srpt_parse_i_port_id(i_port_id, name);
if (ret < 0)
ret = inet_pton_with_scope(&init_net, AF_UNSPEC, name, NULL,
&sa);
if (ret < 0)
pr_err("invalid initiator port ID %s\n", name);
return ret;
}
static ssize_t srpt_tpg_attrib_srp_max_rdma_size_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
return sysfs_emit(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
}
static ssize_t srpt_tpg_attrib_srp_max_rdma_size_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
unsigned long val;
int ret;
ret = kstrtoul(page, 0, &val);
if (ret < 0) {
pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RDMA_SIZE) {
pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
MAX_SRPT_RDMA_SIZE);
return -EINVAL;
}
if (val < DEFAULT_MAX_RDMA_SIZE) {
pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
val, DEFAULT_MAX_RDMA_SIZE);
return -EINVAL;
}
sport->port_attrib.srp_max_rdma_size = val;
return count;
}
static ssize_t srpt_tpg_attrib_srp_max_rsp_size_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
return sysfs_emit(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
}
static ssize_t srpt_tpg_attrib_srp_max_rsp_size_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
unsigned long val;
int ret;
ret = kstrtoul(page, 0, &val);
if (ret < 0) {
pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RSP_SIZE) {
pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
MAX_SRPT_RSP_SIZE);
return -EINVAL;
}
if (val < MIN_MAX_RSP_SIZE) {
pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
MIN_MAX_RSP_SIZE);
return -EINVAL;
}
sport->port_attrib.srp_max_rsp_size = val;
return count;
}
static ssize_t srpt_tpg_attrib_srp_sq_size_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
return sysfs_emit(page, "%u\n", sport->port_attrib.srp_sq_size);
}
static ssize_t srpt_tpg_attrib_srp_sq_size_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
unsigned long val;
int ret;
ret = kstrtoul(page, 0, &val);
if (ret < 0) {
pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_SRQ_SIZE) {
pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
MAX_SRPT_SRQ_SIZE);
return -EINVAL;
}
if (val < MIN_SRPT_SRQ_SIZE) {
pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
MIN_SRPT_SRQ_SIZE);
return -EINVAL;
}
sport->port_attrib.srp_sq_size = val;
return count;
}
static ssize_t srpt_tpg_attrib_use_srq_show(struct config_item *item,
char *page)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
return sysfs_emit(page, "%d\n", sport->port_attrib.use_srq);
}
static ssize_t srpt_tpg_attrib_use_srq_store(struct config_item *item,
const char *page, size_t count)
{
struct se_portal_group *se_tpg = attrib_to_tpg(item);
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
struct srpt_device *sdev = sport->sdev;
unsigned long val;
bool enabled;
int ret;
ret = kstrtoul(page, 0, &val);
if (ret < 0)
return ret;
if (val != !!val)
return -EINVAL;
ret = mutex_lock_interruptible(&sdev->sdev_mutex);
if (ret < 0)
return ret;
ret = mutex_lock_interruptible(&sport->mutex);
if (ret < 0)
goto unlock_sdev;
enabled = sport->enabled;
/* Log out all initiator systems before changing 'use_srq'. */
srpt_set_enabled(sport, false);
sport->port_attrib.use_srq = val;
srpt_use_srq(sdev, sport->port_attrib.use_srq);
srpt_set_enabled(sport, enabled);
ret = count;
mutex_unlock(&sport->mutex);
unlock_sdev:
mutex_unlock(&sdev->sdev_mutex);
return ret;
}
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rdma_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_max_rsp_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, srp_sq_size);
CONFIGFS_ATTR(srpt_tpg_attrib_, use_srq);
static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
&srpt_tpg_attrib_attr_srp_max_rdma_size,
&srpt_tpg_attrib_attr_srp_max_rsp_size,
&srpt_tpg_attrib_attr_srp_sq_size,
&srpt_tpg_attrib_attr_use_srq,
NULL,
};
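/*
 * srpt_create_rdma_id - create an RDMA/CM ID that listens on @listen_addr
 *
 * Returns a valid rdma_cm_id pointer on success or an ERR_PTR() on failure.
 */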
static struct rdma_cm_id *srpt_create_rdma_id(struct sockaddr *listen_addr)
{
struct rdma_cm_id *rdma_cm_id;
int ret;
rdma_cm_id = rdma_create_id(&init_net, srpt_rdma_cm_handler,
NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(rdma_cm_id)) {
pr_err("RDMA/CM ID creation failed: %ld\n",
PTR_ERR(rdma_cm_id));
goto out;
}
ret = rdma_bind_addr(rdma_cm_id, listen_addr);
if (ret) {
char addr_str[64];
snprintf(addr_str, sizeof(addr_str), "%pISp", listen_addr);
pr_err("Binding RDMA/CM ID to address %s failed: %d\n",
addr_str, ret);
rdma_destroy_id(rdma_cm_id);
rdma_cm_id = ERR_PTR(ret);
goto out;
}
ret = rdma_listen(rdma_cm_id, 128);
if (ret) {
pr_err("rdma_listen() failed: %d\n", ret);
rdma_destroy_id(rdma_cm_id);
rdma_cm_id = ERR_PTR(ret);
}
out:
return rdma_cm_id;
}
static ssize_t srpt_rdma_cm_port_show(struct config_item *item, char *page)
{
return sysfs_emit(page, "%d\n", rdma_cm_port);
}
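/*
 * srpt_rdma_cm_port_store - change the port on which the target listens for
 * RDMA/CM connections
 *
 * Tries to listen on the new port via IPv6 first and falls back to IPv4.
 * Writing zero stops listening for RDMA/CM connections.
 */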
static ssize_t srpt_rdma_cm_port_store(struct config_item *item,
const char *page, size_t count)
{
struct sockaddr_in addr4 = { .sin_family = AF_INET };
struct sockaddr_in6 addr6 = { .sin6_family = AF_INET6 };
struct rdma_cm_id *new_id = NULL;
u16 val;
int ret;
ret = kstrtou16(page, 0, &val);
if (ret < 0)
return ret;
ret = count;
if (rdma_cm_port == val)
goto out;
if (val) {
addr6.sin6_port = cpu_to_be16(val);
new_id = srpt_create_rdma_id((struct sockaddr *)&addr6);
if (IS_ERR(new_id)) {
addr4.sin_port = cpu_to_be16(val);
new_id = srpt_create_rdma_id((struct sockaddr *)&addr4);
if (IS_ERR(new_id)) {
ret = PTR_ERR(new_id);
goto out;
}
}
}
mutex_lock(&rdma_cm_mutex);
rdma_cm_port = val;
swap(rdma_cm_id, new_id);
mutex_unlock(&rdma_cm_mutex);
if (new_id)
rdma_destroy_id(new_id);
ret = count;
out:
return ret;
}
CONFIGFS_ATTR(srpt_, rdma_cm_port);
static struct configfs_attribute *srpt_da_attrs[] = {
&srpt_attr_rdma_cm_port,
NULL,
};
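/* Invoked when the configfs "enable" attribute of a TPG is written. */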
static int srpt_enable_tpg(struct se_portal_group *se_tpg, bool enable)
{
struct srpt_port *sport = srpt_tpg_to_sport(se_tpg);
mutex_lock(&sport->mutex);
srpt_set_enabled(sport, enable);
mutex_unlock(&sport->mutex);
return 0;
}
/**
* srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
* @wwn: Corresponds to $driver/$port.
* @name: $tpg.
*/
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
const char *name)
{
struct srpt_port_id *sport_id = srpt_wwn_to_sport_id(wwn);
struct srpt_tpg *stpg;
int res = -ENOMEM;
stpg = kzalloc(sizeof(*stpg), GFP_KERNEL);
if (!stpg)
return ERR_PTR(res);
stpg->sport_id = sport_id;
res = core_tpg_register(wwn, &stpg->tpg, SCSI_PROTOCOL_SRP);
if (res) {
kfree(stpg);
return ERR_PTR(res);
}
mutex_lock(&sport_id->mutex);
list_add_tail(&stpg->entry, &sport_id->tpg_list);
mutex_unlock(&sport_id->mutex);
return &stpg->tpg;
}
/**
* srpt_drop_tpg - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port/$tpg
* @tpg: Target portal group to deregister.
*/
static void srpt_drop_tpg(struct se_portal_group *tpg)
{
struct srpt_tpg *stpg = container_of(tpg, typeof(*stpg), tpg);
struct srpt_port_id *sport_id = stpg->sport_id;
struct srpt_port *sport = srpt_tpg_to_sport(tpg);
mutex_lock(&sport_id->mutex);
list_del(&stpg->entry);
mutex_unlock(&sport_id->mutex);
sport->enabled = false;
core_tpg_deregister(tpg);
kfree(stpg);
}
/**
* srpt_make_tport - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port
* @tf: Not used.
* @group: Not used.
* @name: $port.
*/
static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
struct config_group *group,
const char *name)
{
struct port_and_port_id papi = srpt_lookup_port(name);
struct srpt_port *sport = papi.sport;
struct srpt_port_id *port_id;
if (!papi.port_id)
return ERR_PTR(-EINVAL);
if (*papi.port_id) {
/* Attempt to create a directory that already exists. */
WARN_ON_ONCE(true);
return &(*papi.port_id)->wwn;
}
port_id = kzalloc(sizeof(*port_id), GFP_KERNEL);
if (!port_id) {
srpt_sdev_put(sport->sdev);
return ERR_PTR(-ENOMEM);
}
mutex_init(&port_id->mutex);
INIT_LIST_HEAD(&port_id->tpg_list);
port_id->wwn.priv = sport;
	memcpy(port_id->name, papi.port_id == &sport->guid_id ? sport->guid_name :
	       sport->gid_name, ARRAY_SIZE(port_id->name));
*papi.port_id = port_id;
return &port_id->wwn;
}
/**
* srpt_drop_tport - configfs callback invoked for rmdir /sys/kernel/config/target/$driver/$port
* @wwn: $port.
*/
static void srpt_drop_tport(struct se_wwn *wwn)
{
struct srpt_port_id *port_id = container_of(wwn, typeof(*port_id), wwn);
struct srpt_port *sport = wwn->priv;
if (sport->guid_id == port_id)
sport->guid_id = NULL;
else if (sport->gid_id == port_id)
sport->gid_id = NULL;
else
WARN_ON_ONCE(true);
srpt_sdev_put(sport->sdev);
kfree(port_id);
}
static ssize_t srpt_wwn_version_show(struct config_item *item, char *buf)
{
return sysfs_emit(buf, "\n");
}
CONFIGFS_ATTR_RO(srpt_wwn_, version);
static struct configfs_attribute *srpt_wwn_attrs[] = {
&srpt_wwn_attr_version,
NULL,
};
static const struct target_core_fabric_ops srpt_template = {
.module = THIS_MODULE,
.fabric_name = "srpt",
.tpg_get_wwn = srpt_get_fabric_wwn,
.tpg_get_tag = srpt_get_tag,
.tpg_check_demo_mode_cache = srpt_check_true,
.tpg_check_demo_mode_write_protect = srpt_check_true,
.release_cmd = srpt_release_cmd,
.check_stop_free = srpt_check_stop_free,
.close_session = srpt_close_session,
.sess_get_initiator_sid = NULL,
.write_pending = srpt_write_pending,
.get_cmd_state = srpt_get_tcm_cmd_state,
.queue_data_in = srpt_queue_data_in,
.queue_status = srpt_queue_status,
.queue_tm_rsp = srpt_queue_tm_rsp,
.aborted_task = srpt_aborted_task,
/*
* Setup function pointers for generic logic in
* target_core_fabric_configfs.c
*/
.fabric_make_wwn = srpt_make_tport,
.fabric_drop_wwn = srpt_drop_tport,
.fabric_make_tpg = srpt_make_tpg,
.fabric_enable_tpg = srpt_enable_tpg,
.fabric_drop_tpg = srpt_drop_tpg,
.fabric_init_nodeacl = srpt_init_nodeacl,
.tfc_discovery_attrs = srpt_da_attrs,
.tfc_wwn_attrs = srpt_wwn_attrs,
.tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs,
};
/**
* srpt_init_module - kernel module initialization
*
* Note: Since ib_register_client() registers callback functions, and since at
* least one of these callback functions (srpt_add_one()) calls target core
* functions, this driver must be registered with the target core before
* ib_register_client() is called.
*/
static int __init srpt_init_module(void)
{
int ret;
ret = -EINVAL;
if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
pr_err("invalid value %d for kernel module parameter srp_max_req_size -- must be at least %d.\n",
srp_max_req_size, MIN_MAX_REQ_SIZE);
goto out;
}
if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
|| srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
pr_err("invalid value %d for kernel module parameter srpt_srq_size -- must be in the range [%d..%d].\n",
srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
goto out;
}
ret = target_register_template(&srpt_template);
if (ret)
goto out;
ret = ib_register_client(&srpt_client);
if (ret) {
pr_err("couldn't register IB client\n");
goto out_unregister_target;
}
return 0;
out_unregister_target:
target_unregister_template(&srpt_template);
out:
return ret;
}
static void __exit srpt_cleanup_module(void)
{
if (rdma_cm_id)
rdma_destroy_id(rdma_cm_id);
ib_unregister_client(&srpt_client);
target_unregister_template(&srpt_template);
}
module_init(srpt_init_module);
module_exit(srpt_cleanup_module);
| linux-master | drivers/infiniband/ulp/srpt/ib_srpt.c |
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/sched/signal.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include "ipoib.h"
static ssize_t parent_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_device *dev = to_net_dev(d);
struct ipoib_dev_priv *priv = ipoib_priv(dev);
return sysfs_emit(buf, "%s\n", priv->parent->name);
}
static DEVICE_ATTR_RO(parent);
static bool is_child_unique(struct ipoib_dev_priv *ppriv,
struct ipoib_dev_priv *priv)
{
struct ipoib_dev_priv *tpriv;
ASSERT_RTNL();
/*
	 * Since the legacy sysfs interface uses the pkey for deletion it cannot
	 * support more than one interface with the same pkey, as that would be
	 * ambiguous. The RTNL interface deletes using the netdev so it does
	 * not have a problem supporting duplicated pkeys.
*/
if (priv->child_type != IPOIB_LEGACY_CHILD)
return true;
/*
* First ensure this isn't a duplicate. We check the parent device and
* then all of the legacy child interfaces to make sure the Pkey
* doesn't match.
*/
if (ppriv->pkey == priv->pkey)
return false;
list_for_each_entry(tpriv, &ppriv->child_intfs, list) {
if (tpriv->pkey == priv->pkey &&
tpriv->child_type == IPOIB_LEGACY_CHILD)
return false;
}
return true;
}
/*
* NOTE: If this function fails then the priv->dev will remain valid, however
 * priv will have been freed and must not be touched by the caller in the error
* case.
*
* If (ndev->reg_state == NETREG_UNINITIALIZED) then it is up to the caller to
* free the net_device (just as rtnl_newlink does) otherwise the net_device
* will be freed when the rtnl is unlocked.
*/
int __ipoib_vlan_add(struct ipoib_dev_priv *ppriv, struct ipoib_dev_priv *priv,
u16 pkey, int type)
{
struct net_device *ndev = priv->dev;
int result;
struct rdma_netdev *rn = netdev_priv(ndev);
ASSERT_RTNL();
/*
* We do not need to touch priv if register_netdevice fails, so just
* always use this flow.
*/
ndev->priv_destructor = ipoib_intf_free;
/*
* Racing with unregister of the parent must be prevented by the
* caller.
*/
WARN_ON(ppriv->dev->reg_state != NETREG_REGISTERED);
if (pkey == 0 || pkey == 0x8000) {
result = -EINVAL;
goto out_early;
}
rn->mtu = priv->mcast_mtu;
priv->parent = ppriv->dev;
priv->pkey = pkey;
priv->child_type = type;
if (!is_child_unique(ppriv, priv)) {
result = -ENOTUNIQ;
goto out_early;
}
result = register_netdevice(ndev);
if (result) {
ipoib_warn(priv, "failed to initialize; error %i", result);
/*
* register_netdevice sometimes calls priv_destructor,
* sometimes not. Make sure it was done.
*/
goto out_early;
}
	/* RTNL children don't need proprietary sysfs entries */
if (type == IPOIB_LEGACY_CHILD) {
if (ipoib_cm_add_mode_attr(ndev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(ndev))
goto sysfs_failed;
if (ipoib_add_umcast_attr(ndev))
goto sysfs_failed;
if (device_create_file(&ndev->dev, &dev_attr_parent))
goto sysfs_failed;
}
return 0;
sysfs_failed:
unregister_netdevice(priv->dev);
return -ENOMEM;
out_early:
if (ndev->priv_destructor)
ndev->priv_destructor(ndev);
return result;
}
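/*
 * ipoib_vlan_add - create a legacy P_Key child interface named "<parent>.<pkey>"
 *
 * Uses rtnl_trylock() and restart_syscall() since this function can be called
 * from a sysfs store callback while another thread holds the RTNL lock.
 */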
int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv;
char intf_name[IFNAMSIZ];
struct net_device *ndev;
int result;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!rtnl_trylock())
return restart_syscall();
if (pdev->reg_state != NETREG_REGISTERED) {
rtnl_unlock();
return -EPERM;
}
ppriv = ipoib_priv(pdev);
snprintf(intf_name, sizeof(intf_name), "%s.%04x",
ppriv->dev->name, pkey);
ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
if (IS_ERR(ndev)) {
result = PTR_ERR(ndev);
goto out;
}
priv = ipoib_priv(ndev);
ndev->rtnl_link_ops = ipoib_get_link_ops();
result = __ipoib_vlan_add(ppriv, priv, pkey, IPOIB_LEGACY_CHILD);
if (result && ndev->reg_state == NETREG_UNINITIALIZED)
free_netdev(ndev);
out:
rtnl_unlock();
return result;
}
struct ipoib_vlan_delete_work {
struct work_struct work;
struct net_device *dev;
};
/*
* sysfs callbacks of a netdevice cannot obtain the rtnl lock as
* unregister_netdev ultimately deletes the sysfs files while holding the rtnl
* lock. This deadlocks the system.
*
* A callback can use rtnl_trylock to avoid the deadlock but it cannot call
* unregister_netdev as that internally takes and releases the rtnl_lock. So
* instead we find the netdev to unregister and then do the actual unregister
* from the global work queue where we can obtain the rtnl_lock safely.
*/
static void ipoib_vlan_delete_task(struct work_struct *work)
{
struct ipoib_vlan_delete_work *pwork =
container_of(work, struct ipoib_vlan_delete_work, work);
struct net_device *dev = pwork->dev;
rtnl_lock();
/* Unregistering tasks can race with another task or parent removal */
if (dev->reg_state == NETREG_REGISTERED) {
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
ipoib_dbg(ppriv, "delete child vlan %s\n", dev->name);
unregister_netdevice(dev);
}
rtnl_unlock();
kfree(pwork);
}
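/*
 * ipoib_vlan_delete - remove the legacy child interface that matches @pkey
 *
 * The interface is unlinked from the parent's child list here; the actual
 * unregister is deferred to the global workqueue (see ipoib_vlan_delete_task()).
 */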
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
{
struct ipoib_dev_priv *ppriv, *priv, *tpriv;
int rc;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (!rtnl_trylock())
return restart_syscall();
if (pdev->reg_state != NETREG_REGISTERED) {
rtnl_unlock();
return -EPERM;
}
ppriv = ipoib_priv(pdev);
rc = -ENODEV;
list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
if (priv->pkey == pkey &&
priv->child_type == IPOIB_LEGACY_CHILD) {
struct ipoib_vlan_delete_work *work;
work = kmalloc(sizeof(*work), GFP_KERNEL);
if (!work) {
rc = -ENOMEM;
goto out;
}
down_write(&ppriv->vlan_rwsem);
list_del_init(&priv->list);
up_write(&ppriv->vlan_rwsem);
work->dev = priv->dev;
INIT_WORK(&work->work, ipoib_vlan_delete_task);
queue_work(ipoib_workqueue, &work->work);
rc = 0;
break;
}
}
out:
rtnl_unlock();
return rc;
}
| linux-master | drivers/infiniband/ulp/ipoib/ipoib_vlan.c |
/*
* Copyright (c) 2012 Mellanox Technologies. - All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/netdevice.h>
#include <linux/if_arp.h> /* For ARPHRD_xxx */
#include <net/rtnetlink.h>
#include "ipoib.h"
static const struct nla_policy ipoib_policy[IFLA_IPOIB_MAX + 1] = {
[IFLA_IPOIB_PKEY] = { .type = NLA_U16 },
[IFLA_IPOIB_MODE] = { .type = NLA_U16 },
[IFLA_IPOIB_UMCAST] = { .type = NLA_U16 },
};
static unsigned int ipoib_get_max_num_queues(void)
{
return min_t(unsigned int, num_possible_cpus(), 128);
}
static int ipoib_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
u16 val;
if (nla_put_u16(skb, IFLA_IPOIB_PKEY, priv->pkey))
goto nla_put_failure;
val = test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
if (nla_put_u16(skb, IFLA_IPOIB_MODE, val))
goto nla_put_failure;
val = test_bit(IPOIB_FLAG_UMCAST, &priv->flags);
if (nla_put_u16(skb, IFLA_IPOIB_UMCAST, val))
goto nla_put_failure;
return 0;
nla_put_failure:
return -EMSGSIZE;
}
static int ipoib_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
u16 mode, umcast;
int ret = 0;
if (data[IFLA_IPOIB_MODE]) {
mode = nla_get_u16(data[IFLA_IPOIB_MODE]);
if (mode == IPOIB_MODE_DATAGRAM)
ret = ipoib_set_mode(dev, "datagram\n");
else if (mode == IPOIB_MODE_CONNECTED)
ret = ipoib_set_mode(dev, "connected\n");
else
ret = -EINVAL;
if (ret < 0)
goto out_err;
}
if (data[IFLA_IPOIB_UMCAST]) {
umcast = nla_get_u16(data[IFLA_IPOIB_UMCAST]);
ipoib_set_umcast(dev, umcast);
}
out_err:
return ret;
}
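/*
 * ipoib_new_child_link - RTNL newlink handler for IPoIB child interfaces
 *
 * IFLA_LINK must refer to an IPoIB parent device; if no pkey attribute is
 * given the parent's pkey is used.
 */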
static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct net_device *pdev;
struct ipoib_dev_priv *ppriv;
u16 child_pkey;
int err;
if (!tb[IFLA_LINK])
return -EINVAL;
pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!pdev || pdev->type != ARPHRD_INFINIBAND)
return -ENODEV;
ppriv = ipoib_priv(pdev);
if (test_bit(IPOIB_FLAG_SUBINTERFACE, &ppriv->flags)) {
ipoib_warn(ppriv, "child creation disallowed for child devices\n");
return -EINVAL;
}
if (!data || !data[IFLA_IPOIB_PKEY]) {
ipoib_dbg(ppriv, "no pkey specified, using parent pkey\n");
child_pkey = ppriv->pkey;
} else
child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
err = ipoib_intf_init(ppriv->ca, ppriv->port, dev->name, dev);
if (err) {
ipoib_warn(ppriv, "failed to initialize pkey device\n");
return err;
}
err = __ipoib_vlan_add(ppriv, ipoib_priv(dev),
child_pkey, IPOIB_RTNL_CHILD);
if (err)
return err;
if (data) {
err = ipoib_changelink(dev, tb, data, extack);
if (err) {
unregister_netdevice(dev);
return err;
}
}
return 0;
}
static void ipoib_del_child_link(struct net_device *dev, struct list_head *head)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (!priv->parent)
return;
unregister_netdevice_queue(dev, head);
}
static size_t ipoib_get_size(const struct net_device *dev)
{
return nla_total_size(2) + /* IFLA_IPOIB_PKEY */
nla_total_size(2) + /* IFLA_IPOIB_MODE */
nla_total_size(2); /* IFLA_IPOIB_UMCAST */
}
static struct rtnl_link_ops ipoib_link_ops __read_mostly = {
.kind = "ipoib",
.netns_refund = true,
.maxtype = IFLA_IPOIB_MAX,
.policy = ipoib_policy,
.priv_size = sizeof(struct ipoib_dev_priv),
.setup = ipoib_setup_common,
.newlink = ipoib_new_child_link,
.dellink = ipoib_del_child_link,
.changelink = ipoib_changelink,
.get_size = ipoib_get_size,
.fill_info = ipoib_fill_info,
.get_num_rx_queues = ipoib_get_max_num_queues,
.get_num_tx_queues = ipoib_get_max_num_queues,
};
struct rtnl_link_ops *ipoib_get_link_ops(void)
{
return &ipoib_link_ops;
}
int __init ipoib_netlink_init(void)
{
return rtnl_link_register(&ipoib_link_ops);
}
void __exit ipoib_netlink_fini(void)
{
rtnl_link_unregister(&ipoib_link_ops);
}
MODULE_ALIAS_RTNL_LINK("ipoib");
| linux-master | drivers/infiniband/ulp/ipoib/ipoib_netlink.c |
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/slab.h>
#include "ipoib.h"
int ipoib_mcast_attach(struct net_device *dev, struct ib_device *hca,
union ib_gid *mgid, u16 mlid, int set_qkey, u32 qkey)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_attr *qp_attr = NULL;
int ret;
u16 pkey_index;
if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) {
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
ret = -ENXIO;
goto out;
}
set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
if (set_qkey) {
ret = -ENOMEM;
qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
if (!qp_attr)
goto out;
/* set correct QKey for QP */
qp_attr->qkey = qkey;
ret = ib_modify_qp(priv->qp, qp_attr, IB_QP_QKEY);
if (ret) {
ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
goto out;
}
}
/* attach QP to multicast group */
ret = ib_attach_mcast(priv->qp, mgid, mlid);
if (ret)
ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);
out:
kfree(qp_attr);
return ret;
}
int ipoib_mcast_detach(struct net_device *dev, struct ib_device *hca,
union ib_gid *mgid, u16 mlid)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret;
ret = ib_detach_mcast(priv->qp, mgid, mlid);
return ret;
}
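/* Bring the UD QP through the INIT -> RTR -> RTS transitions */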
int ipoib_init_qp(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret;
struct ib_qp_attr qp_attr;
int attr_mask;
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return -1;
qp_attr.qp_state = IB_QPS_INIT;
qp_attr.qkey = 0;
qp_attr.port_num = priv->port;
qp_attr.pkey_index = priv->pkey_index;
attr_mask =
IB_QP_QKEY |
IB_QP_PORT |
IB_QP_PKEY_INDEX |
IB_QP_STATE;
ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret);
goto out_fail;
}
qp_attr.qp_state = IB_QPS_RTR;
/* Can't set this in an INIT->RTR transition */
attr_mask &= ~IB_QP_PORT;
ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to RTR, ret = %d\n", ret);
goto out_fail;
}
qp_attr.qp_state = IB_QPS_RTS;
qp_attr.sq_psn = 0;
attr_mask |= IB_QP_SQ_PSN;
attr_mask &= ~IB_QP_PKEY_INDEX;
ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to RTS, ret = %d\n", ret);
goto out_fail;
}
return 0;
out_fail:
qp_attr.qp_state = IB_QPS_RESET;
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
ipoib_warn(priv, "Failed to modify QP to RESET state\n");
return ret;
}
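/* Create the send/receive CQs and the UD QP, and initialize the
 * scatter/gather lists used by the datagram data path.
 */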
int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_init_attr init_attr = {
.cap = {
.max_send_wr = ipoib_sendq_size,
.max_recv_wr = ipoib_recvq_size,
.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
MAX_SKB_FRAGS + 1),
.max_recv_sge = IPOIB_UD_RX_SG
},
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_UD
};
struct ib_cq_init_attr cq_attr = {};
int ret, size, req_vec;
int i;
static atomic_t counter;
size = ipoib_recvq_size + 1;
ret = ipoib_cm_dev_init(dev);
if (!ret) {
size += ipoib_sendq_size;
if (ipoib_cm_has_srq(dev))
size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
else
size += ipoib_recvq_size * ipoib_max_conn_qp;
} else
if (ret != -EOPNOTSUPP)
return ret;
req_vec = atomic_inc_return(&counter) * 2;
cq_attr.cqe = size;
cq_attr.comp_vector = req_vec % priv->ca->num_comp_vectors;
priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
priv, &cq_attr);
if (IS_ERR(priv->recv_cq)) {
pr_warn("%s: failed to create receive CQ\n", ca->name);
goto out_cm_dev_cleanup;
}
cq_attr.cqe = ipoib_sendq_size;
cq_attr.comp_vector = (req_vec + 1) % priv->ca->num_comp_vectors;
priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
priv, &cq_attr);
if (IS_ERR(priv->send_cq)) {
pr_warn("%s: failed to create send CQ\n", ca->name);
goto out_free_recv_cq;
}
if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))
goto out_free_send_cq;
init_attr.send_cq = priv->send_cq;
init_attr.recv_cq = priv->recv_cq;
if (priv->kernel_caps & IBK_UD_TSO)
init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;
if (priv->kernel_caps & IBK_BLOCK_MULTICAST_LOOPBACK)
init_attr.create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
if (priv->hca_caps & IB_DEVICE_MANAGED_FLOW_STEERING)
init_attr.create_flags |= IB_QP_CREATE_NETIF_QP;
if (priv->kernel_caps & IBK_RDMA_NETDEV_OPA)
init_attr.create_flags |= IB_QP_CREATE_NETDEV_USE;
priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) {
pr_warn("%s: failed to create QP\n", ca->name);
goto out_free_send_cq;
}
if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
goto out_free_send_cq;
for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
priv->tx_sge[i].lkey = priv->pd->local_dma_lkey;
priv->tx_wr.wr.opcode = IB_WR_SEND;
priv->tx_wr.wr.sg_list = priv->tx_sge;
priv->tx_wr.wr.send_flags = IB_SEND_SIGNALED;
priv->rx_sge[0].lkey = priv->pd->local_dma_lkey;
priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
priv->rx_wr.num_sge = 1;
priv->rx_wr.next = NULL;
priv->rx_wr.sg_list = priv->rx_sge;
if (init_attr.cap.max_send_sge > 1)
dev->features |= NETIF_F_SG;
priv->max_send_sge = init_attr.cap.max_send_sge;
return 0;
out_free_send_cq:
ib_destroy_cq(priv->send_cq);
out_free_recv_cq:
ib_destroy_cq(priv->recv_cq);
out_cm_dev_cleanup:
ipoib_cm_dev_cleanup(dev);
return -ENODEV;
}
void ipoib_transport_dev_cleanup(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (priv->qp) {
if (ib_destroy_qp(priv->qp))
ipoib_warn(priv, "ib_qp_destroy failed\n");
priv->qp = NULL;
}
ib_destroy_cq(priv->send_cq);
ib_destroy_cq(priv->recv_cq);
}
void ipoib_event(struct ib_event_handler *handler,
struct ib_event *record)
{
struct ipoib_dev_priv *priv =
container_of(handler, struct ipoib_dev_priv, event_handler);
if (record->element.port_num != priv->port)
return;
ipoib_dbg(priv, "Event %d on device %s port %d\n", record->event,
dev_name(&record->device->dev), record->element.port_num);
if (record->event == IB_EVENT_CLIENT_REREGISTER) {
queue_work(ipoib_workqueue, &priv->flush_light);
} else if (record->event == IB_EVENT_PORT_ERR ||
record->event == IB_EVENT_PORT_ACTIVE ||
record->event == IB_EVENT_LID_CHANGE) {
queue_work(ipoib_workqueue, &priv->flush_normal);
} else if (record->event == IB_EVENT_PKEY_CHANGE) {
queue_work(ipoib_workqueue, &priv->flush_heavy);
} else if (record->event == IB_EVENT_GID_CHANGE &&
!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
queue_work(ipoib_workqueue, &priv->flush_light);
}
}
| linux-master | drivers/infiniband/ulp/ipoib/ipoib_verbs.c |
/*
* Copyright (c) 2006 Mellanox Technologies. All rights reserved
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/moduleparam.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include "ipoib.h"
int ipoib_max_conn_qp = 128;
module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
"Max number of connected-mode QPs per interface "
"(applied only if shared receive queue is not available)");
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;
module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
"Enable data path debug tracing for connected mode if > 0");
#endif
#define IPOIB_CM_IETF_ID 0x1000000000000000ULL
#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
#define IPOIB_CM_RX_RESERVE (ALIGN(IPOIB_HARD_LEN, 16) - IPOIB_ENCAP_LEN)
static struct ib_qp_attr ipoib_cm_err_attr = {
.qp_state = IB_QPS_ERR
};
#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
static struct ib_send_wr ipoib_cm_rx_drain_wr = {
.opcode = IB_WR_SEND,
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
const struct ib_cm_event *event);
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
u64 mapping[IPOIB_CM_RX_SG])
{
int i;
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < frags; ++i)
ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i, ret;
priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
for (i = 0; i < priv->cm.num_frags; ++i)
priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];
ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
priv->cm.srq_ring[id].mapping);
dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
priv->cm.srq_ring[id].skb = NULL;
}
return ret;
}
static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
struct ipoib_cm_rx *rx,
struct ib_recv_wr *wr,
struct ib_sge *sge, int id)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i, ret;
wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
for (i = 0; i < IPOIB_CM_RX_SG; ++i)
sge[i].addr = rx->rx_ring[id].mapping[i];
ret = ib_post_recv(rx->qp, wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
rx->rx_ring[id].mapping);
dev_kfree_skb_any(rx->rx_ring[id].skb);
rx->rx_ring[id].skb = NULL;
}
return ret;
}
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring,
int id, int frags,
u64 mapping[IPOIB_CM_RX_SG],
gfp_t gfp)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct sk_buff *skb;
int i;
skb = dev_alloc_skb(ALIGN(IPOIB_CM_HEAD_SIZE + IPOIB_PSEUDO_LEN, 16));
if (unlikely(!skb))
return NULL;
/*
* IPoIB adds an IPOIB_ENCAP_LEN byte header; this will align the
* IP header to a multiple of 16.
*/
skb_reserve(skb, IPOIB_CM_RX_RESERVE);
mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
dev_kfree_skb_any(skb);
return NULL;
}
for (i = 0; i < frags; i++) {
struct page *page = alloc_page(gfp);
if (!page)
goto partial_error;
skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
mapping[i + 1] = ib_dma_map_page(priv->ca, page,
0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
goto partial_error;
}
rx_ring[id].skb = skb;
return skb;
partial_error:
ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
for (; i > 0; --i)
ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
return NULL;
}
static void ipoib_cm_free_rx_ring(struct net_device *dev,
struct ipoib_cm_rx_buf *rx_ring)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i;
for (i = 0; i < ipoib_recvq_size; ++i)
if (rx_ring[i].skb) {
ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
rx_ring[i].mapping);
dev_kfree_skb_any(rx_ring[i].skb);
}
vfree(rx_ring);
}
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
struct ipoib_cm_rx *p;
/* We only reserved 1 extra slot in the CQ for drain WRs, so
* make sure we have at most 1 outstanding WR. */
if (list_empty(&priv->cm.rx_flush_list) ||
!list_empty(&priv->cm.rx_drain_list))
return;
/*
* QPs on the flush list are in the error state. This way, a "flush
* error" WC will be immediately generated for each WR we post.
*/
p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;
if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, NULL))
ipoib_warn(priv, "failed to post drain wr\n");
list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
struct ipoib_cm_rx *p = ctx;
struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
unsigned long flags;
if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
return;
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_flush_list);
p->state = IPOIB_CM_RX_FLUSH;
ipoib_cm_start_rx_drain(priv);
spin_unlock_irqrestore(&priv->lock, flags);
}
static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
struct ipoib_cm_rx *p)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_init_attr attr = {
.event_handler = ipoib_cm_rx_event_handler,
.send_cq = priv->recv_cq, /* For drain WR */
.recv_cq = priv->recv_cq,
.srq = priv->cm.srq,
.cap.max_send_wr = 1, /* For drain WR */
.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
.qp_context = p,
};
if (!ipoib_cm_has_srq(dev)) {
attr.cap.max_recv_wr = ipoib_recvq_size;
attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
}
return ib_create_qp(priv->pd, &attr);
}
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
struct ib_cm_id *cm_id, struct ib_qp *qp,
unsigned int psn)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
qp_attr.qp_state = IB_QPS_INIT;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
return ret;
}
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
return ret;
}
qp_attr.qp_state = IB_QPS_RTR;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
return ret;
}
qp_attr.rq_psn = psn;
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
return ret;
}
/*
* Current Mellanox HCA firmware won't generate completions
* with error for drain WRs unless the QP has been moved to
* RTS first. This work-around leaves a window where a QP has
* moved to error asynchronously, but this will eventually get
* fixed in firmware, so let's not error out if modify QP
* fails.
*/
qp_attr.qp_state = IB_QPS_RTS;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
return 0;
}
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
return 0;
}
return 0;
}
static void ipoib_cm_init_rx_wr(struct net_device *dev,
struct ib_recv_wr *wr,
struct ib_sge *sge)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i;
for (i = 0; i < priv->cm.num_frags; ++i)
sge[i].lkey = priv->pd->local_dma_lkey;
sge[0].length = IPOIB_CM_HEAD_SIZE;
for (i = 1; i < priv->cm.num_frags; ++i)
sge[i].length = PAGE_SIZE;
wr->next = NULL;
wr->sg_list = sge;
wr->num_sge = priv->cm.num_frags;
}
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
struct ipoib_cm_rx *rx)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct {
struct ib_recv_wr wr;
struct ib_sge sge[IPOIB_CM_RX_SG];
} *t;
int ret;
int i;
rx->rx_ring = vzalloc(array_size(ipoib_recvq_size,
sizeof(*rx->rx_ring)));
if (!rx->rx_ring)
return -ENOMEM;
t = kmalloc(sizeof(*t), GFP_KERNEL);
if (!t) {
ret = -ENOMEM;
goto err_free_1;
}
ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);
spin_lock_irq(&priv->lock);
if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
spin_unlock_irq(&priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
ret = -EINVAL;
goto err_free;
} else
++priv->cm.nonsrq_conn_qp;
spin_unlock_irq(&priv->lock);
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
rx->rx_ring[i].mapping,
GFP_KERNEL)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
ret = -ENOMEM;
goto err_count;
}
ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
if (ret) {
ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
"failed for buf %d\n", i);
ret = -EIO;
goto err_count;
}
}
rx->recv_count = ipoib_recvq_size;
kfree(t);
return 0;
err_count:
spin_lock_irq(&priv->lock);
--priv->cm.nonsrq_conn_qp;
spin_unlock_irq(&priv->lock);
err_free:
kfree(t);
err_free_1:
ipoib_cm_free_rx_ring(dev, rx->rx_ring);
return ret;
}
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
struct ib_qp *qp,
const struct ib_cm_req_event_param *req,
unsigned int psn)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_data data = {};
struct ib_cm_rep_param rep = {};
data.qpn = cpu_to_be32(priv->qp->qp_num);
data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
rep.private_data = &data;
rep.private_data_len = sizeof(data);
rep.flow_control = 0;
rep.rnr_retry_count = req->rnr_retry_count;
rep.srq = ipoib_cm_has_srq(dev);
rep.qp_num = qp->qp_num;
rep.starting_psn = psn;
return ib_send_cm_rep(cm_id, &rep);
}
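/* Passive side: handle an incoming connection request by creating an
 * RC receive QP, bringing it up and replying with a REP.
 */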
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,
const struct ib_cm_event *event)
{
struct net_device *dev = cm_id->context;
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_rx *p;
unsigned int psn;
int ret;
ipoib_dbg(priv, "REQ arrived\n");
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
p->dev = dev;
p->id = cm_id;
cm_id->context = p;
p->state = IPOIB_CM_RX_LIVE;
p->jiffies = jiffies;
INIT_LIST_HEAD(&p->list);
p->qp = ipoib_cm_create_rx_qp(dev, p);
if (IS_ERR(p->qp)) {
ret = PTR_ERR(p->qp);
goto err_qp;
}
psn = get_random_u32() & 0xffffff;
ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
if (ret)
goto err_modify;
if (!ipoib_cm_has_srq(dev)) {
ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
if (ret)
goto err_modify;
}
spin_lock_irq(&priv->lock);
queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
/* Add this entry to the head of the passive_ids list, but do not re-add
* it if IB_EVENT_QP_LAST_WQE_REACHED has already moved it to the flush list. */
p->jiffies = jiffies;
if (p->state == IPOIB_CM_RX_LIVE)
list_move(&p->list, &priv->cm.passive_ids);
spin_unlock_irq(&priv->lock);
ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
if (ret) {
ipoib_warn(priv, "failed to send REP: %d\n", ret);
if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
ipoib_warn(priv, "unable to move qp to error state\n");
}
return 0;
err_modify:
ib_destroy_qp(p->qp);
err_qp:
kfree(p);
return ret;
}
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
const struct ib_cm_event *event)
{
struct ipoib_cm_rx *p;
struct ipoib_dev_priv *priv;
switch (event->event) {
case IB_CM_REQ_RECEIVED:
return ipoib_cm_req_handler(cm_id, event);
case IB_CM_DREQ_RECEIVED:
ib_send_cm_drep(cm_id, NULL, 0);
fallthrough;
case IB_CM_REJ_RECEIVED:
p = cm_id->context;
priv = ipoib_priv(p->dev);
if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
ipoib_warn(priv, "unable to move qp to error state\n");
fallthrough;
default:
return 0;
}
}
/* Adjust length of skb with fragments to match received data */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
unsigned int length, struct sk_buff *toskb)
{
int i, num_frags;
unsigned int size;
/* put header into skb */
size = min(length, hdr_space);
skb->tail += size;
skb->len += size;
length -= size;
num_frags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < num_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
if (length == 0) {
/* don't need this page */
skb_fill_page_desc(toskb, i, skb_frag_page(frag),
0, PAGE_SIZE);
--skb_shinfo(skb)->nr_frags;
} else {
size = min_t(unsigned int, length, PAGE_SIZE);
skb_frag_size_set(frag, size);
skb->data_len += size;
skb->truesize += size;
skb->len += size;
length -= size;
}
}
}
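/* Handle a receive completion on a connected-mode QP */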
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_rx_buf *rx_ring;
unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
struct sk_buff *skb, *newskb;
struct ipoib_cm_rx *p;
unsigned long flags;
u64 mapping[IPOIB_CM_RX_SG];
int frags;
int has_srq;
struct sk_buff *small_skb;
ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
wr_id, wc->status);
if (unlikely(wr_id >= ipoib_recvq_size)) {
if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
ipoib_cm_start_rx_drain(priv);
queue_work(priv->wq, &priv->cm.rx_reap_task);
spin_unlock_irqrestore(&priv->lock, flags);
} else
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
wr_id, ipoib_recvq_size);
return;
}
p = wc->qp->qp_context;
has_srq = ipoib_cm_has_srq(dev);
rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
skb = rx_ring[wr_id].skb;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
ipoib_dbg(priv,
"cm recv error (status=%d, wrid=%d vend_err %#x)\n",
wc->status, wr_id, wc->vendor_err);
++dev->stats.rx_dropped;
if (has_srq)
goto repost;
else {
if (!--p->recv_count) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&p->list, &priv->cm.rx_reap_list);
spin_unlock_irqrestore(&priv->lock, flags);
queue_work(priv->wq, &priv->cm.rx_reap_task);
}
return;
}
}
if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
spin_lock_irqsave(&priv->lock, flags);
p->jiffies = jiffies;
/* Move this entry to the list head, but do not re-add it
* if it has already been moved out of the list. */
if (p->state == IPOIB_CM_RX_LIVE)
list_move(&p->list, &priv->cm.passive_ids);
spin_unlock_irqrestore(&priv->lock, flags);
}
}
if (wc->byte_len < IPOIB_CM_COPYBREAK) {
int dlen = wc->byte_len;
small_skb = dev_alloc_skb(dlen + IPOIB_CM_RX_RESERVE);
if (small_skb) {
skb_reserve(small_skb, IPOIB_CM_RX_RESERVE);
ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_copy_from_linear_data(skb, small_skb->data, dlen);
ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
dlen, DMA_FROM_DEVICE);
skb_put(small_skb, dlen);
skb = small_skb;
goto copied;
}
}
frags = PAGE_ALIGN(wc->byte_len -
min_t(u32, wc->byte_len, IPOIB_CM_HEAD_SIZE)) /
PAGE_SIZE;
newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags,
mapping, GFP_ATOMIC);
if (unlikely(!newskb)) {
/*
* If we can't allocate a new RX buffer, dump
* this packet and reuse the old buffer.
*/
ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
++dev->stats.rx_dropped;
goto repost;
}
ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof(*mapping));
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
copied:
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
skb->dev = dev;
/* XXX get correct PACKET_ type here */
skb->pkt_type = PACKET_HOST;
netif_receive_skb(skb);
repost:
if (has_srq) {
if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
"for buf %d\n", wr_id);
} else {
if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
&priv->cm.rx_wr,
priv->cm.rx_sge,
wr_id))) {
--p->recv_count;
ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
"for buf %d\n", wr_id);
}
}
}
static inline int post_send(struct ipoib_dev_priv *priv,
struct ipoib_cm_tx *tx,
unsigned int wr_id,
struct ipoib_tx_buf *tx_req)
{
ipoib_build_sge(priv, tx_req);
priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
return ib_post_send(tx->qp, &priv->tx_wr.wr, NULL);
}
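/* Transmit a single skb over a connected-mode (RC) QP */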
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_tx_buf *tx_req;
int rc;
unsigned int usable_sge = tx->max_send_sge - !!skb_headlen(skb);
if (unlikely(skb->len > tx->mtu)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
skb->len, tx->mtu);
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
return;
}
if (skb_shinfo(skb)->nr_frags > usable_sge) {
if (skb_linearize(skb) < 0) {
ipoib_warn(priv, "skb could not be linearized\n");
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
return;
}
/* Does skb_linearize return ok without reducing nr_frags? */
if (skb_shinfo(skb)->nr_frags > usable_sge) {
ipoib_warn(priv, "too many frags after skb linearize\n");
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
return;
}
}
ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
tx->tx_head, skb->len, tx->qp->qp_num);
/*
* We put the skb into the tx_ring _before_ we call post_send()
* because it's entirely possible that the completion handler will
* run before we execute anything after the post_send(). That
* means we have to make sure everything is properly recorded and
* our state is consistent before we call post_send().
*/
tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb;
if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
return;
}
if ((priv->global_tx_head - priv->global_tx_tail) ==
ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
tx->qp->qp_num);
netif_stop_queue(dev);
}
skb_orphan(skb);
skb_dst_drop(skb);
if (netif_queue_stopped(dev)) {
rc = ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
IB_CQ_REPORT_MISSED_EVENTS);
if (unlikely(rc < 0))
ipoib_warn(priv, "IPoIB/CM:request notify on send CQ failed\n");
else if (rc)
napi_schedule(&priv->send_napi);
}
rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), tx_req);
if (unlikely(rc)) {
ipoib_warn(priv, "IPoIB/CM:post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
} else {
netif_trans_update(dev);
++tx->tx_head;
++priv->global_tx_head;
}
}
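/* Handle a send completion on a connected-mode QP and reap the TX slot */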
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_tx *tx = wc->qp->qp_context;
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
struct ipoib_tx_buf *tx_req;
unsigned long flags;
ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
wr_id, wc->status);
if (unlikely(wr_id >= ipoib_sendq_size)) {
ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
wr_id, ipoib_sendq_size);
return;
}
tx_req = &tx->tx_ring[wr_id];
ipoib_dma_unmap_tx(priv, tx_req);
/* FIXME: is this right? Shouldn't we only increment on success? */
++dev->stats.tx_packets;
dev->stats.tx_bytes += tx_req->skb->len;
dev_kfree_skb_any(tx_req->skb);
netif_tx_lock(dev);
++tx->tx_tail;
++priv->global_tx_tail;
if (unlikely(netif_queue_stopped(dev) &&
((priv->global_tx_head - priv->global_tx_tail) <=
ipoib_sendq_size >> 1) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_neigh *neigh;
/* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
* so don't make waves.
*/
if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
wc->status == IB_WC_RETRY_EXC_ERR)
ipoib_dbg(priv,
"%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
else
ipoib_warn(priv,
"%s: failed cm send event (status=%d, wrid=%d vend_err %#x)\n",
__func__, wc->status, wr_id, wc->vendor_err);
spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh;
if (neigh) {
neigh->cm = NULL;
ipoib_neigh_free(neigh);
tx->neigh = NULL;
}
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
queue_work(priv->wq, &priv->cm.reap_task);
}
clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
spin_unlock_irqrestore(&priv->lock, flags);
}
netif_tx_unlock(dev);
}
int ipoib_cm_dev_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret;
if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
return 0;
priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
if (IS_ERR(priv->cm.id)) {
pr_warn("%s: failed to create CM ID\n", priv->ca->name);
ret = PTR_ERR(priv->cm.id);
goto err_cm;
}
ret = ib_cm_listen(priv->cm.id,
cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num));
if (ret) {
pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
IPOIB_CM_IETF_ID | priv->qp->qp_num);
goto err_listen;
}
return 0;
err_listen:
ib_destroy_cm_id(priv->cm.id);
err_cm:
priv->cm.id = NULL;
return ret;
}
static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_rx *rx, *n;
LIST_HEAD(list);
spin_lock_irq(&priv->lock);
list_splice_init(&priv->cm.rx_reap_list, &list);
spin_unlock_irq(&priv->lock);
list_for_each_entry_safe(rx, n, &list, list) {
ib_destroy_cm_id(rx->id);
ib_destroy_qp(rx->qp);
if (!ipoib_cm_has_srq(dev)) {
ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
spin_lock_irq(&priv->lock);
--priv->cm.nonsrq_conn_qp;
spin_unlock_irq(&priv->lock);
}
kfree(rx);
}
}
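/* Tear down connected mode: destroy the listening CM ID, move passive
 * RX QPs to the error state and wait for them to drain.
 */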
void ipoib_cm_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_rx *p;
unsigned long begin;
int ret;
if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
return;
ib_destroy_cm_id(priv->cm.id);
priv->cm.id = NULL;
spin_lock_irq(&priv->lock);
while (!list_empty(&priv->cm.passive_ids)) {
p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
list_move(&p->list, &priv->cm.rx_error_list);
p->state = IPOIB_CM_RX_ERROR;
spin_unlock_irq(&priv->lock);
ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
if (ret)
ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
spin_lock_irq(&priv->lock);
}
/* Wait for all RX to be drained */
begin = jiffies;
while (!list_empty(&priv->cm.rx_error_list) ||
!list_empty(&priv->cm.rx_flush_list) ||
!list_empty(&priv->cm.rx_drain_list)) {
if (time_after(jiffies, begin + 5 * HZ)) {
ipoib_warn(priv, "RX drain timing out\n");
/*
* assume the HW is wedged and just free up everything.
*/
list_splice_init(&priv->cm.rx_flush_list,
&priv->cm.rx_reap_list);
list_splice_init(&priv->cm.rx_error_list,
&priv->cm.rx_reap_list);
list_splice_init(&priv->cm.rx_drain_list,
&priv->cm.rx_reap_list);
break;
}
spin_unlock_irq(&priv->lock);
usleep_range(1000, 2000);
ipoib_drain_cq(dev);
spin_lock_irq(&priv->lock);
}
spin_unlock_irq(&priv->lock);
ipoib_cm_free_rx_reap_list(dev);
cancel_delayed_work(&priv->cm.stale_task);
}
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id,
const struct ib_cm_event *event)
{
struct ipoib_cm_tx *p = cm_id->context;
struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
struct ipoib_cm_data *data = event->private_data;
struct sk_buff_head skqueue;
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
struct sk_buff *skb;
p->mtu = be32_to_cpu(data->mtu);
if (p->mtu <= IPOIB_ENCAP_LEN) {
ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
p->mtu, IPOIB_ENCAP_LEN);
return -EINVAL;
}
qp_attr.qp_state = IB_QPS_RTR;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
return ret;
}
qp_attr.rq_psn = 0 /* FIXME */;
ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
return ret;
}
qp_attr.qp_state = IB_QPS_RTS;
ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
return ret;
}
ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
return ret;
}
skb_queue_head_init(&skqueue);
netif_tx_lock_bh(p->dev);
spin_lock_irq(&priv->lock);
set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
if (p->neigh)
while ((skb = __skb_dequeue(&p->neigh->queue)))
__skb_queue_tail(&skqueue, skb);
spin_unlock_irq(&priv->lock);
netif_tx_unlock_bh(p->dev);
while ((skb = __skb_dequeue(&skqueue))) {
skb->dev = p->dev;
ret = dev_queue_xmit(skb);
if (ret)
ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
__func__, ret);
}
ret = ib_send_cm_rtu(cm_id, NULL, 0);
if (ret) {
ipoib_warn(priv, "failed to send RTU: %d\n", ret);
return ret;
}
return 0;
}
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_init_attr attr = {
.send_cq = priv->send_cq,
.recv_cq = priv->recv_cq,
.srq = priv->cm.srq,
.cap.max_send_wr = ipoib_sendq_size,
.cap.max_send_sge = 1,
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
.qp_context = tx,
.create_flags = 0
};
struct ib_qp *tx_qp;
if (dev->features & NETIF_F_SG)
attr.cap.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
MAX_SKB_FRAGS + 1);
tx_qp = ib_create_qp(priv->pd, &attr);
tx->max_send_sge = attr.cap.max_send_sge;
return tx_qp;
}
static int ipoib_cm_send_req(struct net_device *dev,
struct ib_cm_id *id, struct ib_qp *qp,
u32 qpn,
struct sa_path_rec *pathrec)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_data data = {};
struct ib_cm_req_param req = {};
data.qpn = cpu_to_be32(priv->qp->qp_num);
data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
req.primary_path = pathrec;
req.alternate_path = NULL;
req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
req.qp_num = qp->qp_num;
req.qp_type = qp->qp_type;
req.private_data = &data;
req.private_data_len = sizeof(data);
req.flow_control = 0;
req.starting_psn = 0; /* FIXME */
/*
* Pick some arbitrary defaults here; we could make these
* module parameters if anyone cared about setting them.
*/
req.responder_resources = 4;
req.remote_cm_response_timeout = 20;
req.local_cm_response_timeout = 20;
req.retry_count = 0; /* RFC draft warns against retries */
req.rnr_retry_count = 0; /* RFC draft warns against retries */
req.max_cm_retries = 15;
req.srq = ipoib_cm_has_srq(dev);
return ib_send_cm_req(id, &req);
}
static int ipoib_cm_modify_tx_init(struct net_device *dev,
struct ib_cm_id *cm_id, struct ib_qp *qp)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
qp_attr.pkey_index = priv->pkey_index;
qp_attr.qp_state = IB_QPS_INIT;
qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
qp_attr.port_num = priv->port;
qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
if (ret) {
ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
return ret;
}
return 0;
}
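/* Active side: allocate the TX ring and QP and send the CM REQ */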
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
struct sa_path_rec *pathrec)
{
struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
unsigned int noio_flag;
int ret;
noio_flag = memalloc_noio_save();
p->tx_ring = vzalloc(array_size(ipoib_sendq_size, sizeof(*p->tx_ring)));
if (!p->tx_ring) {
memalloc_noio_restore(noio_flag);
ret = -ENOMEM;
goto err_tx;
}
p->qp = ipoib_cm_create_tx_qp(p->dev, p);
memalloc_noio_restore(noio_flag);
if (IS_ERR(p->qp)) {
ret = PTR_ERR(p->qp);
ipoib_warn(priv, "failed to create tx qp: %d\n", ret);
goto err_qp;
}
p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
if (IS_ERR(p->id)) {
ret = PTR_ERR(p->id);
ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
goto err_id;
}
ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
if (ret) {
ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
goto err_modify_send;
}
ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
if (ret) {
ipoib_warn(priv, "failed to send cm req: %d\n", ret);
goto err_modify_send;
}
ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
p->qp->qp_num, pathrec->dgid.raw, qpn);
return 0;
err_modify_send:
ib_destroy_cm_id(p->id);
err_id:
p->id = NULL;
ib_destroy_qp(p->qp);
err_qp:
p->qp = NULL;
vfree(p->tx_ring);
err_tx:
return ret;
}
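/* Wait for outstanding sends to complete (with a timeout), then
 * release the resources of a TX connection.
 */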
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
struct ipoib_dev_priv *priv = ipoib_priv(p->dev);
struct ipoib_tx_buf *tx_req;
unsigned long begin;
ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);
if (p->id)
ib_destroy_cm_id(p->id);
if (p->tx_ring) {
/* Wait for all sends to complete */
begin = jiffies;
while ((int) p->tx_tail - (int) p->tx_head < 0) {
if (time_after(jiffies, begin + 5 * HZ)) {
ipoib_warn(priv, "timing out; %d sends not completed\n",
p->tx_head - p->tx_tail);
goto timeout;
}
usleep_range(1000, 2000);
}
}
timeout:
while ((int) p->tx_tail - (int) p->tx_head < 0) {
tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
netif_tx_lock_bh(p->dev);
++p->tx_tail;
++priv->global_tx_tail;
if (unlikely((priv->global_tx_head - priv->global_tx_tail) <=
ipoib_sendq_size >> 1) &&
netif_queue_stopped(p->dev) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
netif_wake_queue(p->dev);
netif_tx_unlock_bh(p->dev);
}
if (p->qp)
ib_destroy_qp(p->qp);
vfree(p->tx_ring);
kfree(p);
}
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
const struct ib_cm_event *event)
{
struct ipoib_cm_tx *tx = cm_id->context;
struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh;
unsigned long flags;
int ret;
switch (event->event) {
case IB_CM_DREQ_RECEIVED:
ipoib_dbg(priv, "DREQ received.\n");
ib_send_cm_drep(cm_id, NULL, 0);
break;
case IB_CM_REP_RECEIVED:
ipoib_dbg(priv, "REP received.\n");
ret = ipoib_cm_rep_handler(cm_id, event);
if (ret)
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
break;
case IB_CM_REQ_ERROR:
case IB_CM_REJ_RECEIVED:
case IB_CM_TIMEWAIT_EXIT:
ipoib_dbg(priv, "CM error %d.\n", event->event);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
neigh = tx->neigh;
if (neigh) {
neigh->cm = NULL;
ipoib_neigh_free(neigh);
tx->neigh = NULL;
}
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
list_move(&tx->list, &priv->cm.reap_list);
queue_work(priv->wq, &priv->cm.reap_task);
}
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
break;
default:
break;
}
return 0;
}
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
struct ipoib_neigh *neigh)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_cm_tx *tx;
tx = kzalloc(sizeof(*tx), GFP_ATOMIC);
if (!tx)
return NULL;
neigh->cm = tx;
tx->neigh = neigh;
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
queue_work(priv->wq, &priv->cm.start_task);
return tx;
}
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
struct ipoib_dev_priv *priv = ipoib_priv(tx->dev);
unsigned long flags;
if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
spin_lock_irqsave(&priv->lock, flags);
list_move(&tx->list, &priv->cm.reap_list);
queue_work(priv->wq, &priv->cm.reap_task);
ipoib_dbg(priv, "Reap connection for gid %pI6\n",
tx->neigh->daddr + 4);
tx->neigh = NULL;
spin_unlock_irqrestore(&priv->lock, flags);
}
}
#define QPN_AND_OPTIONS_OFFSET 4
static void ipoib_cm_tx_start(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.start_task);
struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh;
struct ipoib_cm_tx *p;
unsigned long flags;
struct ipoib_path *path;
int ret;
struct sa_path_rec pathrec;
u32 qpn;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
while (!list_empty(&priv->cm.start_list)) {
p = list_entry(priv->cm.start_list.next, typeof(*p), list);
list_del_init(&p->list);
neigh = p->neigh;
qpn = IPOIB_QPN(neigh->daddr);
/*
* As long as the search is done while holding these two locks,
* the existence of the path guarantees its validity.
*/
path = __path_find(dev, neigh->daddr + QPN_AND_OPTIONS_OFFSET);
if (!path) {
pr_info("%s ignore not valid path %pI6\n",
__func__,
neigh->daddr + QPN_AND_OPTIONS_OFFSET);
goto free_neigh;
}
memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ret = ipoib_cm_tx_init(p, qpn, &pathrec);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
if (ret) {
free_neigh:
neigh = p->neigh;
if (neigh) {
neigh->cm = NULL;
ipoib_neigh_free(neigh);
}
list_del(&p->list);
kfree(p);
}
}
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
}
static void ipoib_cm_tx_reap(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.reap_task);
struct net_device *dev = priv->dev;
struct ipoib_cm_tx *p;
unsigned long flags;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
list_del_init(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
ipoib_cm_tx_destroy(p);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
}
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
}
static void ipoib_cm_skb_reap(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.skb_task);
struct net_device *dev = priv->dev;
struct sk_buff *skb;
unsigned long flags;
unsigned int mtu = priv->mcast_mtu;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
if (skb->protocol == htons(ETH_P_IP)) {
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
}
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
}
#endif
dev_kfree_skb_any(skb);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
}
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
}
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
unsigned int mtu)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int e = skb_queue_empty(&priv->cm.skb_queue);
skb_dst_update_pmtu(skb, mtu);
skb_queue_tail(&priv->cm.skb_queue, skb);
if (e)
queue_work(priv->wq, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
{
ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
cm.rx_reap_task)->dev);
}
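/* Move passive connections that have been idle for too long to the
 * error list so that they get drained and reaped.
 */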
static void ipoib_cm_stale_task(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.stale_task.work);
struct ipoib_cm_rx *p;
int ret;
spin_lock_irq(&priv->lock);
while (!list_empty(&priv->cm.passive_ids)) {
/* List is sorted by LRU, start from tail,
* stop when we see a recently used entry */
p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
break;
list_move(&p->list, &priv->cm.rx_error_list);
p->state = IPOIB_CM_RX_ERROR;
spin_unlock_irq(&priv->lock);
ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
if (ret)
ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
spin_lock_irq(&priv->lock);
}
if (!list_empty(&priv->cm.passive_ids))
queue_delayed_work(priv->wq,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
spin_unlock_irq(&priv->lock);
}
static ssize_t mode_show(struct device *d, struct device_attribute *attr,
char *buf)
{
struct net_device *dev = to_net_dev(d);
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
return sysfs_emit(buf, "connected\n");
else
return sysfs_emit(buf, "datagram\n");
}
static ssize_t mode_store(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct net_device *dev = to_net_dev(d);
int ret;
if (!rtnl_trylock()) {
return restart_syscall();
}
if (dev->reg_state != NETREG_REGISTERED) {
rtnl_unlock();
return -EPERM;
}
ret = ipoib_set_mode(dev, buf);
/* ipoib_set_mode() is expected to return with the rtnl lock still
* held; only when it returns -EBUSY has it already dropped the
* lock, in which case we must not call rtnl_unlock() again.
*/
if (ret != -EBUSY)
rtnl_unlock();
return (!ret || ret == -EBUSY) ? count : ret;
}
static DEVICE_ATTR_RW(mode);
int ipoib_cm_add_mode_attr(struct net_device *dev)
{
return device_create_file(&dev->dev, &dev_attr_mode);
}
static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_srq_init_attr srq_init_attr = {
.srq_type = IB_SRQT_BASIC,
.attr = {
.max_wr = ipoib_recvq_size,
.max_sge = max_sge
}
};
priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
if (IS_ERR(priv->cm.srq)) {
if (PTR_ERR(priv->cm.srq) != -EOPNOTSUPP)
pr_warn("%s: failed to allocate SRQ, error %ld\n",
priv->ca->name, PTR_ERR(priv->cm.srq));
priv->cm.srq = NULL;
return;
}
priv->cm.srq_ring = vzalloc(array_size(ipoib_recvq_size,
sizeof(*priv->cm.srq_ring)));
if (!priv->cm.srq_ring) {
ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
return;
}
}
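/* Initialize connected-mode state: lists, work items, the SRQ (if
 * supported) and its receive buffers.
 */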
int ipoib_cm_dev_init(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int max_srq_sge, i;
u8 addr;
INIT_LIST_HEAD(&priv->cm.passive_ids);
INIT_LIST_HEAD(&priv->cm.reap_list);
INIT_LIST_HEAD(&priv->cm.start_list);
INIT_LIST_HEAD(&priv->cm.rx_error_list);
INIT_LIST_HEAD(&priv->cm.rx_flush_list);
INIT_LIST_HEAD(&priv->cm.rx_drain_list);
INIT_LIST_HEAD(&priv->cm.rx_reap_list);
INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);
skb_queue_head_init(&priv->cm.skb_queue);
ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);
max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
ipoib_cm_create_srq(dev, max_srq_sge);
if (ipoib_cm_has_srq(dev)) {
priv->cm.max_cm_mtu = max_srq_sge * PAGE_SIZE - 0x10;
priv->cm.num_frags = max_srq_sge;
ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
priv->cm.max_cm_mtu, priv->cm.num_frags);
} else {
priv->cm.max_cm_mtu = IPOIB_CM_MTU;
priv->cm.num_frags = IPOIB_CM_RX_SG;
}
ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);
if (ipoib_cm_has_srq(dev)) {
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
priv->cm.num_frags - 1,
priv->cm.srq_ring[i].mapping,
GFP_KERNEL)) {
ipoib_warn(priv, "failed to allocate "
"receive buffer %d\n", i);
ipoib_cm_dev_cleanup(dev);
return -ENOMEM;
}
if (ipoib_cm_post_receive_srq(dev, i)) {
ipoib_warn(priv, "ipoib_cm_post_receive_srq "
"failed for buf %d\n", i);
ipoib_cm_dev_cleanup(dev);
return -EIO;
}
}
}
addr = IPOIB_FLAGS_RC;
dev_addr_mod(dev, 0, &addr, 1);
return 0;
}
void ipoib_cm_dev_cleanup(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (!priv->cm.srq)
return;
ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
if (!priv->cm.srq_ring)
return;
ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
priv->cm.srq_ring = NULL;
}
| linux-master | drivers/infiniband/ulp/ipoib/ipoib_cm.c |
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/moduleparam.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/igmp.h>
#include <linux/inetdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <net/dst.h>
#include "ipoib.h"
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int mcast_debug_level;
module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
"Enable multicast debug tracing if > 0");
#endif
struct ipoib_mcast_iter {
struct net_device *dev;
union ib_gid mgid;
unsigned long created;
unsigned int queuelen;
unsigned int complete;
unsigned int send_only;
};
/* join state that allows creating mcg with sendonly member request */
#define SENDONLY_FULLMEMBER_JOIN 8
/*
* This should be called with the priv->lock held
*/
static void __ipoib_mcast_schedule_join_thread(struct ipoib_dev_priv *priv,
struct ipoib_mcast *mcast,
bool delay)
{
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return;
/*
* We will be scheduling *something*, so cancel whatever is
* currently scheduled first
*/
cancel_delayed_work(&priv->mcast_task);
if (mcast && delay) {
/*
* We had a failure and want to schedule a retry later
*/
mcast->backoff *= 2;
if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
mcast->delay_until = jiffies + (mcast->backoff * HZ);
/*
* Mark this mcast for its delay, but restart the
* task immediately. The join task will make sure to
* clear out all entries without delays, and then
* schedule itself to run again when the earliest
* delay expires
*/
queue_delayed_work(priv->wq, &priv->mcast_task, 0);
} else if (delay) {
/*
* Special case of retrying after a failure to
* allocate the broadcast multicast group, wait
* 1 second and try again
*/
queue_delayed_work(priv->wq, &priv->mcast_task, HZ);
} else
queue_delayed_work(priv->wq, &priv->mcast_task, 0);
}
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
struct net_device *dev = mcast->dev;
int tx_dropped = 0;
ipoib_dbg_mcast(ipoib_priv(dev), "deleting multicast group %pI6\n",
mcast->mcmember.mgid.raw);
/* remove all neigh connected to this mcast */
ipoib_del_neighs_by_gid(dev, mcast->mcmember.mgid.raw);
if (mcast->ah)
ipoib_put_ah(mcast->ah);
while (!skb_queue_empty(&mcast->pkt_queue)) {
++tx_dropped;
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
}
netif_tx_lock_bh(dev);
dev->stats.tx_dropped += tx_dropped;
netif_tx_unlock_bh(dev);
kfree(mcast);
}
static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev)
{
struct ipoib_mcast *mcast;
mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
if (!mcast)
return NULL;
mcast->dev = dev;
mcast->created = jiffies;
mcast->delay_until = jiffies;
mcast->backoff = 1;
INIT_LIST_HEAD(&mcast->list);
INIT_LIST_HEAD(&mcast->neigh_list);
skb_queue_head_init(&mcast->pkt_queue);
return mcast;
}
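/* Look up a multicast group by MGID in the per-device rb-tree */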
static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rb_node *n = priv->multicast_tree.rb_node;
while (n) {
struct ipoib_mcast *mcast;
int ret;
mcast = rb_entry(n, struct ipoib_mcast, rb_node);
ret = memcmp(mgid, mcast->mcmember.mgid.raw,
sizeof (union ib_gid));
if (ret < 0)
n = n->rb_left;
else if (ret > 0)
n = n->rb_right;
else
return mcast;
}
return NULL;
}
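/* Insert a multicast group into the per-device rb-tree, keyed by MGID */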
static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;
while (*n) {
struct ipoib_mcast *tmcast;
int ret;
pn = *n;
tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);
ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
sizeof (union ib_gid));
if (ret < 0)
n = &pn->rb_left;
else if (ret > 0)
n = &pn->rb_right;
else
return -EEXIST;
}
rb_link_node(&mcast->rb_node, pn, n);
rb_insert_color(&mcast->rb_node, &priv->multicast_tree);
return 0;
}
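/* Complete a multicast join: record the member record, attach the QP
 * (unless the group is send-only), create the AH and flush any queued
 * packets.
 */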
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
struct ib_sa_mcmember_rec *mcmember)
{
struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_ah *ah;
struct rdma_ah_attr av;
int ret;
int set_qkey = 0;
int mtu;
mcast->mcmember = *mcmember;
/* Set the multicast MTU and cached Q_Key before we attach if it's
* the broadcast group.
*/
if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
sizeof (union ib_gid))) {
spin_lock_irq(&priv->lock);
if (!priv->broadcast) {
spin_unlock_irq(&priv->lock);
return -EAGAIN;
}
/* update priv members according to the new mcast */
priv->broadcast->mcmember.qkey = mcmember->qkey;
priv->broadcast->mcmember.mtu = mcmember->mtu;
priv->broadcast->mcmember.traffic_class = mcmember->traffic_class;
priv->broadcast->mcmember.rate = mcmember->rate;
priv->broadcast->mcmember.sl = mcmember->sl;
priv->broadcast->mcmember.flow_label = mcmember->flow_label;
priv->broadcast->mcmember.hop_limit = mcmember->hop_limit;
/* assume that if the admin MTU and the mcast MTU are the same, both can be changed */
mtu = rdma_mtu_enum_to_int(priv->ca, priv->port,
priv->broadcast->mcmember.mtu);
if (priv->mcast_mtu == priv->admin_mtu)
priv->admin_mtu = IPOIB_UD_MTU(mtu);
priv->mcast_mtu = IPOIB_UD_MTU(mtu);
rn->mtu = priv->mcast_mtu;
priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
spin_unlock_irq(&priv->lock);
priv->tx_wr.remote_qkey = priv->qkey;
set_qkey = 1;
}
if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
ipoib_warn(priv, "multicast group %pI6 already attached\n",
mcast->mcmember.mgid.raw);
return 0;
}
ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
be16_to_cpu(mcast->mcmember.mlid),
set_qkey, priv->qkey);
if (ret < 0) {
ipoib_warn(priv, "couldn't attach QP to multicast group %pI6\n",
mcast->mcmember.mgid.raw);
clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
return ret;
}
}
memset(&av, 0, sizeof(av));
av.type = rdma_ah_find_type(priv->ca, priv->port);
rdma_ah_set_dlid(&av, be16_to_cpu(mcast->mcmember.mlid));
rdma_ah_set_port_num(&av, priv->port);
rdma_ah_set_sl(&av, mcast->mcmember.sl);
rdma_ah_set_static_rate(&av, mcast->mcmember.rate);
rdma_ah_set_grh(&av, &mcast->mcmember.mgid,
be32_to_cpu(mcast->mcmember.flow_label),
0, mcast->mcmember.hop_limit,
mcast->mcmember.traffic_class);
ah = ipoib_create_ah(dev, priv->pd, &av);
if (IS_ERR(ah)) {
ipoib_warn(priv, "ib_address_create failed %ld\n",
-PTR_ERR(ah));
/* use original error */
return PTR_ERR(ah);
}
spin_lock_irq(&priv->lock);
mcast->ah = ah;
spin_unlock_irq(&priv->lock);
ipoib_dbg_mcast(priv, "MGID %pI6 AV %p, LID 0x%04x, SL %d\n",
mcast->mcmember.mgid.raw,
mcast->ah->ah,
be16_to_cpu(mcast->mcmember.mlid),
mcast->mcmember.sl);
/* actually send any queued packets */
netif_tx_lock_bh(dev);
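	/*
	 * dev_queue_xmit() takes the device tx lock itself, so drop our tx
	 * lock around each transmit and re-take it before checking the
	 * queue again.
	 */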
while (!skb_queue_empty(&mcast->pkt_queue)) {
struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
netif_tx_unlock_bh(dev);
skb->dev = dev;
ret = dev_queue_xmit(skb);
if (ret)
ipoib_warn(priv, "%s:dev_queue_xmit failed to re-queue packet, ret:%d\n",
__func__, ret);
netif_tx_lock_bh(dev);
}
netif_tx_unlock_bh(dev);
return 0;
}
void ipoib_mcast_carrier_on_task(struct work_struct *work)
{
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
carrier_on_task);
struct ib_port_attr attr;
if (ib_query_port(priv->ca, priv->port, &attr) ||
attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
return;
}
/*
* Take rtnl_lock to avoid racing with ipoib_stop() and
* turning the carrier back on while a device is being
* removed. However, ipoib_stop() will attempt to flush
* the workqueue while holding the rtnl lock, so loop
* on trylock until either we get the lock or we see
* FLAG_OPER_UP go away as that signals that we are bailing
* and can safely ignore the carrier on work.
*/
while (!rtnl_trylock()) {
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return;
else
msleep(20);
}
if (!ipoib_cm_admin_enabled(priv->dev))
dev_set_mtu(priv->dev, min(priv->mcast_mtu, priv->admin_mtu));
netif_carrier_on(priv->dev);
rtnl_unlock();
}
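/*
 * Completion callback for ib_sa_join_multicast().  On success it finishes
 * the join; for the broadcast group it also schedules the carrier-on work
 * and requeues the join thread so the remaining groups get joined.  On
 * failure the join is retried with a backoff, except for send-only groups
 * that already failed once, whose queued packets are dropped.
 */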
static int ipoib_mcast_join_complete(int status,
struct ib_sa_multicast *multicast)
{
struct ipoib_mcast *mcast = multicast->context;
struct net_device *dev = mcast->dev;
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg_mcast(priv, "%sjoin completion for %pI6 (status %d)\n",
test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ?
"sendonly " : "",
mcast->mcmember.mgid.raw, status);
/* We trap for port events ourselves. */
if (status == -ENETRESET) {
status = 0;
goto out;
}
if (!status)
status = ipoib_mcast_join_finish(mcast, &multicast->rec);
if (!status) {
mcast->backoff = 1;
mcast->delay_until = jiffies;
/*
* Defer carrier on work to priv->wq to avoid a
* deadlock on rtnl_lock here. Requeue our multicast
* work too, which will end up happening right after
* our carrier on task work and will allow us to
* send out all of the non-broadcast joins
*/
if (mcast == priv->broadcast) {
spin_lock_irq(&priv->lock);
queue_work(priv->wq, &priv->carrier_on_task);
__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
goto out_locked;
}
} else {
bool silent_fail =
test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
status == -EINVAL;
if (mcast->logcount < 20) {
if (status == -ETIMEDOUT || status == -EAGAIN ||
silent_fail) {
ipoib_dbg_mcast(priv, "%smulticast join failed for %pI6, status %d\n",
test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
mcast->mcmember.mgid.raw, status);
} else {
ipoib_warn(priv, "%smulticast join failed for %pI6, status %d\n",
test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ? "sendonly " : "",
mcast->mcmember.mgid.raw, status);
}
if (!silent_fail)
mcast->logcount++;
}
if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) &&
mcast->backoff >= 2) {
/*
* We only retry sendonly joins once before we drop
* the packet and quit trying to deal with the
* group. However, we leave the group in the
* mcast list as an unjoined group. If we want to
* try joining again, we simply queue up a packet
* and restart the join thread. The empty queue
* is why the join thread ignores this group.
*/
mcast->backoff = 1;
netif_tx_lock_bh(dev);
while (!skb_queue_empty(&mcast->pkt_queue)) {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
}
netif_tx_unlock_bh(dev);
} else {
spin_lock_irq(&priv->lock);
/* Requeue this join task with a backoff delay */
__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
goto out_locked;
}
}
out:
spin_lock_irq(&priv->lock);
out_locked:
/*
* Make sure to set mcast->mc before we clear the busy flag to avoid
* racing with code that checks for BUSY before checking mcast->mc
*/
if (status)
mcast->mc = NULL;
else
mcast->mc = multicast;
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
spin_unlock_irq(&priv->lock);
complete(&mcast->done);
return status;
}
/*
* Caller must hold 'priv->lock'
*/
static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_sa_multicast *multicast;
struct ib_sa_mcmember_rec rec = {
.join_state = 1
};
ib_sa_comp_mask comp_mask;
int ret = 0;
if (!priv->broadcast ||
!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return -EINVAL;
init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
rec.mgid = mcast->mcmember.mgid;
rec.port_gid = priv->local_gid;
rec.pkey = cpu_to_be16(priv->pkey);
comp_mask =
IB_SA_MCMEMBER_REC_MGID |
IB_SA_MCMEMBER_REC_PORT_GID |
IB_SA_MCMEMBER_REC_PKEY |
IB_SA_MCMEMBER_REC_JOIN_STATE;
if (mcast != priv->broadcast) {
/*
* RFC 4391:
* The MGID MUST use the same P_Key, Q_Key, SL, MTU,
* and HopLimit as those used in the broadcast-GID. The rest
* of attributes SHOULD follow the values used in the
* broadcast-GID as well.
*/
comp_mask |=
IB_SA_MCMEMBER_REC_QKEY |
IB_SA_MCMEMBER_REC_MTU_SELECTOR |
IB_SA_MCMEMBER_REC_MTU |
IB_SA_MCMEMBER_REC_TRAFFIC_CLASS |
IB_SA_MCMEMBER_REC_RATE_SELECTOR |
IB_SA_MCMEMBER_REC_RATE |
IB_SA_MCMEMBER_REC_SL |
IB_SA_MCMEMBER_REC_FLOW_LABEL |
IB_SA_MCMEMBER_REC_HOP_LIMIT;
rec.qkey = priv->broadcast->mcmember.qkey;
rec.mtu_selector = IB_SA_EQ;
rec.mtu = priv->broadcast->mcmember.mtu;
rec.traffic_class = priv->broadcast->mcmember.traffic_class;
rec.rate_selector = IB_SA_EQ;
rec.rate = priv->broadcast->mcmember.rate;
rec.sl = priv->broadcast->mcmember.sl;
rec.flow_label = priv->broadcast->mcmember.flow_label;
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
		/*
		 * Send-only IB multicast joins work at the core IB layer but
		 * require specific SM support, so we can only use them when
		 * the current SM provides that feature.  If it does not, we
		 * emulate an Ethernet multicast send, which does not require
		 * a multicast subscription and will still send properly.  The
		 * most appropriate thing to do is to create the group if it
		 * doesn't exist, as that most closely emulates the behavior,
		 * from a user space application perspective, of Ethernet
		 * multicast operation.
		 */
if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
rec.join_state = SENDONLY_FULLMEMBER_JOIN;
}
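	/*
	 * ib_sa_join_multicast() may sleep (GFP_KERNEL allocation), so drop
	 * the spinlock around the call and re-take it to look at the result.
	 */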
spin_unlock_irq(&priv->lock);
multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
spin_lock_irq(&priv->lock);
if (IS_ERR(multicast)) {
ret = PTR_ERR(multicast);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
/* Requeue this join task with a backoff delay */
__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
spin_unlock_irq(&priv->lock);
complete(&mcast->done);
spin_lock_irq(&priv->lock);
}
return 0;
}
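/*
 * Work item that walks the multicast list and (re)starts joins: the
 * broadcast group is created and joined first, then every other unjoined
 * group that is either a full-member group or a send-only group with
 * packets waiting.
 */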
void ipoib_mcast_join_task(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, mcast_task.work);
struct net_device *dev = priv->dev;
struct ib_port_attr port_attr;
unsigned long delay_until = 0;
struct ipoib_mcast *mcast = NULL;
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
return;
if (ib_query_port(priv->ca, priv->port, &port_attr)) {
ipoib_dbg(priv, "ib_query_port() failed\n");
return;
}
if (port_attr.state != IB_PORT_ACTIVE) {
ipoib_dbg(priv, "port state is not ACTIVE (state = %d) suspending join task\n",
port_attr.state);
return;
}
priv->local_lid = port_attr.lid;
netif_addr_lock_bh(dev);
if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
netif_addr_unlock_bh(dev);
return;
}
netif_addr_unlock_bh(dev);
spin_lock_irq(&priv->lock);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
goto out;
if (!priv->broadcast) {
struct ipoib_mcast *broadcast;
broadcast = ipoib_mcast_alloc(dev);
if (!broadcast) {
ipoib_warn(priv, "failed to allocate broadcast group\n");
/*
* Restart us after a 1 second delay to retry
* creating our broadcast group and attaching to
* it. Until this succeeds, this ipoib dev is
* completely stalled (multicast wise).
*/
__ipoib_mcast_schedule_join_thread(priv, NULL, 1);
goto out;
}
memcpy(broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
sizeof (union ib_gid));
priv->broadcast = broadcast;
__ipoib_mcast_add(dev, priv->broadcast);
}
if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
if (IS_ERR_OR_NULL(priv->broadcast->mc) &&
!test_bit(IPOIB_MCAST_FLAG_BUSY, &priv->broadcast->flags)) {
mcast = priv->broadcast;
if (mcast->backoff > 1 &&
time_before(jiffies, mcast->delay_until)) {
delay_until = mcast->delay_until;
mcast = NULL;
}
}
goto out;
}
/*
* We'll never get here until the broadcast group is both allocated
* and attached
*/
list_for_each_entry(mcast, &priv->multicast_list, list) {
if (IS_ERR_OR_NULL(mcast->mc) &&
!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags) &&
(!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags) ||
!skb_queue_empty(&mcast->pkt_queue))) {
if (mcast->backoff == 1 ||
time_after_eq(jiffies, mcast->delay_until)) {
/* Found the next unjoined group */
if (ipoib_mcast_join(dev, mcast)) {
spin_unlock_irq(&priv->lock);
return;
}
} else if (!delay_until ||
time_before(mcast->delay_until, delay_until))
delay_until = mcast->delay_until;
}
}
mcast = NULL;
ipoib_dbg_mcast(priv, "successfully started all multicast joins\n");
out:
if (delay_until) {
cancel_delayed_work(&priv->mcast_task);
queue_delayed_work(priv->wq, &priv->mcast_task,
delay_until - jiffies);
}
if (mcast)
ipoib_mcast_join(dev, mcast);
spin_unlock_irq(&priv->lock);
}
void ipoib_mcast_start_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
unsigned long flags;
ipoib_dbg_mcast(priv, "starting multicast thread\n");
spin_lock_irqsave(&priv->lock, flags);
__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
spin_unlock_irqrestore(&priv->lock, flags);
}
void ipoib_mcast_stop_thread(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg_mcast(priv, "stopping multicast thread\n");
cancel_delayed_work_sync(&priv->mcast_task);
}
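/*
 * Release the SA multicast handle (if any) and, if the QP was attached to
 * the group, detach it again.
 */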
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
int ret = 0;
if (test_and_clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
ipoib_warn(priv, "ipoib_mcast_leave on an in-flight join\n");
if (!IS_ERR_OR_NULL(mcast->mc))
ib_sa_free_multicast(mcast->mc);
if (test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
ipoib_dbg_mcast(priv, "leaving MGID %pI6\n",
mcast->mcmember.mgid.raw);
/* Remove ourselves from the multicast group */
ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
be16_to_cpu(mcast->mcmember.mlid));
if (ret)
ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
} else if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
ipoib_dbg(priv, "leaving with no mcmember but not a "
"SENDONLY join\n");
return 0;
}
/*
* Check if the multicast group is sendonly. If so remove it from the maps
* and add to the remove list
*/
void ipoib_check_and_add_mcast_sendonly(struct ipoib_dev_priv *priv, u8 *mgid,
struct list_head *remove_list)
{
	/* Is this a multicast address? */
if (*mgid == 0xff) {
struct ipoib_mcast *mcast = __ipoib_mcast_find(priv->dev, mgid);
if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
list_del(&mcast->list);
rb_erase(&mcast->rb_node, &priv->multicast_tree);
list_add_tail(&mcast->list, remove_list);
}
}
}
void ipoib_mcast_remove_list(struct list_head *remove_list)
{
struct ipoib_mcast *mcast, *tmcast;
/*
* make sure the in-flight joins have finished before we attempt
* to leave
*/
list_for_each_entry_safe(mcast, tmcast, remove_list, list)
if (test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags))
wait_for_completion(&mcast->done);
list_for_each_entry_safe(mcast, tmcast, remove_list, list) {
ipoib_mcast_leave(mcast->dev, mcast);
ipoib_mcast_free(mcast);
}
}
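/*
 * Multicast transmit path.  If the group has no usable address handle yet,
 * queue the skb (up to IPOIB_MAX_MCAST_QUEUE) and kick the join thread;
 * otherwise send it right away on the group's AH.
 */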
void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_mcast *mcast;
unsigned long flags;
void *mgid = daddr + 4;
spin_lock_irqsave(&priv->lock, flags);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) ||
!priv->broadcast ||
!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
goto unlock;
}
mcast = __ipoib_mcast_find(dev, mgid);
if (!mcast || !mcast->ah) {
if (!mcast) {
/* Let's create a new send only group now */
ipoib_dbg_mcast(priv, "setting up send only multicast group for %pI6\n",
mgid);
mcast = ipoib_mcast_alloc(dev);
if (!mcast) {
ipoib_warn(priv, "unable to allocate memory "
"for multicast structure\n");
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
goto unlock;
}
set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
memcpy(mcast->mcmember.mgid.raw, mgid,
sizeof (union ib_gid));
__ipoib_mcast_add(dev, mcast);
list_add_tail(&mcast->list, &priv->multicast_list);
}
if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE) {
/* put pseudoheader back on for next time */
skb_push(skb, sizeof(struct ipoib_pseudo_header));
skb_queue_tail(&mcast->pkt_queue, skb);
} else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
if (!test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
}
} else {
struct ipoib_neigh *neigh;
spin_unlock_irqrestore(&priv->lock, flags);
neigh = ipoib_neigh_get(dev, daddr);
spin_lock_irqsave(&priv->lock, flags);
if (!neigh) {
neigh = ipoib_neigh_alloc(daddr, dev);
			/* Make sure the neigh is added to the mcast's
			 * neigh_list only once.
			 */
if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
neigh->ah->valid = 1;
list_add_tail(&neigh->list, &mcast->neigh_list);
}
}
spin_unlock_irqrestore(&priv->lock, flags);
mcast->ah->last_send = rn->send(dev, skb, mcast->ah->ah,
IB_MULTICAST_QPN);
if (neigh)
ipoib_neigh_put(neigh);
return;
}
unlock:
spin_unlock_irqrestore(&priv->lock, flags);
}
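/*
 * Detach and free every multicast group, including the broadcast group.
 */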
void ipoib_mcast_dev_flush(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
LIST_HEAD(remove_list);
struct ipoib_mcast *mcast, *tmcast;
unsigned long flags;
mutex_lock(&priv->mcast_mutex);
ipoib_dbg_mcast(priv, "flushing multicast list\n");
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
list_del(&mcast->list);
rb_erase(&mcast->rb_node, &priv->multicast_tree);
list_add_tail(&mcast->list, &remove_list);
}
if (priv->broadcast) {
rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
list_add_tail(&priv->broadcast->list, &remove_list);
priv->broadcast = NULL;
}
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_mcast_remove_list(&remove_list);
mutex_unlock(&priv->mcast_mutex);
}
static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
{
/* reserved QPN, prefix, scope */
if (memcmp(addr, broadcast, 6))
return 0;
/* signature lower, pkey */
if (memcmp(addr + 7, broadcast + 7, 3))
return 0;
return 1;
}
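/*
 * Resync our multicast state with the kernel's hardware multicast list:
 * create entries for newly requested groups and move groups that are no
 * longer requested (and are not send-only) onto a remove list.
 */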
void ipoib_mcast_restart_task(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, restart_task);
struct net_device *dev = priv->dev;
struct netdev_hw_addr *ha;
struct ipoib_mcast *mcast, *tmcast;
LIST_HEAD(remove_list);
struct ib_sa_mcmember_rec rec;
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
		/*
		 * Shortcut: on shutdown the flush is called next anyway,
		 * just let it do all the work.
		 */
return;
ipoib_dbg_mcast(priv, "restarting multicast task\n");
netif_addr_lock_bh(dev);
spin_lock_irq(&priv->lock);
/*
* Unfortunately, the networking core only gives us a list of all of
* the multicast hardware addresses. We need to figure out which ones
* are new and which ones have been removed
*/
/* Clear out the found flag */
list_for_each_entry(mcast, &priv->multicast_list, list)
clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	/* Mark the entries that are found and create any that don't exist yet */
netdev_for_each_mc_addr(ha, dev) {
union ib_gid mgid;
if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
continue;
memcpy(mgid.raw, ha->addr + 4, sizeof(mgid));
mcast = __ipoib_mcast_find(dev, &mgid);
if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
struct ipoib_mcast *nmcast;
			/* ignore groups that are joined directly by userspace */
if (test_bit(IPOIB_FLAG_UMCAST, &priv->flags) &&
!ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
ipoib_dbg_mcast(priv, "ignoring multicast entry for mgid %pI6\n",
mgid.raw);
continue;
}
/* Not found or send-only group, let's add a new entry */
ipoib_dbg_mcast(priv, "adding multicast entry for mgid %pI6\n",
mgid.raw);
nmcast = ipoib_mcast_alloc(dev);
if (!nmcast) {
ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
continue;
}
set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);
nmcast->mcmember.mgid = mgid;
if (mcast) {
/* Destroy the send only entry */
list_move_tail(&mcast->list, &remove_list);
rb_replace_node(&mcast->rb_node,
&nmcast->rb_node,
&priv->multicast_tree);
} else
__ipoib_mcast_add(dev, nmcast);
list_add_tail(&nmcast->list, &priv->multicast_list);
}
if (mcast)
set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
}
	/* Remove all of the entries that no longer exist */
list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
ipoib_dbg_mcast(priv, "deleting multicast group %pI6\n",
mcast->mcmember.mgid.raw);
rb_erase(&mcast->rb_node, &priv->multicast_tree);
/* Move to the remove list */
list_move_tail(&mcast->list, &remove_list);
}
}
spin_unlock_irq(&priv->lock);
netif_addr_unlock_bh(dev);
ipoib_mcast_remove_list(&remove_list);
/*
* Double check that we are still up
*/
if (test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
spin_lock_irq(&priv->lock);
__ipoib_mcast_schedule_join_thread(priv, NULL, 0);
spin_unlock_irq(&priv->lock);
}
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
{
struct ipoib_mcast_iter *iter;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return NULL;
iter->dev = dev;
memset(iter->mgid.raw, 0, 16);
if (ipoib_mcast_iter_next(iter)) {
kfree(iter);
return NULL;
}
return iter;
}
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
struct rb_node *n;
struct ipoib_mcast *mcast;
int ret = 1;
spin_lock_irq(&priv->lock);
n = rb_first(&priv->multicast_tree);
while (n) {
mcast = rb_entry(n, struct ipoib_mcast, rb_node);
if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
sizeof (union ib_gid)) < 0) {
iter->mgid = mcast->mcmember.mgid;
iter->created = mcast->created;
iter->queuelen = skb_queue_len(&mcast->pkt_queue);
iter->complete = !!mcast->ah;
iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));
ret = 0;
break;
}
n = rb_next(n);
}
spin_unlock_irq(&priv->lock);
return ret;
}
void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
union ib_gid *mgid,
unsigned long *created,
unsigned int *queuelen,
unsigned int *complete,
unsigned int *send_only)
{
*mgid = iter->mgid;
*created = iter->created;
*queuelen = iter->queuelen;
*complete = iter->complete;
*send_only = iter->send_only;
}
#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
| linux-master | drivers/infiniband/ulp/ipoib/ipoib_multicast.c |
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "ipoib.h"
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/if_arp.h> /* For ARPHRD_xxx */
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/jhash.h>
#include <net/arp.h>
#include <net/addrconf.h>
#include <linux/inetdevice.h>
#include <rdma/ib_cache.h>
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
MODULE_LICENSE("Dual BSD/GPL");
int ipoib_sendq_size __read_mostly = IPOIB_TX_RING_SIZE;
int ipoib_recvq_size __read_mostly = IPOIB_RX_RING_SIZE;
module_param_named(send_queue_size, ipoib_sendq_size, int, 0444);
MODULE_PARM_DESC(send_queue_size, "Number of descriptors in send queue");
module_param_named(recv_queue_size, ipoib_recvq_size, int, 0444);
MODULE_PARM_DESC(recv_queue_size, "Number of descriptors in receive queue");
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_debug_level;
module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
struct ipoib_path_iter {
struct net_device *dev;
struct ipoib_path path;
};
static const u8 ipv4_bcast_addr[] = {
0x00, 0xff, 0xff, 0xff,
0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};
struct workqueue_struct *ipoib_workqueue;
struct ib_sa_client ipoib_sa_client;
static int ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device, void *client_data);
static void ipoib_neigh_reclaim(struct rcu_head *rp);
static struct net_device *ipoib_get_net_dev_by_params(
struct ib_device *dev, u32 port, u16 pkey,
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data);
static int ipoib_set_mac(struct net_device *dev, void *addr);
static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd);
static struct ib_client ipoib_client = {
.name = "ipoib",
.add = ipoib_add_one,
.remove = ipoib_remove_one,
.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static int ipoib_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct netdev_notifier_info *ni = ptr;
struct net_device *dev = ni->dev;
if (dev->netdev_ops->ndo_open != ipoib_open)
return NOTIFY_DONE;
switch (event) {
case NETDEV_REGISTER:
ipoib_create_debug_files(dev);
break;
case NETDEV_CHANGENAME:
ipoib_delete_debug_files(dev);
ipoib_create_debug_files(dev);
break;
case NETDEV_UNREGISTER:
ipoib_delete_debug_files(dev);
break;
}
return NOTIFY_DONE;
}
#endif
int ipoib_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg(priv, "bringing up interface\n");
netif_carrier_off(dev);
set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
if (ipoib_ib_dev_open(dev)) {
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
return 0;
goto err_disable;
}
ipoib_ib_dev_up(dev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
/* Bring up any child interfaces too */
down_read(&priv->vlan_rwsem);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;
flags = cpriv->dev->flags;
if (flags & IFF_UP)
continue;
dev_change_flags(cpriv->dev, flags | IFF_UP, NULL);
}
up_read(&priv->vlan_rwsem);
} else if (priv->parent) {
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
if (!test_bit(IPOIB_FLAG_ADMIN_UP, &ppriv->flags))
ipoib_dbg(priv, "parent device %s is not up, so child device may be not functioning.\n",
ppriv->dev->name);
}
netif_start_queue(dev);
return 0;
err_disable:
clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
return -EINVAL;
}
static int ipoib_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg(priv, "stopping interface\n");
clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);
netif_stop_queue(dev);
ipoib_ib_dev_down(dev);
ipoib_ib_dev_stop(dev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
struct ipoib_dev_priv *cpriv;
/* Bring down any child interfaces too */
down_read(&priv->vlan_rwsem);
list_for_each_entry(cpriv, &priv->child_intfs, list) {
int flags;
flags = cpriv->dev->flags;
if (!(flags & IFF_UP))
continue;
dev_change_flags(cpriv->dev, flags & ~IFF_UP, NULL);
}
up_read(&priv->vlan_rwsem);
}
return 0;
}
static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
return features;
}
static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret = 0;
/* dev->mtu > 2K ==> connected mode */
if (ipoib_cm_admin_enabled(dev)) {
if (new_mtu > ipoib_cm_max_mtu(dev))
return -EINVAL;
if (new_mtu > priv->mcast_mtu)
ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n",
priv->mcast_mtu);
dev->mtu = new_mtu;
return 0;
}
if (new_mtu < (ETH_MIN_MTU + IPOIB_ENCAP_LEN) ||
new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
return -EINVAL;
priv->admin_mtu = new_mtu;
if (priv->mcast_mtu < priv->admin_mtu)
ipoib_dbg(priv, "MTU must be smaller than the underlying "
"link layer MTU - 4 (%u)\n", priv->mcast_mtu);
new_mtu = min(priv->mcast_mtu, priv->admin_mtu);
if (priv->rn_ops->ndo_change_mtu) {
bool carrier_status = netif_carrier_ok(dev);
netif_carrier_off(dev);
		/* notify the lower level of the real MTU */
ret = priv->rn_ops->ndo_change_mtu(dev, new_mtu);
if (carrier_status)
netif_carrier_on(dev);
} else {
dev->mtu = new_mtu;
}
return ret;
}
static void ipoib_get_stats(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (priv->rn_ops->ndo_get_stats64)
priv->rn_ops->ndo_get_stats64(dev, stats);
else
netdev_stats_to_stats64(stats, &dev->stats);
}
/* Called with an RCU read lock taken */
static bool ipoib_is_dev_match_addr_rcu(const struct sockaddr *addr,
struct net_device *dev)
{
struct net *net = dev_net(dev);
struct in_device *in_dev;
struct sockaddr_in *addr_in = (struct sockaddr_in *)addr;
struct sockaddr_in6 *addr_in6 = (struct sockaddr_in6 *)addr;
__be32 ret_addr;
switch (addr->sa_family) {
case AF_INET:
in_dev = in_dev_get(dev);
if (!in_dev)
return false;
ret_addr = inet_confirm_addr(net, in_dev, 0,
addr_in->sin_addr.s_addr,
RT_SCOPE_HOST);
in_dev_put(in_dev);
if (ret_addr)
return true;
break;
case AF_INET6:
if (IS_ENABLED(CONFIG_IPV6) &&
ipv6_chk_addr(net, &addr_in6->sin6_addr, dev, 1))
return true;
break;
}
return false;
}
/*
* Find the master net_device on top of the given net_device.
* @dev: base IPoIB net_device
*
* Returns the master net_device with a reference held, or the same net_device
* if no master exists.
*/
static struct net_device *ipoib_get_master_net_dev(struct net_device *dev)
{
struct net_device *master;
rcu_read_lock();
master = netdev_master_upper_dev_get_rcu(dev);
if (master)
dev_hold(master);
rcu_read_unlock();
if (master)
return master;
dev_hold(dev);
return dev;
}
struct ipoib_walk_data {
const struct sockaddr *addr;
struct net_device *result;
};
static int ipoib_upper_walk(struct net_device *upper,
struct netdev_nested_priv *priv)
{
struct ipoib_walk_data *data = (struct ipoib_walk_data *)priv->data;
int ret = 0;
if (ipoib_is_dev_match_addr_rcu(data->addr, upper)) {
dev_hold(upper);
data->result = upper;
ret = 1;
}
return ret;
}
/**
* ipoib_get_net_dev_match_addr - Find a net_device matching
* the given address, which is an upper device of the given net_device.
*
* @addr: IP address to look for.
* @dev: base IPoIB net_device
*
 * If found, returns the net_device with a reference held.  Otherwise
 * returns NULL.
*/
static struct net_device *ipoib_get_net_dev_match_addr(
const struct sockaddr *addr, struct net_device *dev)
{
struct netdev_nested_priv priv;
struct ipoib_walk_data data = {
.addr = addr,
};
priv.data = (void *)&data;
rcu_read_lock();
if (ipoib_is_dev_match_addr_rcu(addr, dev)) {
dev_hold(dev);
data.result = dev;
goto out;
}
netdev_walk_all_upper_dev_rcu(dev, ipoib_upper_walk, &priv);
out:
rcu_read_unlock();
return data.result;
}
/* Returns the number of IPoIB netdevs on top of a given IPoIB device that
 * match a pkey_index and address, if one exists.
 *
 * @found_net_dev: contains a matching net_device if the return value >= 1,
 * with a reference held. */
static int ipoib_match_gid_pkey_addr(struct ipoib_dev_priv *priv,
const union ib_gid *gid,
u16 pkey_index,
const struct sockaddr *addr,
int nesting,
struct net_device **found_net_dev)
{
struct ipoib_dev_priv *child_priv;
struct net_device *net_dev = NULL;
int matches = 0;
if (priv->pkey_index == pkey_index &&
(!gid || !memcmp(gid, &priv->local_gid, sizeof(*gid)))) {
if (!addr) {
net_dev = ipoib_get_master_net_dev(priv->dev);
} else {
/* Verify the net_device matches the IP address, as
* IPoIB child devices currently share a GID. */
net_dev = ipoib_get_net_dev_match_addr(addr, priv->dev);
}
if (net_dev) {
if (!*found_net_dev)
*found_net_dev = net_dev;
else
dev_put(net_dev);
++matches;
}
}
/* Check child interfaces */
down_read_nested(&priv->vlan_rwsem, nesting);
list_for_each_entry(child_priv, &priv->child_intfs, list) {
matches += ipoib_match_gid_pkey_addr(child_priv, gid,
pkey_index, addr,
nesting + 1,
found_net_dev);
if (matches > 1)
break;
}
up_read(&priv->vlan_rwsem);
return matches;
}
/* Returns the number of matching net_devs found (between 0 and 2). Also
* return the matching net_device in the @net_dev parameter, holding a
* reference to the net_device, if the number of matches >= 1 */
static int __ipoib_get_net_dev_by_params(struct list_head *dev_list, u32 port,
u16 pkey_index,
const union ib_gid *gid,
const struct sockaddr *addr,
struct net_device **net_dev)
{
struct ipoib_dev_priv *priv;
int matches = 0;
*net_dev = NULL;
list_for_each_entry(priv, dev_list, list) {
if (priv->port != port)
continue;
matches += ipoib_match_gid_pkey_addr(priv, gid, pkey_index,
addr, 0, net_dev);
if (matches > 1)
break;
}
return matches;
}
static struct net_device *ipoib_get_net_dev_by_params(
struct ib_device *dev, u32 port, u16 pkey,
const union ib_gid *gid, const struct sockaddr *addr,
void *client_data)
{
struct net_device *net_dev;
struct list_head *dev_list = client_data;
u16 pkey_index;
int matches;
int ret;
if (!rdma_protocol_ib(dev, port))
return NULL;
ret = ib_find_cached_pkey(dev, port, pkey, &pkey_index);
if (ret)
return NULL;
/* See if we can find a unique device matching the L2 parameters */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, NULL, &net_dev);
switch (matches) {
case 0:
return NULL;
case 1:
return net_dev;
}
dev_put(net_dev);
/* Couldn't find a unique device with L2 parameters only. Use L3
* address to uniquely match the net device */
matches = __ipoib_get_net_dev_by_params(dev_list, port, pkey_index,
gid, addr, &net_dev);
switch (matches) {
case 0:
return NULL;
default:
dev_warn_ratelimited(&dev->dev,
"duplicate IP address detected\n");
fallthrough;
case 1:
return net_dev;
}
}
int ipoib_set_mode(struct net_device *dev, const char *buf)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if ((test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
!strcmp(buf, "connected\n")) ||
(!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags) &&
!strcmp(buf, "datagram\n"))) {
return 0;
}
/* flush paths if we switch modes so that connections are restarted */
if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
ipoib_warn(priv, "enabling connected mode "
"will cause multicast packet drops\n");
netdev_update_features(dev);
dev_set_mtu(dev, ipoib_cm_max_mtu(dev));
netif_set_real_num_tx_queues(dev, 1);
rtnl_unlock();
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
ipoib_flush_paths(dev);
return (!rtnl_trylock()) ? -EBUSY : 0;
}
if (!strcmp(buf, "datagram\n")) {
clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
netdev_update_features(dev);
dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
netif_set_real_num_tx_queues(dev, dev->num_tx_queues);
rtnl_unlock();
ipoib_flush_paths(dev);
return (!rtnl_trylock()) ? -EBUSY : 0;
}
return -EINVAL;
}
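/*
 * Look up a path by destination GID in the per-device path rb-tree;
 * returns NULL if no such path exists.  Callers hold priv->lock.
 */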
struct ipoib_path *__path_find(struct net_device *dev, void *gid)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rb_node *n = priv->path_tree.rb_node;
struct ipoib_path *path;
int ret;
while (n) {
path = rb_entry(n, struct ipoib_path, rb_node);
ret = memcmp(gid, path->pathrec.dgid.raw,
sizeof (union ib_gid));
if (ret < 0)
n = n->rb_left;
else if (ret > 0)
n = n->rb_right;
else
return path;
}
return NULL;
}
static int __path_add(struct net_device *dev, struct ipoib_path *path)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rb_node **n = &priv->path_tree.rb_node;
struct rb_node *pn = NULL;
struct ipoib_path *tpath;
int ret;
while (*n) {
pn = *n;
tpath = rb_entry(pn, struct ipoib_path, rb_node);
ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
sizeof (union ib_gid));
if (ret < 0)
n = &pn->rb_left;
else if (ret > 0)
n = &pn->rb_right;
else
return -EEXIST;
}
rb_link_node(&path->rb_node, pn, n);
rb_insert_color(&path->rb_node, &priv->path_tree);
list_add_tail(&path->list, &priv->path_list);
return 0;
}
static void path_free(struct net_device *dev, struct ipoib_path *path)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(&path->queue)))
dev_kfree_skb_irq(skb);
ipoib_dbg(ipoib_priv(dev), "%s\n", __func__);
	/* remove all neighs connected to this path */
ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
if (path->ah)
ipoib_put_ah(path->ah);
kfree(path);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
{
struct ipoib_path_iter *iter;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return NULL;
iter->dev = dev;
memset(iter->path.pathrec.dgid.raw, 0, 16);
if (ipoib_path_iter_next(iter)) {
kfree(iter);
return NULL;
}
return iter;
}
int ipoib_path_iter_next(struct ipoib_path_iter *iter)
{
struct ipoib_dev_priv *priv = ipoib_priv(iter->dev);
struct rb_node *n;
struct ipoib_path *path;
int ret = 1;
spin_lock_irq(&priv->lock);
n = rb_first(&priv->path_tree);
while (n) {
path = rb_entry(n, struct ipoib_path, rb_node);
if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
sizeof (union ib_gid)) < 0) {
iter->path = *path;
ret = 0;
break;
}
n = rb_next(n);
}
spin_unlock_irq(&priv->lock);
return ret;
}
void ipoib_path_iter_read(struct ipoib_path_iter *iter,
struct ipoib_path *path)
{
*path = iter->path;
}
#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
void ipoib_mark_paths_invalid(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_path *path, *tp;
spin_lock_irq(&priv->lock);
list_for_each_entry_safe(path, tp, &priv->path_list, list) {
ipoib_dbg(priv, "mark path LID 0x%08x GID %pI6 invalid\n",
be32_to_cpu(sa_path_get_dlid(&path->pathrec)),
path->pathrec.dgid.raw);
if (path->ah)
path->ah->valid = 0;
}
spin_unlock_irq(&priv->lock);
}
static void push_pseudo_header(struct sk_buff *skb, const char *daddr)
{
struct ipoib_pseudo_header *phdr;
phdr = skb_push(skb, sizeof(*phdr));
memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
}
void ipoib_flush_paths(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_path *path, *tp;
LIST_HEAD(remove_list);
unsigned long flags;
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
list_splice_init(&priv->path_list, &remove_list);
list_for_each_entry(path, &remove_list, list)
rb_erase(&path->rb_node, &priv->path_tree);
list_for_each_entry_safe(path, tp, &remove_list, list) {
if (path->query)
ib_sa_cancel_query(path->query_id, path->query);
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
wait_for_completion(&path->done);
path_free(dev, path);
netif_tx_lock_bh(dev);
spin_lock_irqsave(&priv->lock, flags);
}
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
}
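/*
 * Completion handler for the SA path record query: build an address handle
 * from the returned record, hand it to every neighbour waiting on this
 * path and requeue the skbs that were buffered while the lookup was in
 * flight.
 */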
static void path_rec_completion(int status,
struct sa_path_rec *pathrec,
unsigned int num_prs, void *path_ptr)
{
struct ipoib_path *path = path_ptr;
struct net_device *dev = path->dev;
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_ah *ah = NULL;
struct ipoib_ah *old_ah = NULL;
struct ipoib_neigh *neigh, *tn;
struct sk_buff_head skqueue;
struct sk_buff *skb;
unsigned long flags;
if (!status)
ipoib_dbg(priv, "PathRec LID 0x%04x for GID %pI6\n",
be32_to_cpu(sa_path_get_dlid(pathrec)),
pathrec->dgid.raw);
else
ipoib_dbg(priv, "PathRec status %d for GID %pI6\n",
status, path->pathrec.dgid.raw);
skb_queue_head_init(&skqueue);
if (!status) {
struct rdma_ah_attr av;
if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
pathrec, &av, NULL)) {
ah = ipoib_create_ah(dev, priv->pd, &av);
rdma_destroy_ah_attr(&av);
}
}
spin_lock_irqsave(&priv->lock, flags);
if (!IS_ERR_OR_NULL(ah)) {
		/*
		 * pathrec.dgid is used as the database key from the LLADDR;
		 * it must remain unchanged even if the SA returns a different
		 * GID to use in the AH.
		 */
if (memcmp(pathrec->dgid.raw, path->pathrec.dgid.raw,
sizeof(union ib_gid))) {
ipoib_dbg(
priv,
"%s got PathRec for gid %pI6 while asked for %pI6\n",
dev->name, pathrec->dgid.raw,
path->pathrec.dgid.raw);
memcpy(pathrec->dgid.raw, path->pathrec.dgid.raw,
sizeof(union ib_gid));
}
path->pathrec = *pathrec;
old_ah = path->ah;
path->ah = ah;
ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
ah, be32_to_cpu(sa_path_get_dlid(pathrec)),
pathrec->sl);
while ((skb = __skb_dequeue(&path->queue)))
__skb_queue_tail(&skqueue, skb);
list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
if (neigh->ah) {
WARN_ON(neigh->ah != old_ah);
/*
* Dropping the ah reference inside
* priv->lock is safe here, because we
* will hold one more reference from
* the original value of path->ah (ie
* old_ah).
*/
ipoib_put_ah(neigh->ah);
}
kref_get(&path->ah->ref);
neigh->ah = path->ah;
if (ipoib_cm_enabled(dev, neigh->daddr)) {
if (!ipoib_cm_get(neigh))
ipoib_cm_set(neigh, ipoib_cm_create_tx(dev,
path,
neigh));
if (!ipoib_cm_get(neigh)) {
ipoib_neigh_free(neigh);
continue;
}
}
while ((skb = __skb_dequeue(&neigh->queue)))
__skb_queue_tail(&skqueue, skb);
}
path->ah->valid = 1;
}
path->query = NULL;
complete(&path->done);
spin_unlock_irqrestore(&priv->lock, flags);
if (IS_ERR_OR_NULL(ah))
ipoib_del_neighs_by_gid(dev, path->pathrec.dgid.raw);
if (old_ah)
ipoib_put_ah(old_ah);
while ((skb = __skb_dequeue(&skqueue))) {
int ret;
skb->dev = dev;
ret = dev_queue_xmit(skb);
if (ret)
ipoib_warn(priv, "%s: dev_queue_xmit failed to re-queue packet, ret:%d\n",
__func__, ret);
}
}
static void init_path_rec(struct ipoib_dev_priv *priv, struct ipoib_path *path,
void *gid)
{
path->dev = priv->dev;
if (rdma_cap_opa_ah(priv->ca, priv->port))
path->pathrec.rec_type = SA_PATH_REC_TYPE_OPA;
else
path->pathrec.rec_type = SA_PATH_REC_TYPE_IB;
memcpy(path->pathrec.dgid.raw, gid, sizeof(union ib_gid));
path->pathrec.sgid = priv->local_gid;
path->pathrec.pkey = cpu_to_be16(priv->pkey);
path->pathrec.numb_path = 1;
path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
}
static struct ipoib_path *path_rec_create(struct net_device *dev, void *gid)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_path *path;
if (!priv->broadcast)
return NULL;
path = kzalloc(sizeof(*path), GFP_ATOMIC);
if (!path)
return NULL;
skb_queue_head_init(&path->queue);
INIT_LIST_HEAD(&path->neigh_list);
init_path_rec(priv, path, gid);
return path;
}
static int path_rec_start(struct net_device *dev,
struct ipoib_path *path)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg(priv, "Start path record lookup for %pI6\n",
path->pathrec.dgid.raw);
init_completion(&path->done);
path->query_id =
ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
&path->pathrec,
IB_SA_PATH_REC_DGID |
IB_SA_PATH_REC_SGID |
IB_SA_PATH_REC_NUMB_PATH |
IB_SA_PATH_REC_TRAFFIC_CLASS |
IB_SA_PATH_REC_PKEY,
1000, GFP_ATOMIC,
path_rec_completion,
path, &path->query);
if (path->query_id < 0) {
ipoib_warn(priv, "ib_sa_path_rec_get failed: %d\n", path->query_id);
path->query = NULL;
complete(&path->done);
return path->query_id;
}
return 0;
}
static void neigh_refresh_path(struct ipoib_neigh *neigh, u8 *daddr,
struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_path *path;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
path = __path_find(dev, daddr + 4);
if (!path)
goto out;
if (!path->query)
path_rec_start(dev, path);
out:
spin_unlock_irqrestore(&priv->lock, flags);
}
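/*
 * Create (or find) a neighbour for daddr and hook it onto the path for its
 * GID.  The skb is sent immediately if the path is already resolved,
 * queued on the neigh until the path record lookup completes, or dropped.
 * Returns a referenced neigh only when it already existed and the caller
 * still has to transmit the skb itself.
 */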
static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_path *path;
struct ipoib_neigh *neigh;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
neigh = ipoib_neigh_alloc(daddr, dev);
if (!neigh) {
spin_unlock_irqrestore(&priv->lock, flags);
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
return NULL;
}
	/* To avoid a race condition, make sure that the
	 * neigh is added only once.
	 */
if (unlikely(!list_empty(&neigh->list))) {
spin_unlock_irqrestore(&priv->lock, flags);
return neigh;
}
path = __path_find(dev, daddr + 4);
if (!path) {
path = path_rec_create(dev, daddr + 4);
if (!path)
goto err_path;
__path_add(dev, path);
}
list_add_tail(&neigh->list, &path->neigh_list);
if (path->ah && path->ah->valid) {
kref_get(&path->ah->ref);
neigh->ah = path->ah;
if (ipoib_cm_enabled(dev, neigh->daddr)) {
if (!ipoib_cm_get(neigh))
ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
if (!ipoib_cm_get(neigh)) {
ipoib_neigh_free(neigh);
goto err_drop;
}
if (skb_queue_len(&neigh->queue) <
IPOIB_MAX_PATH_REC_QUEUE) {
push_pseudo_header(skb, neigh->daddr);
__skb_queue_tail(&neigh->queue, skb);
} else {
ipoib_warn(priv, "queue length limit %d. Packet drop.\n",
skb_queue_len(&neigh->queue));
goto err_drop;
}
} else {
spin_unlock_irqrestore(&priv->lock, flags);
path->ah->last_send = rn->send(dev, skb, path->ah->ah,
IPOIB_QPN(daddr));
ipoib_neigh_put(neigh);
return NULL;
}
} else {
neigh->ah = NULL;
if (!path->query && path_rec_start(dev, path))
goto err_path;
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
push_pseudo_header(skb, neigh->daddr);
__skb_queue_tail(&neigh->queue, skb);
} else {
goto err_drop;
}
}
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
return NULL;
err_path:
ipoib_neigh_free(neigh);
err_drop:
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_neigh_put(neigh);
return NULL;
}
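/*
 * Transmit a unicast ARP/RARP skb straight through a path lookup instead
 * of a neighbour entry, queueing it on the path until the record resolves.
 */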
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
struct ipoib_pseudo_header *phdr)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_path *path;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
	/* no broadcast group means all paths are (or will become) invalid */
if (!priv->broadcast)
goto drop_and_unlock;
path = __path_find(dev, phdr->hwaddr + 4);
if (!path || !path->ah || !path->ah->valid) {
if (!path) {
path = path_rec_create(dev, phdr->hwaddr + 4);
if (!path)
goto drop_and_unlock;
__path_add(dev, path);
} else {
/*
* make sure there are no changes in the existing
* path record
*/
init_path_rec(priv, path, phdr->hwaddr + 4);
}
if (!path->query && path_rec_start(dev, path)) {
goto drop_and_unlock;
}
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
push_pseudo_header(skb, phdr->hwaddr);
__skb_queue_tail(&path->queue, skb);
goto unlock;
} else {
goto drop_and_unlock;
}
}
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_dbg(priv, "Send unicast ARP to %08x\n",
be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
path->ah->last_send = rn->send(dev, skb, path->ah->ah,
IPOIB_QPN(phdr->hwaddr));
return;
drop_and_unlock:
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
unlock:
spin_unlock_irqrestore(&priv->lock, flags);
}
static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_neigh *neigh;
struct ipoib_pseudo_header *phdr;
struct ipoib_header *header;
unsigned long flags;
phdr = (struct ipoib_pseudo_header *) skb->data;
skb_pull(skb, sizeof(*phdr));
header = (struct ipoib_header *) skb->data;
if (unlikely(phdr->hwaddr[4] == 0xff)) {
/* multicast, arrange "if" according to probability */
if ((header->proto != htons(ETH_P_IP)) &&
(header->proto != htons(ETH_P_IPV6)) &&
(header->proto != htons(ETH_P_ARP)) &&
(header->proto != htons(ETH_P_RARP)) &&
(header->proto != htons(ETH_P_TIPC))) {
/* ethertype not supported by IPoIB */
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
		/* Add in the P_Key for multicast */
phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
phdr->hwaddr[9] = priv->pkey & 0xff;
neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (likely(neigh))
goto send_using_neigh;
ipoib_mcast_send(dev, phdr->hwaddr, skb);
return NETDEV_TX_OK;
}
/* unicast, arrange "switch" according to probability */
switch (header->proto) {
case htons(ETH_P_IP):
case htons(ETH_P_IPV6):
case htons(ETH_P_TIPC):
neigh = ipoib_neigh_get(dev, phdr->hwaddr);
if (unlikely(!neigh)) {
neigh = neigh_add_path(skb, phdr->hwaddr, dev);
if (likely(!neigh))
return NETDEV_TX_OK;
}
break;
case htons(ETH_P_ARP):
case htons(ETH_P_RARP):
		/* unicast ARP and RARP should always perform a path lookup */
unicast_arp_send(skb, dev, phdr);
return NETDEV_TX_OK;
default:
/* ethertype not supported by IPoIB */
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
send_using_neigh:
/* note we now hold a ref to neigh */
if (ipoib_cm_get(neigh)) {
if (ipoib_cm_up(neigh)) {
ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
goto unref;
}
} else if (neigh->ah && neigh->ah->valid) {
neigh->ah->last_send = rn->send(dev, skb, neigh->ah->ah,
IPOIB_QPN(phdr->hwaddr));
goto unref;
} else if (neigh->ah) {
neigh_refresh_path(neigh, phdr->hwaddr, dev);
}
if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
push_pseudo_header(skb, phdr->hwaddr);
spin_lock_irqsave(&priv->lock, flags);
__skb_queue_tail(&neigh->queue, skb);
spin_unlock_irqrestore(&priv->lock, flags);
} else {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
unref:
ipoib_neigh_put(neigh);
return NETDEV_TX_OK;
}
static void ipoib_timeout(struct net_device *dev, unsigned int txqueue)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
if (rn->tx_timeout) {
rn->tx_timeout(dev, txqueue);
return;
}
ipoib_warn(priv, "transmit timeout: latency %d msecs\n",
jiffies_to_msecs(jiffies - dev_trans_start(dev)));
ipoib_warn(priv,
"queue stopped %d, tx_head %u, tx_tail %u, global_tx_head %u, global_tx_tail %u\n",
netif_queue_stopped(dev), priv->tx_head, priv->tx_tail,
priv->global_tx_head, priv->global_tx_tail);
/* XXX reset QP, etc. */
}
static int ipoib_hard_header(struct sk_buff *skb,
struct net_device *dev,
unsigned short type,
const void *daddr,
const void *saddr,
unsigned int len)
{
struct ipoib_header *header;
header = skb_push(skb, sizeof(*header));
header->proto = htons(type);
header->reserved = 0;
	/*
	 * We don't rely on the dst_entry structure; always stuff the
	 * destination address into the skb hard header so we can figure out
	 * where to send the packet later.
	 */
push_pseudo_header(skb, daddr);
return IPOIB_HARD_LEN;
}
static void ipoib_set_mcast_list(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
ipoib_dbg(priv, "IPOIB_FLAG_OPER_UP not set");
return;
}
queue_work(priv->wq, &priv->restart_task);
}
static int ipoib_get_iflink(const struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
/* parent interface */
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
return dev->ifindex;
/* child/vlan interface */
return priv->parent->ifindex;
}
static u32 ipoib_addr_hash(struct ipoib_neigh_hash *htbl, u8 *daddr)
{
	/*
	 * Use only the address parts that contribute to spreading.
	 * The subnet prefix is not used as one cannot connect to the same
	 * remote port (GUID) using the same remote QPN via two different
	 * subnets.
	 */
/* qpn octets[1:4) & port GUID octets[12:20) */
u32 *d32 = (u32 *) daddr;
u32 hv;
hv = jhash_3words(d32[3], d32[4], IPOIB_QPN_MASK & d32[0], 0);
return hv & htbl->mask;
}
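/*
 * RCU lookup of a neighbour by hardware address; takes a reference on
 * behalf of the caller when an entry is found.
 */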
struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
struct ipoib_neigh *neigh = NULL;
u32 hash_val;
rcu_read_lock_bh();
htbl = rcu_dereference_bh(ntbl->htbl);
if (!htbl)
goto out_unlock;
hash_val = ipoib_addr_hash(htbl, daddr);
for (neigh = rcu_dereference_bh(htbl->buckets[hash_val]);
neigh != NULL;
neigh = rcu_dereference_bh(neigh->hnext)) {
if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
/* found, take one ref on behalf of the caller */
if (!refcount_inc_not_zero(&neigh->refcnt)) {
/* deleted */
neigh = NULL;
goto out_unlock;
}
if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE))
neigh->alive = jiffies;
goto out_unlock;
}
}
out_unlock:
rcu_read_unlock_bh();
return neigh;
}
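/*
 * Garbage-collect neighbour entries that have been idle for two ARP GC
 * intervals; any send-only multicast groups they pointed at are queued for
 * removal as well.
 */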
static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
{
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
unsigned long neigh_obsolete;
unsigned long dt;
unsigned long flags;
int i;
LIST_HEAD(remove_list);
spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl,
lockdep_is_held(&priv->lock));
if (!htbl)
goto out_unlock;
/* neigh is obsolete if it was idle for two GC periods */
dt = 2 * arp_tbl.gc_interval;
neigh_obsolete = jiffies - dt;
for (i = 0; i < htbl->size; i++) {
struct ipoib_neigh *neigh;
struct ipoib_neigh __rcu **np = &htbl->buckets[i];
while ((neigh = rcu_dereference_protected(*np,
lockdep_is_held(&priv->lock))) != NULL) {
/* was the neigh idle for two GC periods */
if (time_after(neigh_obsolete, neigh->alive)) {
ipoib_check_and_add_mcast_sendonly(priv, neigh->daddr + 4, &remove_list);
rcu_assign_pointer(*np,
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
}
}
}
out_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
ipoib_mcast_remove_list(&remove_list);
}
static void ipoib_reap_neigh(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, neigh_reap_task.work);
__ipoib_reap_neigh(priv);
queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
}
static struct ipoib_neigh *ipoib_neigh_ctor(u8 *daddr,
struct net_device *dev)
{
struct ipoib_neigh *neigh;
neigh = kzalloc(sizeof(*neigh), GFP_ATOMIC);
if (!neigh)
return NULL;
neigh->dev = dev;
memcpy(&neigh->daddr, daddr, sizeof(neigh->daddr));
skb_queue_head_init(&neigh->queue);
INIT_LIST_HEAD(&neigh->list);
ipoib_cm_set(neigh, NULL);
/* one ref on behalf of the caller */
refcount_set(&neigh->refcnt, 1);
return neigh;
}
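/*
 * Look up or create a neighbour entry; must be called with priv->lock
 * held.  The entry is returned with a reference taken for the caller.
 */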
struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr,
struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
struct ipoib_neigh *neigh;
u32 hash_val;
htbl = rcu_dereference_protected(ntbl->htbl,
lockdep_is_held(&priv->lock));
if (!htbl) {
neigh = NULL;
goto out_unlock;
}
	/* We may need to add a new neigh, but another thread may have beaten
	 * us to it; recalculate the hash (a hash resize may have taken place)
	 * and search again before allocating.
	 */
hash_val = ipoib_addr_hash(htbl, daddr);
for (neigh = rcu_dereference_protected(htbl->buckets[hash_val],
lockdep_is_held(&priv->lock));
neigh != NULL;
neigh = rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock))) {
if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) {
/* found, take one ref on behalf of the caller */
if (!refcount_inc_not_zero(&neigh->refcnt)) {
/* deleted */
neigh = NULL;
break;
}
neigh->alive = jiffies;
goto out_unlock;
}
}
neigh = ipoib_neigh_ctor(daddr, dev);
if (!neigh)
goto out_unlock;
/* one ref on behalf of the hash table */
refcount_inc(&neigh->refcnt);
neigh->alive = jiffies;
/* put in hash */
rcu_assign_pointer(neigh->hnext,
rcu_dereference_protected(htbl->buckets[hash_val],
lockdep_is_held(&priv->lock)));
rcu_assign_pointer(htbl->buckets[hash_val], neigh);
atomic_inc(&ntbl->entries);
out_unlock:
return neigh;
}
void ipoib_neigh_dtor(struct ipoib_neigh *neigh)
{
	/* neigh reference count was dropped to zero */
struct net_device *dev = neigh->dev;
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct sk_buff *skb;
if (neigh->ah)
ipoib_put_ah(neigh->ah);
while ((skb = __skb_dequeue(&neigh->queue))) {
++dev->stats.tx_dropped;
dev_kfree_skb_any(skb);
}
if (ipoib_cm_get(neigh))
ipoib_cm_destroy_tx(ipoib_cm_get(neigh));
ipoib_dbg(ipoib_priv(dev),
"neigh free for %06x %pI6\n",
IPOIB_QPN(neigh->daddr),
neigh->daddr + 4);
kfree(neigh);
if (atomic_dec_and_test(&priv->ntbl.entries)) {
if (test_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags))
complete(&priv->ntbl.flushed);
}
}
static void ipoib_neigh_reclaim(struct rcu_head *rp)
{
/* Called as a result of removal from hash table */
struct ipoib_neigh *neigh = container_of(rp, struct ipoib_neigh, rcu);
/* note TX context may hold another ref */
ipoib_neigh_put(neigh);
}
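/*
 * Unlink a neighbour from the hash table (priv->lock must be held) and
 * drop the table's reference via RCU.
 */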
void ipoib_neigh_free(struct ipoib_neigh *neigh)
{
struct net_device *dev = neigh->dev;
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
struct ipoib_neigh __rcu **np;
struct ipoib_neigh *n;
u32 hash_val;
htbl = rcu_dereference_protected(ntbl->htbl,
lockdep_is_held(&priv->lock));
if (!htbl)
return;
hash_val = ipoib_addr_hash(htbl, neigh->daddr);
np = &htbl->buckets[hash_val];
for (n = rcu_dereference_protected(*np,
lockdep_is_held(&priv->lock));
n != NULL;
n = rcu_dereference_protected(*np,
lockdep_is_held(&priv->lock))) {
if (n == neigh) {
/* found */
rcu_assign_pointer(*np,
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
return;
} else {
np = &n->hnext;
}
}
}
static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv)
{
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
struct ipoib_neigh __rcu **buckets;
u32 size;
clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
ntbl->htbl = NULL;
htbl = kzalloc(sizeof(*htbl), GFP_KERNEL);
if (!htbl)
return -ENOMEM;
size = roundup_pow_of_two(arp_tbl.gc_thresh3);
buckets = kvcalloc(size, sizeof(*buckets), GFP_KERNEL);
if (!buckets) {
kfree(htbl);
return -ENOMEM;
}
htbl->size = size;
htbl->mask = (size - 1);
htbl->buckets = buckets;
RCU_INIT_POINTER(ntbl->htbl, htbl);
htbl->ntbl = ntbl;
atomic_set(&ntbl->entries, 0);
/* start garbage collection */
queue_delayed_work(priv->wq, &priv->neigh_reap_task,
arp_tbl.gc_interval);
return 0;
}
static void neigh_hash_free_rcu(struct rcu_head *head)
{
struct ipoib_neigh_hash *htbl = container_of(head,
struct ipoib_neigh_hash,
rcu);
struct ipoib_neigh __rcu **buckets = htbl->buckets;
struct ipoib_neigh_table *ntbl = htbl->ntbl;
kvfree(buckets);
kfree(htbl);
complete(&ntbl->deleted);
}
void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
unsigned long flags;
int i;
	/* remove all neighs connected to a given path or mcast */
spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl,
lockdep_is_held(&priv->lock));
if (!htbl)
goto out_unlock;
for (i = 0; i < htbl->size; i++) {
struct ipoib_neigh *neigh;
struct ipoib_neigh __rcu **np = &htbl->buckets[i];
while ((neigh = rcu_dereference_protected(*np,
lockdep_is_held(&priv->lock))) != NULL) {
/* delete neighs belonging to this parent */
if (!memcmp(gid, neigh->daddr + 4, sizeof(union ib_gid))) {
rcu_assign_pointer(*np,
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from parent list */
list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
} else {
np = &neigh->hnext;
}
}
}
out_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_flush_neighs(struct ipoib_dev_priv *priv)
{
struct ipoib_neigh_table *ntbl = &priv->ntbl;
struct ipoib_neigh_hash *htbl;
unsigned long flags;
int i, wait_flushed = 0;
init_completion(&priv->ntbl.flushed);
set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags);
spin_lock_irqsave(&priv->lock, flags);
htbl = rcu_dereference_protected(ntbl->htbl,
lockdep_is_held(&priv->lock));
if (!htbl)
goto out_unlock;
wait_flushed = atomic_read(&priv->ntbl.entries);
if (!wait_flushed)
goto free_htbl;
for (i = 0; i < htbl->size; i++) {
struct ipoib_neigh *neigh;
struct ipoib_neigh __rcu **np = &htbl->buckets[i];
while ((neigh = rcu_dereference_protected(*np,
lockdep_is_held(&priv->lock))) != NULL) {
rcu_assign_pointer(*np,
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
/* remove from path/mc list */
list_del_init(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
}
}
free_htbl:
rcu_assign_pointer(ntbl->htbl, NULL);
call_rcu(&htbl->rcu, neigh_hash_free_rcu);
out_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
if (wait_flushed)
wait_for_completion(&priv->ntbl.flushed);
}
static void ipoib_neigh_hash_uninit(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg(priv, "%s\n", __func__);
init_completion(&priv->ntbl.deleted);
cancel_delayed_work_sync(&priv->neigh_reap_task);
ipoib_flush_neighs(priv);
wait_for_completion(&priv->ntbl.deleted);
}
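/*
 * Teardown ordering note (editorial summary of the code above): the reaper
 * work is cancelled first, then ipoib_flush_neighs() removes every entry and
 * waits on ntbl.flushed until the last ipoib_neigh_dtor() runs, and finally
 * the table itself is freed via RCU, signalled through ntbl.deleted.
 */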
static void ipoib_napi_add(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
netif_napi_add_weight(dev, &priv->recv_napi, ipoib_rx_poll,
IPOIB_NUM_WC);
netif_napi_add_weight(dev, &priv->send_napi, ipoib_tx_poll,
MAX_SEND_CQE);
}
static void ipoib_napi_del(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
netif_napi_del(&priv->recv_napi);
netif_napi_del(&priv->send_napi);
}
static void ipoib_dev_uninit_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_transport_dev_cleanup(dev);
ipoib_napi_del(dev);
ipoib_cm_dev_cleanup(dev);
kfree(priv->rx_ring);
vfree(priv->tx_ring);
priv->rx_ring = NULL;
priv->tx_ring = NULL;
}
static int ipoib_dev_init_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
u8 addr_mod[3];
ipoib_napi_add(dev);
/* Allocate RX/TX "rings" to hold queued skbs */
priv->rx_ring = kcalloc(ipoib_recvq_size,
sizeof(*priv->rx_ring),
GFP_KERNEL);
if (!priv->rx_ring)
goto out;
priv->tx_ring = vzalloc(array_size(ipoib_sendq_size,
sizeof(*priv->tx_ring)));
if (!priv->tx_ring) {
pr_warn("%s: failed to allocate TX ring (%d entries)\n",
priv->ca->name, ipoib_sendq_size);
goto out_rx_ring_cleanup;
}
/* priv->tx_head, tx_tail and global_tx_tail/head are already 0 */
if (ipoib_transport_dev_init(dev, priv->ca)) {
pr_warn("%s: ipoib_transport_dev_init failed\n",
priv->ca->name);
goto out_tx_ring_cleanup;
}
/* after the QP is created, set the dev address */
addr_mod[0] = (priv->qp->qp_num >> 16) & 0xff;
addr_mod[1] = (priv->qp->qp_num >> 8) & 0xff;
addr_mod[2] = (priv->qp->qp_num) & 0xff;
dev_addr_mod(priv->dev, 1, addr_mod, sizeof(addr_mod));
return 0;
out_tx_ring_cleanup:
vfree(priv->tx_ring);
out_rx_ring_cleanup:
kfree(priv->rx_ring);
out:
ipoib_napi_del(dev);
return -ENOMEM;
}
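/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * addr_mod[] bytes written above place the 24-bit QP number at offsets 1..3
 * of the 20-byte IPoIB hardware address; the 16-byte GID follows at offset 4
 * (see ipoib_parent_init()).  Reading the QPN back mirrors that packing.
 */
static u32 __maybe_unused ipoib_example_qpn_from_lladdr(const u8 *lladdr)
{
	/* inverse of the addr_mod[] packing in ipoib_dev_init_default() */
	return (lladdr[1] << 16) | (lladdr[2] << 8) | lladdr[3];
}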
static int ipoib_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (!priv->rn_ops->ndo_eth_ioctl)
return -EOPNOTSUPP;
return priv->rn_ops->ndo_eth_ioctl(dev, ifr, cmd);
}
static int ipoib_dev_init(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret = -ENOMEM;
priv->qp = NULL;
/*
* the various IPoIB tasks assume they will never race against
* themselves, so always use a single-threaded workqueue
*/
priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
if (!priv->wq) {
pr_warn("%s: failed to allocate device WQ\n", dev->name);
goto out;
}
/* create the PD, which is used for both the control and data paths */
priv->pd = ib_alloc_pd(priv->ca, 0);
if (IS_ERR(priv->pd)) {
pr_warn("%s: failed to allocate PD\n", priv->ca->name);
goto clean_wq;
}
ret = priv->rn_ops->ndo_init(dev);
if (ret) {
pr_warn("%s failed to init HW resource\n", dev->name);
goto out_free_pd;
}
ret = ipoib_neigh_hash_init(priv);
if (ret) {
pr_warn("%s failed to init neigh hash\n", dev->name);
goto out_dev_uninit;
}
if (dev->flags & IFF_UP) {
if (ipoib_ib_dev_open(dev)) {
pr_warn("%s failed to open device\n", dev->name);
ret = -ENODEV;
goto out_hash_uninit;
}
}
return 0;
out_hash_uninit:
ipoib_neigh_hash_uninit(dev);
out_dev_uninit:
ipoib_ib_dev_cleanup(dev);
out_free_pd:
if (priv->pd) {
ib_dealloc_pd(priv->pd);
priv->pd = NULL;
}
clean_wq:
if (priv->wq) {
destroy_workqueue(priv->wq);
priv->wq = NULL;
}
out:
return ret;
}
/*
* This must be called before doing an unregister_netdev on a parent device to
* shutdown the IB event handler.
*/
static void ipoib_parent_unregister_pre(struct net_device *ndev)
{
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
/*
* ipoib_set_mac checks netif_running before pushing work; clearing
* running ensures it will not add more work.
*/
rtnl_lock();
dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP, NULL);
rtnl_unlock();
/* ipoib_event() cannot be running once this returns */
ib_unregister_event_handler(&priv->event_handler);
/*
* Work on the queue grabs the rtnl lock, so this cannot be done while
* also holding it.
*/
flush_workqueue(ipoib_workqueue);
}
static void ipoib_set_dev_features(struct ipoib_dev_priv *priv)
{
priv->hca_caps = priv->ca->attrs.device_cap_flags;
priv->kernel_caps = priv->ca->attrs.kernel_cap_flags;
if (priv->hca_caps & IB_DEVICE_UD_IP_CSUM) {
priv->dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
if (priv->kernel_caps & IBK_UD_TSO)
priv->dev->hw_features |= NETIF_F_TSO;
priv->dev->features |= priv->dev->hw_features;
}
}
static int ipoib_parent_init(struct net_device *ndev)
{
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
struct ib_port_attr attr;
int result;
result = ib_query_port(priv->ca, priv->port, &attr);
if (result) {
pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
priv->port);
return result;
}
priv->max_ib_mtu = rdma_mtu_from_attr(priv->ca, priv->port, &attr);
result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
if (result) {
pr_warn("%s: ib_query_pkey port %d failed (ret = %d)\n",
priv->ca->name, priv->port, result);
return result;
}
result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
if (result) {
pr_warn("%s: rdma_query_gid port %d failed (ret = %d)\n",
priv->ca->name, priv->port, result);
return result;
}
dev_addr_mod(priv->dev, 4, priv->local_gid.raw, sizeof(union ib_gid));
SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
priv->dev->dev_port = priv->port - 1;
/* Let's set this one too for backwards compatibility. */
priv->dev->dev_id = priv->port - 1;
return 0;
}
static void ipoib_child_init(struct net_device *ndev)
{
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
priv->max_ib_mtu = ppriv->max_ib_mtu;
set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
if (memchr_inv(priv->dev->dev_addr, 0, INFINIBAND_ALEN))
memcpy(&priv->local_gid, priv->dev->dev_addr + 4,
sizeof(priv->local_gid));
else {
__dev_addr_set(priv->dev, ppriv->dev->dev_addr,
INFINIBAND_ALEN);
memcpy(&priv->local_gid, &ppriv->local_gid,
sizeof(priv->local_gid));
}
}
static int ipoib_ndo_init(struct net_device *ndev)
{
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
int rc;
struct rdma_netdev *rn = netdev_priv(ndev);
if (priv->parent) {
ipoib_child_init(ndev);
} else {
rc = ipoib_parent_init(ndev);
if (rc)
return rc;
}
/* MTU will be reset when mcast join happens */
ndev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = ndev->mtu;
rn->mtu = priv->mcast_mtu;
ndev->max_mtu = IPOIB_CM_MTU;
ndev->neigh_priv_len = sizeof(struct ipoib_neigh);
/*
* Set the full membership bit, so that we join the right
* broadcast group, etc.
*/
priv->pkey |= 0x8000;
ndev->broadcast[8] = priv->pkey >> 8;
ndev->broadcast[9] = priv->pkey & 0xff;
set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
ipoib_set_dev_features(priv);
rc = ipoib_dev_init(ndev);
if (rc) {
pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
priv->ca->name, priv->dev->name, priv->port, rc);
return rc;
}
if (priv->parent) {
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
dev_hold(priv->parent);
down_write(&ppriv->vlan_rwsem);
list_add_tail(&priv->list, &ppriv->child_intfs);
up_write(&ppriv->vlan_rwsem);
}
return 0;
}
static void ipoib_ndo_uninit(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ASSERT_RTNL();
/*
* ipoib_remove_one guarantees the children are removed before the
* parent, and that is the only place where a parent can be removed.
*/
WARN_ON(!list_empty(&priv->child_intfs));
if (priv->parent) {
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
down_write(&ppriv->vlan_rwsem);
list_del(&priv->list);
up_write(&ppriv->vlan_rwsem);
}
ipoib_neigh_hash_uninit(dev);
ipoib_ib_dev_cleanup(dev);
/* no more work over priv->wq */
if (priv->wq) {
/* See ipoib_mcast_carrier_on_task() */
WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
destroy_workqueue(priv->wq);
priv->wq = NULL;
}
if (priv->parent)
dev_put(priv->parent);
}
static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
}
static int ipoib_get_vf_config(struct net_device *dev, int vf,
struct ifla_vf_info *ivf)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int err;
err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
if (err)
return err;
ivf->vf = vf;
memcpy(ivf->mac, dev->dev_addr, dev->addr_len);
return 0;
}
static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
if (type != IFLA_VF_IB_NODE_GUID && type != IFLA_VF_IB_PORT_GUID)
return -EINVAL;
return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
}
static int ipoib_get_vf_guid(struct net_device *dev, int vf,
struct ifla_vf_guid *node_guid,
struct ifla_vf_guid *port_guid)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
}
static int ipoib_get_vf_stats(struct net_device *dev, int vf,
struct ifla_vf_stats *vf_stats)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
}
static const struct header_ops ipoib_header_ops = {
.create = ipoib_hard_header,
};
static const struct net_device_ops ipoib_netdev_ops_pf = {
.ndo_init = ipoib_ndo_init,
.ndo_uninit = ipoib_ndo_uninit,
.ndo_open = ipoib_open,
.ndo_stop = ipoib_stop,
.ndo_change_mtu = ipoib_change_mtu,
.ndo_fix_features = ipoib_fix_features,
.ndo_start_xmit = ipoib_start_xmit,
.ndo_tx_timeout = ipoib_timeout,
.ndo_set_rx_mode = ipoib_set_mcast_list,
.ndo_get_iflink = ipoib_get_iflink,
.ndo_set_vf_link_state = ipoib_set_vf_link_state,
.ndo_get_vf_config = ipoib_get_vf_config,
.ndo_get_vf_stats = ipoib_get_vf_stats,
.ndo_get_vf_guid = ipoib_get_vf_guid,
.ndo_set_vf_guid = ipoib_set_vf_guid,
.ndo_set_mac_address = ipoib_set_mac,
.ndo_get_stats64 = ipoib_get_stats,
.ndo_eth_ioctl = ipoib_ioctl,
};
static const struct net_device_ops ipoib_netdev_ops_vf = {
.ndo_init = ipoib_ndo_init,
.ndo_uninit = ipoib_ndo_uninit,
.ndo_open = ipoib_open,
.ndo_stop = ipoib_stop,
.ndo_change_mtu = ipoib_change_mtu,
.ndo_fix_features = ipoib_fix_features,
.ndo_start_xmit = ipoib_start_xmit,
.ndo_tx_timeout = ipoib_timeout,
.ndo_set_rx_mode = ipoib_set_mcast_list,
.ndo_get_iflink = ipoib_get_iflink,
.ndo_get_stats64 = ipoib_get_stats,
.ndo_eth_ioctl = ipoib_ioctl,
};
static const struct net_device_ops ipoib_netdev_default_pf = {
.ndo_init = ipoib_dev_init_default,
.ndo_uninit = ipoib_dev_uninit_default,
.ndo_open = ipoib_ib_dev_open_default,
.ndo_stop = ipoib_ib_dev_stop_default,
};
void ipoib_setup_common(struct net_device *dev)
{
dev->header_ops = &ipoib_header_ops;
dev->netdev_ops = &ipoib_netdev_default_pf;
ipoib_set_ethtool_ops(dev);
dev->watchdog_timeo = HZ;
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
dev->hard_header_len = IPOIB_HARD_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
dev->features = (NETIF_F_VLAN_CHALLENGED |
NETIF_F_HIGHDMA);
netif_keep_dst(dev);
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
/*
* unregister_netdev always frees the netdev; we use this mode
* consistently to unify all the various unregister paths, including
* those connected to rtnl_link_ops which require it.
*/
dev->needs_free_netdev = true;
}
static void ipoib_build_priv(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
priv->dev = dev;
spin_lock_init(&priv->lock);
init_rwsem(&priv->vlan_rwsem);
mutex_init(&priv->mcast_mutex);
INIT_LIST_HEAD(&priv->path_list);
INIT_LIST_HEAD(&priv->child_intfs);
INIT_LIST_HEAD(&priv->dead_ahs);
INIT_LIST_HEAD(&priv->multicast_list);
INIT_DELAYED_WORK(&priv->mcast_task, ipoib_mcast_join_task);
INIT_WORK(&priv->carrier_on_task, ipoib_mcast_carrier_on_task);
INIT_WORK(&priv->flush_light, ipoib_ib_dev_flush_light);
INIT_WORK(&priv->flush_normal, ipoib_ib_dev_flush_normal);
INIT_WORK(&priv->flush_heavy, ipoib_ib_dev_flush_heavy);
INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task);
INIT_DELAYED_WORK(&priv->ah_reap_task, ipoib_reap_ah);
INIT_DELAYED_WORK(&priv->neigh_reap_task, ipoib_reap_neigh);
}
static struct net_device *ipoib_alloc_netdev(struct ib_device *hca, u32 port,
const char *name)
{
struct net_device *dev;
dev = rdma_alloc_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
NET_NAME_UNKNOWN, ipoib_setup_common);
if (!IS_ERR(dev) || PTR_ERR(dev) != -EOPNOTSUPP)
return dev;
dev = alloc_netdev(sizeof(struct rdma_netdev), name, NET_NAME_UNKNOWN,
ipoib_setup_common);
if (!dev)
return ERR_PTR(-ENOMEM);
return dev;
}
int ipoib_intf_init(struct ib_device *hca, u32 port, const char *name,
struct net_device *dev)
{
struct rdma_netdev *rn = netdev_priv(dev);
struct ipoib_dev_priv *priv;
int rc;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->ca = hca;
priv->port = port;
rc = rdma_init_netdev(hca, port, RDMA_NETDEV_IPOIB, name,
NET_NAME_UNKNOWN, ipoib_setup_common, dev);
if (rc) {
if (rc != -EOPNOTSUPP)
goto out;
rn->send = ipoib_send;
rn->attach_mcast = ipoib_mcast_attach;
rn->detach_mcast = ipoib_mcast_detach;
rn->hca = hca;
rc = netif_set_real_num_tx_queues(dev, 1);
if (rc)
goto out;
rc = netif_set_real_num_rx_queues(dev, 1);
if (rc)
goto out;
}
priv->rn_ops = dev->netdev_ops;
if (hca->attrs.kernel_cap_flags & IBK_VIRTUAL_FUNCTION)
dev->netdev_ops = &ipoib_netdev_ops_vf;
else
dev->netdev_ops = &ipoib_netdev_ops_pf;
rn->clnt_priv = priv;
/*
* Only the child register_netdev flows can handle priv_destructor
* being set, so we force it to NULL here and handle it manually until it
* is safe to turn on.
*/
priv->next_priv_destructor = dev->priv_destructor;
dev->priv_destructor = NULL;
ipoib_build_priv(dev);
return 0;
out:
kfree(priv);
return rc;
}
struct net_device *ipoib_intf_alloc(struct ib_device *hca, u32 port,
const char *name)
{
struct net_device *dev;
int rc;
dev = ipoib_alloc_netdev(hca, port, name);
if (IS_ERR(dev))
return dev;
rc = ipoib_intf_init(hca, port, name, dev);
if (rc) {
free_netdev(dev);
return ERR_PTR(rc);
}
/*
* Upon success the caller must ensure ipoib_intf_free is called or
* register_netdevice succeeded and priv_destructor is set to
* ipoib_intf_free.
*/
return dev;
}
void ipoib_intf_free(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
dev->priv_destructor = priv->next_priv_destructor;
if (dev->priv_destructor)
dev->priv_destructor(dev);
/*
* There are some error flows around register_netdev failing that may
* attempt to call priv_destructor twice; prevent that from happening.
*/
dev->priv_destructor = NULL;
/* unregister/destroy is very complicated. Make bugs more obvious. */
rn->clnt_priv = NULL;
kfree(priv);
}
static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct net_device *ndev = to_net_dev(dev);
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
return sysfs_emit(buf, "0x%04x\n", priv->pkey);
}
static DEVICE_ATTR_RO(pkey);
static ssize_t umcast_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct net_device *ndev = to_net_dev(dev);
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
return sysfs_emit(buf, "%d\n",
test_bit(IPOIB_FLAG_UMCAST, &priv->flags));
}
void ipoib_set_umcast(struct net_device *ndev, int umcast_val)
{
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
if (umcast_val > 0) {
set_bit(IPOIB_FLAG_UMCAST, &priv->flags);
ipoib_warn(priv, "ignoring multicast groups joined directly "
"by userspace\n");
} else
clear_bit(IPOIB_FLAG_UMCAST, &priv->flags);
}
static ssize_t umcast_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long umcast_val = simple_strtoul(buf, NULL, 0);
ipoib_set_umcast(to_net_dev(dev), umcast_val);
return count;
}
static DEVICE_ATTR_RW(umcast);
int ipoib_add_umcast_attr(struct net_device *dev)
{
return device_create_file(&dev->dev, &dev_attr_umcast);
}
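/*
 * Usage note (editorial, path assumed): because the attribute is created on
 * the net_device's struct device, it is expected to appear as
 * /sys/class/net/<ifname>/umcast; writing a value there goes through
 * umcast_store() -> ipoib_set_umcast().
 */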
static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
{
struct ipoib_dev_priv *child_priv;
struct net_device *netdev = priv->dev;
netif_addr_lock_bh(netdev);
memcpy(&priv->local_gid.global.interface_id,
&gid->global.interface_id,
sizeof(gid->global.interface_id));
dev_addr_mod(netdev, 4, (u8 *)&priv->local_gid, sizeof(priv->local_gid));
clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
netif_addr_unlock_bh(netdev);
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
down_read(&priv->vlan_rwsem);
list_for_each_entry(child_priv, &priv->child_intfs, list)
set_base_guid(child_priv, gid);
up_read(&priv->vlan_rwsem);
}
}
static int ipoib_check_lladdr(struct net_device *dev,
struct sockaddr_storage *ss)
{
union ib_gid *gid = (union ib_gid *)(ss->__data + 4);
int ret = 0;
netif_addr_lock_bh(dev);
/* Make sure the QPN, reserved and subnet prefix match the current
* lladdr; this also makes sure the lladdr is unicast.
*/
if (memcmp(dev->dev_addr, ss->__data,
4 + sizeof(gid->global.subnet_prefix)) ||
gid->global.interface_id == 0)
ret = -EINVAL;
netif_addr_unlock_bh(dev);
return ret;
}
static int ipoib_set_mac(struct net_device *dev, void *addr)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct sockaddr_storage *ss = addr;
int ret;
if (!(dev->priv_flags & IFF_LIVE_ADDR_CHANGE) && netif_running(dev))
return -EBUSY;
ret = ipoib_check_lladdr(dev, ss);
if (ret)
return ret;
set_base_guid(priv, (union ib_gid *)(ss->__data + 4));
queue_work(ipoib_workqueue, &priv->flush_light);
return 0;
}
static ssize_t create_child_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int pkey;
int ret;
if (sscanf(buf, "%i", &pkey) != 1)
return -EINVAL;
if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
return -EINVAL;
ret = ipoib_vlan_add(to_net_dev(dev), pkey);
return ret ? ret : count;
}
static DEVICE_ATTR_WO(create_child);
static ssize_t delete_child_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int pkey;
int ret;
if (sscanf(buf, "%i", &pkey) != 1)
return -EINVAL;
if (pkey < 0 || pkey > 0xffff)
return -EINVAL;
ret = ipoib_vlan_delete(to_net_dev(dev), pkey);
return ret ? ret : count;
}
static DEVICE_ATTR_WO(delete_child);
int ipoib_add_pkey_attr(struct net_device *dev)
{
return device_create_file(&dev->dev, &dev_attr_pkey);
}
/*
* We erroneously exposed the iface's port number in the dev_id
* sysfs field long after dev_port was introduced for that purpose[1],
* and we need to stop everyone from relying on that.
* Let's overload the show routine for the dev_id file here
* to gently bring the issue up.
*
* [1] https://www.spinics.net/lists/netdev/msg272123.html
*/
static ssize_t dev_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct net_device *ndev = to_net_dev(dev);
/*
* ndev->dev_port will be equal to 0 in old kernels prior to commit
* 9b8b2a323008 ("IB/ipoib: Use dev_port to expose network interface
* port numbers"). Zero was chosen as a special case so that user space
* applications can fall back and query dev_id to check whether it has
* a different value or not.
*
* Don't print warning in such scenario.
*
* https://github.com/systemd/systemd/blob/master/src/udev/udev-builtin-net_id.c#L358
*/
if (ndev->dev_port && ndev->dev_id == ndev->dev_port)
netdev_info_once(ndev,
"\"%s\" wants to know my dev_id. Should it look at dev_port instead? See Documentation/ABI/testing/sysfs-class-net for more info.\n",
current->comm);
return sysfs_emit(buf, "%#x\n", ndev->dev_id);
}
static DEVICE_ATTR_RO(dev_id);
static int ipoib_intercept_dev_id_attr(struct net_device *dev)
{
device_remove_file(&dev->dev, &dev_attr_dev_id);
return device_create_file(&dev->dev, &dev_attr_dev_id);
}
static struct net_device *ipoib_add_port(const char *format,
struct ib_device *hca, u32 port)
{
struct rtnl_link_ops *ops = ipoib_get_link_ops();
struct rdma_netdev_alloc_params params;
struct ipoib_dev_priv *priv;
struct net_device *ndev;
int result;
ndev = ipoib_intf_alloc(hca, port, format);
if (IS_ERR(ndev)) {
pr_warn("%s, %d: ipoib_intf_alloc failed %ld\n", hca->name, port,
PTR_ERR(ndev));
return ndev;
}
priv = ipoib_priv(ndev);
INIT_IB_EVENT_HANDLER(&priv->event_handler,
priv->ca, ipoib_event);
ib_register_event_handler(&priv->event_handler);
/* call the event handler to ensure the pkey is in sync */
queue_work(ipoib_workqueue, &priv->flush_heavy);
ndev->rtnl_link_ops = ipoib_get_link_ops();
result = register_netdev(ndev);
if (result) {
pr_warn("%s: couldn't register ipoib port %d; error %d\n",
hca->name, port, result);
ipoib_parent_unregister_pre(ndev);
ipoib_intf_free(ndev);
free_netdev(ndev);
return ERR_PTR(result);
}
if (hca->ops.rdma_netdev_get_params) {
int rc = hca->ops.rdma_netdev_get_params(hca, port,
RDMA_NETDEV_IPOIB,
&params);
if (!rc && ops->priv_size < params.sizeof_priv)
ops->priv_size = params.sizeof_priv;
}
/*
* We cannot set priv_destructor before register_netdev because we
* need priv to always be valid during the error flow to execute
* ipoib_parent_unregister_pre(). Instead handle it manually and only
* enter priv_destructor mode once we are completely registered.
*/
ndev->priv_destructor = ipoib_intf_free;
if (ipoib_intercept_dev_id_attr(ndev))
goto sysfs_failed;
if (ipoib_cm_add_mode_attr(ndev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(ndev))
goto sysfs_failed;
if (ipoib_add_umcast_attr(ndev))
goto sysfs_failed;
if (device_create_file(&ndev->dev, &dev_attr_create_child))
goto sysfs_failed;
if (device_create_file(&ndev->dev, &dev_attr_delete_child))
goto sysfs_failed;
return ndev;
sysfs_failed:
ipoib_parent_unregister_pre(ndev);
unregister_netdev(ndev);
return ERR_PTR(-ENOMEM);
}
static int ipoib_add_one(struct ib_device *device)
{
struct list_head *dev_list;
struct net_device *dev;
struct ipoib_dev_priv *priv;
unsigned int p;
int count = 0;
dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
if (!dev_list)
return -ENOMEM;
INIT_LIST_HEAD(dev_list);
rdma_for_each_port (device, p) {
if (!rdma_protocol_ib(device, p))
continue;
dev = ipoib_add_port("ib%d", device, p);
if (!IS_ERR(dev)) {
priv = ipoib_priv(dev);
list_add_tail(&priv->list, dev_list);
count++;
}
}
if (!count) {
kfree(dev_list);
return -EOPNOTSUPP;
}
ib_set_client_data(device, &ipoib_client, dev_list);
return 0;
}
static void ipoib_remove_one(struct ib_device *device, void *client_data)
{
struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
struct list_head *dev_list = client_data;
list_for_each_entry_safe(priv, tmp, dev_list, list) {
LIST_HEAD(head);
ipoib_parent_unregister_pre(priv->dev);
rtnl_lock();
list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs,
list)
unregister_netdevice_queue(cpriv->dev, &head);
unregister_netdevice_queue(priv->dev, &head);
unregister_netdevice_many(&head);
rtnl_unlock();
}
kfree(dev_list);
}
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
static struct notifier_block ipoib_netdev_notifier = {
.notifier_call = ipoib_netdev_event,
};
#endif
static int __init ipoib_init_module(void)
{
int ret;
ipoib_recvq_size = roundup_pow_of_two(ipoib_recvq_size);
ipoib_recvq_size = min(ipoib_recvq_size, IPOIB_MAX_QUEUE_SIZE);
ipoib_recvq_size = max(ipoib_recvq_size, IPOIB_MIN_QUEUE_SIZE);
ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
ipoib_sendq_size = max3(ipoib_sendq_size, 2 * MAX_SEND_CQE, IPOIB_MIN_QUEUE_SIZE);
#ifdef CONFIG_INFINIBAND_IPOIB_CM
ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
ipoib_max_conn_qp = max(ipoib_max_conn_qp, 0);
#endif
/*
* When copying small received packets, we only copy from the
* linear data part of the SKB, so we rely on this condition.
*/
BUILD_BUG_ON(IPOIB_CM_COPYBREAK > IPOIB_CM_HEAD_SIZE);
ipoib_register_debugfs();
/*
* We create a global workqueue here that is used for all flush
* operations. However, if you attempt to flush a workqueue
* from a task on that same workqueue, it deadlocks the system.
* We want to be able to flush the tasks associated with a
* specific net device, so we also create a workqueue for each
* netdevice. We queue up the tasks for that device only on
* its private workqueue, and we only queue up flush events
* on our global flush workqueue. This avoids the deadlocks.
*/
ipoib_workqueue = alloc_ordered_workqueue("ipoib_flush", 0);
if (!ipoib_workqueue) {
ret = -ENOMEM;
goto err_fs;
}
ib_sa_register_client(&ipoib_sa_client);
ret = ib_register_client(&ipoib_client);
if (ret)
goto err_sa;
ret = ipoib_netlink_init();
if (ret)
goto err_client;
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
register_netdevice_notifier(&ipoib_netdev_notifier);
#endif
return 0;
err_client:
ib_unregister_client(&ipoib_client);
err_sa:
ib_sa_unregister_client(&ipoib_sa_client);
destroy_workqueue(ipoib_workqueue);
err_fs:
ipoib_unregister_debugfs();
return ret;
}
static void __exit ipoib_cleanup_module(void)
{
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
unregister_netdevice_notifier(&ipoib_netdev_notifier);
#endif
ipoib_netlink_fini();
ib_unregister_client(&ipoib_client);
ib_sa_unregister_client(&ipoib_sa_client);
ipoib_unregister_debugfs();
destroy_workqueue(ipoib_workqueue);
}
module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);
| linux-master | drivers/infiniband/ulp/ipoib/ipoib_main.c |
/*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include "ipoib.h"
struct ipoib_stats {
char stat_string[ETH_GSTRING_LEN];
int stat_offset;
};
#define IPOIB_NETDEV_STAT(m) { \
.stat_string = #m, \
.stat_offset = offsetof(struct rtnl_link_stats64, m) }
static const struct ipoib_stats ipoib_gstrings_stats[] = {
IPOIB_NETDEV_STAT(rx_packets),
IPOIB_NETDEV_STAT(tx_packets),
IPOIB_NETDEV_STAT(rx_bytes),
IPOIB_NETDEV_STAT(tx_bytes),
IPOIB_NETDEV_STAT(tx_errors),
IPOIB_NETDEV_STAT(rx_dropped),
IPOIB_NETDEV_STAT(tx_dropped),
IPOIB_NETDEV_STAT(multicast),
};
#define IPOIB_GLOBAL_STATS_LEN ARRAY_SIZE(ipoib_gstrings_stats)
static void ipoib_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ipoib_dev_priv *priv = ipoib_priv(netdev);
ib_get_device_fw_str(priv->ca, drvinfo->fw_version);
strscpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
sizeof(drvinfo->bus_info));
strscpy(drvinfo->driver, "ib_ipoib", sizeof(drvinfo->driver));
}
static int ipoib_get_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
return 0;
}
static int ipoib_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret;
/*
* These values are saved in the private data and returned
* when ipoib_get_coalesce() is called
*/
if (coal->rx_coalesce_usecs > 0xffff ||
coal->rx_max_coalesced_frames > 0xffff)
return -EINVAL;
ret = rdma_set_cq_moderation(priv->recv_cq,
coal->rx_max_coalesced_frames,
coal->rx_coalesce_usecs);
if (ret && ret != -EOPNOTSUPP) {
ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
return ret;
}
priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
return 0;
}
static void ipoib_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats __always_unused *stats,
u64 *data)
{
int i;
struct net_device_stats *net_stats = &dev->stats;
u8 *p = (u8 *)net_stats;
for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++)
data[i] = *(u64 *)(p + ipoib_gstrings_stats[i].stat_offset);
}
static void ipoib_get_strings(struct net_device __always_unused *dev,
u32 stringset, u8 *data)
{
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < IPOIB_GLOBAL_STATS_LEN; i++) {
memcpy(p, ipoib_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
break;
default:
break;
}
}
static int ipoib_get_sset_count(struct net_device __always_unused *dev,
int sset)
{
switch (sset) {
case ETH_SS_STATS:
return IPOIB_GLOBAL_STATS_LEN;
default:
break;
}
return -EOPNOTSUPP;
}
/* Return lane speed in units of 1e6 bit/sec */
static inline int ib_speed_enum_to_int(int speed)
{
switch (speed) {
case IB_SPEED_SDR:
return SPEED_2500;
case IB_SPEED_DDR:
return SPEED_5000;
case IB_SPEED_QDR:
case IB_SPEED_FDR10:
return SPEED_10000;
case IB_SPEED_FDR:
return SPEED_14000;
case IB_SPEED_EDR:
return SPEED_25000;
case IB_SPEED_HDR:
return SPEED_50000;
case IB_SPEED_NDR:
return SPEED_100000;
}
return SPEED_UNKNOWN;
}
static int ipoib_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct ipoib_dev_priv *priv = ipoib_priv(netdev);
struct ib_port_attr attr;
int ret, speed, width;
if (!netif_carrier_ok(netdev)) {
cmd->base.speed = SPEED_UNKNOWN;
cmd->base.duplex = DUPLEX_UNKNOWN;
return 0;
}
ret = ib_query_port(priv->ca, priv->port, &attr);
if (ret < 0)
return -EINVAL;
speed = ib_speed_enum_to_int(attr.active_speed);
width = ib_width_enum_to_int(attr.active_width);
if (speed < 0 || width < 0)
return -EINVAL;
/* Except for the fields set below, the other members of
* struct ethtool_link_settings are initialized to
* zero by __ethtool_get_link_ksettings().
*/
cmd->base.speed = speed * width;
cmd->base.duplex = DUPLEX_FULL;
cmd->base.phy_address = 0xFF;
cmd->base.autoneg = AUTONEG_ENABLE;
cmd->base.port = PORT_OTHER;
return 0;
}
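/*
 * Worked example (editorial): for a 4X EDR link, ib_width_enum_to_int()
 * returns 4 and ib_speed_enum_to_int() returns SPEED_25000, so the reported
 * speed is 4 * 25000 = 100000, i.e. 100 Gb/s.
 */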
static const struct ethtool_ops ipoib_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
ETHTOOL_COALESCE_RX_MAX_FRAMES,
.get_link_ksettings = ipoib_get_link_ksettings,
.get_drvinfo = ipoib_get_drvinfo,
.get_coalesce = ipoib_get_coalesce,
.set_coalesce = ipoib_set_coalesce,
.get_strings = ipoib_get_strings,
.get_ethtool_stats = ipoib_get_ethtool_stats,
.get_sset_count = ipoib_get_sset_count,
.get_link = ethtool_op_get_link,
};
void ipoib_set_ethtool_ops(struct net_device *dev)
{
dev->ethtool_ops = &ipoib_ethtool_ops;
}
| linux-master | drivers/infiniband/ulp/ipoib/ipoib_ethtool.c |
/*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <rdma/ib_cache.h>
#include "ipoib.h"
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;
module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
"Enable data path debug tracing if > 0");
#endif
struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
struct ib_pd *pd, struct rdma_ah_attr *attr)
{
struct ipoib_ah *ah;
struct ib_ah *vah;
ah = kmalloc(sizeof(*ah), GFP_KERNEL);
if (!ah)
return ERR_PTR(-ENOMEM);
ah->dev = dev;
ah->last_send = 0;
kref_init(&ah->ref);
vah = rdma_create_ah(pd, attr, RDMA_CREATE_AH_SLEEPABLE);
if (IS_ERR(vah)) {
kfree(ah);
ah = (struct ipoib_ah *)vah;
} else {
ah->ah = vah;
ipoib_dbg(ipoib_priv(dev), "Created ah %p\n", ah->ah);
}
return ah;
}
void ipoib_free_ah(struct kref *kref)
{
struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
struct ipoib_dev_priv *priv = ipoib_priv(ah->dev);
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
list_add_tail(&ah->list, &priv->dead_ahs);
spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
u64 mapping[IPOIB_UD_RX_SG])
{
ib_dma_unmap_single(priv->ca, mapping[0],
IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
DMA_FROM_DEVICE);
}
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret;
priv->rx_wr.wr_id = id | IPOIB_OP_RECV;
priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
ret = ib_post_recv(priv->qp, &priv->rx_wr, NULL);
if (unlikely(ret)) {
ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
dev_kfree_skb_any(priv->rx_ring[id].skb);
priv->rx_ring[id].skb = NULL;
}
return ret;
}
static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct sk_buff *skb;
int buf_size;
u64 *mapping;
buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
skb = dev_alloc_skb(buf_size + IPOIB_HARD_LEN);
if (unlikely(!skb))
return NULL;
/*
* the IP header will be at IPOIB_HARD_LEN + IB_GRH_BYTES, which is
* 64-byte aligned
*/
skb_reserve(skb, sizeof(struct ipoib_pseudo_header));
mapping = priv->rx_ring[id].mapping;
mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
goto error;
priv->rx_ring[id].skb = skb;
return skb;
error:
dev_kfree_skb_any(skb);
return NULL;
}
static int ipoib_ib_post_receives(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i;
for (i = 0; i < ipoib_recvq_size; ++i) {
if (!ipoib_alloc_rx_skb(dev, i)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
return -ENOMEM;
}
if (ipoib_ib_post_receive(dev, i)) {
ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
return -EIO;
}
}
return 0;
}
static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
struct sk_buff *skb;
u64 mapping[IPOIB_UD_RX_SG];
union ib_gid *dgid;
union ib_gid *sgid;
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
if (unlikely(wr_id >= ipoib_recvq_size)) {
ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
wr_id, ipoib_recvq_size);
return;
}
skb = priv->rx_ring[wr_id].skb;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
if (wc->status != IB_WC_WR_FLUSH_ERR)
ipoib_warn(priv,
"failed recv event (status=%d, wrid=%d vend_err %#x)\n",
wc->status, wr_id, wc->vendor_err);
ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
dev_kfree_skb_any(skb);
priv->rx_ring[wr_id].skb = NULL;
return;
}
memcpy(mapping, priv->rx_ring[wr_id].mapping,
IPOIB_UD_RX_SG * sizeof(*mapping));
/*
* If we can't allocate a new RX buffer, dump
* this packet and reuse the old buffer.
*/
if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
++dev->stats.rx_dropped;
goto repost;
}
ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
wc->byte_len, wc->slid);
ipoib_ud_dma_unmap_rx(priv, mapping);
skb_put(skb, wc->byte_len);
/* First byte of dgid signals multicast when 0xff */
dgid = &((struct ib_grh *)skb->data)->dgid;
if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff)
skb->pkt_type = PACKET_HOST;
else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0)
skb->pkt_type = PACKET_BROADCAST;
else
skb->pkt_type = PACKET_MULTICAST;
sgid = &((struct ib_grh *)skb->data)->sgid;
/*
* Drop packets that this interface sent, i.e. multicast packets
* that the HCA has replicated.
*/
if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) {
int need_repost = 1;
if ((wc->wc_flags & IB_WC_GRH) &&
sgid->global.interface_id != priv->local_gid.global.interface_id)
need_repost = 0;
if (need_repost) {
dev_kfree_skb_any(skb);
goto repost;
}
}
skb_pull(skb, IB_GRH_BYTES);
skb->protocol = ((struct ipoib_header *) skb->data)->proto;
skb_add_pseudo_hdr(skb);
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
if (skb->pkt_type == PACKET_MULTICAST)
dev->stats.multicast++;
skb->dev = dev;
if ((dev->features & NETIF_F_RXCSUM) &&
likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
skb->ip_summed = CHECKSUM_UNNECESSARY;
napi_gro_receive(&priv->recv_napi, skb);
repost:
if (unlikely(ipoib_ib_post_receive(dev, wr_id)))
ipoib_warn(priv, "ipoib_ib_post_receive failed "
"for buf %d\n", wr_id);
}
int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
{
struct sk_buff *skb = tx_req->skb;
u64 *mapping = tx_req->mapping;
int i;
int off;
if (skb_headlen(skb)) {
mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
return -EIO;
off = 1;
} else
off = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
mapping[i + off] = ib_dma_map_page(ca,
skb_frag_page(frag),
skb_frag_off(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
goto partial_error;
}
return 0;
partial_error:
for (; i > 0; --i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
}
if (off)
ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
return -EIO;
}
void ipoib_dma_unmap_tx(struct ipoib_dev_priv *priv,
struct ipoib_tx_buf *tx_req)
{
struct sk_buff *skb = tx_req->skb;
u64 *mapping = tx_req->mapping;
int i;
int off;
if (skb_headlen(skb)) {
ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
DMA_TO_DEVICE);
off = 1;
} else
off = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
ib_dma_unmap_page(priv->ca, mapping[i + off],
skb_frag_size(frag), DMA_TO_DEVICE);
}
}
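/*
 * Mapping layout note (editorial summary of the two helpers above): when the
 * skb has linear data, mapping[0] holds its DMA address and the page
 * fragments follow at mapping[1..nr_frags]; with no linear data the
 * fragments start at mapping[0].  The "off" variable encodes that shift.
 */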
/*
* As a result of a completion error the QP can be transitioned to the SQE state.
* This function checks whether the (send) QP is in the SQE state and,
* if so, moves it back to the RTS state so it is functional again.
*/
static void ipoib_qp_state_validate_work(struct work_struct *work)
{
struct ipoib_qp_state_validate *qp_work =
container_of(work, struct ipoib_qp_state_validate, work);
struct ipoib_dev_priv *priv = qp_work->priv;
struct ib_qp_attr qp_attr;
struct ib_qp_init_attr query_init_attr;
int ret;
ret = ib_query_qp(priv->qp, &qp_attr, IB_QP_STATE, &query_init_attr);
if (ret) {
ipoib_warn(priv, "%s: Failed to query QP ret: %d\n",
__func__, ret);
goto free_res;
}
pr_info("%s: QP: 0x%x is in state: %d\n",
__func__, priv->qp->qp_num, qp_attr.qp_state);
/* currently only the SQE->RTS transition is supported */
if (qp_attr.qp_state == IB_QPS_SQE) {
qp_attr.qp_state = IB_QPS_RTS;
ret = ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE);
if (ret) {
pr_warn("failed(%d) modify QP:0x%x SQE->RTS\n",
ret, priv->qp->qp_num);
goto free_res;
}
pr_info("%s: QP: 0x%x moved from IB_QPS_SQE to IB_QPS_RTS\n",
__func__, priv->qp->qp_num);
} else {
pr_warn("QP (%d) will stay in state: %d\n",
priv->qp->qp_num, qp_attr.qp_state);
}
free_res:
kfree(qp_work);
}
static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
unsigned int wr_id = wc->wr_id;
struct ipoib_tx_buf *tx_req;
ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
wr_id, wc->status);
if (unlikely(wr_id >= ipoib_sendq_size)) {
ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",
wr_id, ipoib_sendq_size);
return;
}
tx_req = &priv->tx_ring[wr_id];
ipoib_dma_unmap_tx(priv, tx_req);
++dev->stats.tx_packets;
dev->stats.tx_bytes += tx_req->skb->len;
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
++priv->global_tx_tail;
if (unlikely(netif_queue_stopped(dev) &&
((priv->global_tx_head - priv->global_tx_tail) <=
ipoib_sendq_size >> 1) &&
test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)))
netif_wake_queue(dev);
if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) {
struct ipoib_qp_state_validate *qp_work;
ipoib_warn(priv,
"failed send event (status=%d, wrid=%d vend_err %#x)\n",
wc->status, wr_id, wc->vendor_err);
qp_work = kzalloc(sizeof(*qp_work), GFP_ATOMIC);
if (!qp_work)
return;
INIT_WORK(&qp_work->work, ipoib_qp_state_validate_work);
qp_work->priv = priv;
queue_work(priv->wq, &qp_work->work);
}
}
static int poll_tx(struct ipoib_dev_priv *priv)
{
int n, i;
struct ib_wc *wc;
n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
for (i = 0; i < n; ++i) {
wc = priv->send_wc + i;
if (wc->wr_id & IPOIB_OP_CM)
ipoib_cm_handle_tx_wc(priv->dev, priv->send_wc + i);
else
ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
}
return n == MAX_SEND_CQE;
}
int ipoib_rx_poll(struct napi_struct *napi, int budget)
{
struct ipoib_dev_priv *priv =
container_of(napi, struct ipoib_dev_priv, recv_napi);
struct net_device *dev = priv->dev;
int done;
int t;
int n, i;
done = 0;
poll_more:
while (done < budget) {
int max = (budget - done);
t = min(IPOIB_NUM_WC, max);
n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
for (i = 0; i < n; i++) {
struct ib_wc *wc = priv->ibwc + i;
if (wc->wr_id & IPOIB_OP_RECV) {
++done;
if (wc->wr_id & IPOIB_OP_CM)
ipoib_cm_handle_rx_wc(dev, wc);
else
ipoib_ib_handle_rx_wc(dev, wc);
} else {
pr_warn("%s: Got unexpected wqe id\n", __func__);
}
}
if (n != t)
break;
}
if (done < budget) {
napi_complete(napi);
if (unlikely(ib_req_notify_cq(priv->recv_cq,
IB_CQ_NEXT_COMP |
IB_CQ_REPORT_MISSED_EVENTS)) &&
napi_reschedule(napi))
goto poll_more;
}
return done;
}
int ipoib_tx_poll(struct napi_struct *napi, int budget)
{
struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv,
send_napi);
struct net_device *dev = priv->dev;
int n, i;
struct ib_wc *wc;
poll_more:
n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
for (i = 0; i < n; i++) {
wc = priv->send_wc + i;
if (wc->wr_id & IPOIB_OP_CM)
ipoib_cm_handle_tx_wc(dev, wc);
else
ipoib_ib_handle_tx_wc(dev, wc);
}
if (n < budget) {
napi_complete(napi);
if (unlikely(ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
IB_CQ_REPORT_MISSED_EVENTS)) &&
napi_reschedule(napi))
goto poll_more;
}
return n < 0 ? 0 : n;
}
void ipoib_ib_rx_completion(struct ib_cq *cq, void *ctx_ptr)
{
struct ipoib_dev_priv *priv = ctx_ptr;
napi_schedule(&priv->recv_napi);
}
void ipoib_ib_tx_completion(struct ib_cq *cq, void *ctx_ptr)
{
struct ipoib_dev_priv *priv = ctx_ptr;
napi_schedule(&priv->send_napi);
}
static inline int post_send(struct ipoib_dev_priv *priv,
unsigned int wr_id,
struct ib_ah *address, u32 dqpn,
struct ipoib_tx_buf *tx_req,
void *head, int hlen)
{
struct sk_buff *skb = tx_req->skb;
ipoib_build_sge(priv, tx_req);
priv->tx_wr.wr.wr_id = wr_id;
priv->tx_wr.remote_qpn = dqpn;
priv->tx_wr.ah = address;
if (head) {
priv->tx_wr.mss = skb_shinfo(skb)->gso_size;
priv->tx_wr.header = head;
priv->tx_wr.hlen = hlen;
priv->tx_wr.wr.opcode = IB_WR_LSO;
} else
priv->tx_wr.wr.opcode = IB_WR_SEND;
return ib_post_send(priv->qp, &priv->tx_wr.wr, NULL);
}
int ipoib_send(struct net_device *dev, struct sk_buff *skb,
struct ib_ah *address, u32 dqpn)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ipoib_tx_buf *tx_req;
int hlen, rc;
void *phead;
unsigned int usable_sge = priv->max_send_sge - !!skb_headlen(skb);
if (skb_is_gso(skb)) {
hlen = skb_tcp_all_headers(skb);
phead = skb->data;
if (unlikely(!skb_pull(skb, hlen))) {
ipoib_warn(priv, "linear data too small\n");
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
return -1;
}
} else {
if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN);
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu);
return -1;
}
phead = NULL;
hlen = 0;
}
if (skb_shinfo(skb)->nr_frags > usable_sge) {
if (skb_linearize(skb) < 0) {
ipoib_warn(priv, "skb could not be linearized\n");
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
return -1;
}
/* Does skb_linearize return ok without reducing nr_frags? */
if (skb_shinfo(skb)->nr_frags > usable_sge) {
ipoib_warn(priv, "too many frags after skb linearize\n");
++dev->stats.tx_dropped;
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
return -1;
}
}
ipoib_dbg_data(priv,
"sending packet, length=%d address=%p dqpn=0x%06x\n",
skb->len, address, dqpn);
/*
* We put the skb into the tx_ring _before_ we call post_send()
* because it's entirely possible that the completion handler will
* run before we execute anything after the post_send(). That
* means we have to make sure everything is properly recorded and
* our state is consistent before we call post_send().
*/
tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb;
if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
++dev->stats.tx_errors;
dev_kfree_skb_any(skb);
return -1;
}
if (skb->ip_summed == CHECKSUM_PARTIAL)
priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
else
priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
/* increase the tx_head after send success, but use it for queue state */
if ((priv->global_tx_head - priv->global_tx_tail) ==
ipoib_sendq_size - 1) {
ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
netif_stop_queue(dev);
}
skb_orphan(skb);
skb_dst_drop(skb);
if (netif_queue_stopped(dev))
if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP |
IB_CQ_REPORT_MISSED_EVENTS) < 0)
ipoib_warn(priv, "request notify on send CQ failed\n");
rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address, dqpn, tx_req, phead, hlen);
if (unlikely(rc)) {
ipoib_warn(priv, "post_send failed, error %d\n", rc);
++dev->stats.tx_errors;
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(skb);
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
rc = 0;
} else {
netif_trans_update(dev);
rc = priv->tx_head;
++priv->tx_head;
++priv->global_tx_head;
}
return rc;
}
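/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the TX
 * ring indexing used above relies on ipoib_sendq_size being a power of two,
 * so the slot for a free-running head counter is a simple mask, while the
 * number of in-flight sends is the unsigned difference of head and tail.
 */
static unsigned int __maybe_unused ipoib_example_tx_slot(unsigned int head)
{
	/* equivalent to head % ipoib_sendq_size for a power-of-two size */
	return head & (ipoib_sendq_size - 1);
}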
static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
{
struct ipoib_ah *ah, *tah;
unsigned long flags;
netif_tx_lock_bh(priv->dev);
spin_lock_irqsave(&priv->lock, flags);
list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
if ((int) priv->tx_tail - (int) ah->last_send >= 0) {
list_del(&ah->list);
rdma_destroy_ah(ah->ah, 0);
kfree(ah);
}
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(priv->dev);
}
void ipoib_reap_ah(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
ipoib_reap_dead_ahs(priv);
if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
}
static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv)
{
clear_bit(IPOIB_STOP_REAPER, &priv->flags);
queue_delayed_work(priv->wq, &priv->ah_reap_task,
round_jiffies_relative(HZ));
}
static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv)
{
set_bit(IPOIB_STOP_REAPER, &priv->flags);
cancel_delayed_work(&priv->ah_reap_task);
/*
* After ipoib_stop_ah_reaper() we always go through
* ipoib_reap_dead_ahs() which ensures the work is really stopped and
* does a final flush of the dead_ahs list
*/
}
static int recvs_pending(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int pending = 0;
int i;
for (i = 0; i < ipoib_recvq_size; ++i)
if (priv->rx_ring[i].skb)
++pending;
return pending;
}
static void check_qp_movement_and_print(struct ipoib_dev_priv *priv,
struct ib_qp *qp,
enum ib_qp_state new_state)
{
struct ib_qp_attr qp_attr;
struct ib_qp_init_attr query_init_attr;
int ret;
ret = ib_query_qp(qp, &qp_attr, IB_QP_STATE, &query_init_attr);
if (ret) {
ipoib_warn(priv, "%s: Failed to query QP\n", __func__);
return;
}
/* print according to the new state and the previous state. */
if (new_state == IB_QPS_ERR && qp_attr.qp_state == IB_QPS_RESET)
ipoib_dbg(priv, "Failed modify QP, IB_QPS_RESET to IB_QPS_ERR, acceptable\n");
else
ipoib_warn(priv, "Failed to modify QP to state: %d from state: %d\n",
new_state, qp_attr.qp_state);
}
static void ipoib_napi_enable(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
napi_enable(&priv->recv_napi);
napi_enable(&priv->send_napi);
}
static void ipoib_napi_disable(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
napi_disable(&priv->recv_napi);
napi_disable(&priv->send_napi);
}
int ipoib_ib_dev_stop_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct ib_qp_attr qp_attr;
unsigned long begin;
struct ipoib_tx_buf *tx_req;
int i;
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_napi_disable(dev);
ipoib_cm_dev_stop(dev);
/*
* Move our QP to the error state and then reinitialize it
* when all work requests have completed or have been flushed.
*/
qp_attr.qp_state = IB_QPS_ERR;
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
check_qp_movement_and_print(priv, priv->qp, IB_QPS_ERR);
/* Wait for all sends and receives to complete */
begin = jiffies;
while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
if (time_after(jiffies, begin + 5 * HZ)) {
ipoib_warn(priv,
"timing out; %d sends %d receives not completed\n",
priv->tx_head - priv->tx_tail,
recvs_pending(dev));
/*
* assume the HW is wedged and just free up
* all our pending work requests.
*/
while ((int)priv->tx_tail - (int)priv->tx_head < 0) {
tx_req = &priv->tx_ring[priv->tx_tail &
(ipoib_sendq_size - 1)];
ipoib_dma_unmap_tx(priv, tx_req);
dev_kfree_skb_any(tx_req->skb);
++priv->tx_tail;
++priv->global_tx_tail;
}
for (i = 0; i < ipoib_recvq_size; ++i) {
struct ipoib_rx_buf *rx_req;
rx_req = &priv->rx_ring[i];
if (!rx_req->skb)
continue;
ipoib_ud_dma_unmap_rx(priv,
priv->rx_ring[i].mapping);
dev_kfree_skb_any(rx_req->skb);
rx_req->skb = NULL;
}
goto timeout;
}
ipoib_drain_cq(dev);
usleep_range(1000, 2000);
}
ipoib_dbg(priv, "All sends and receives done.\n");
timeout:
qp_attr.qp_state = IB_QPS_RESET;
if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
ipoib_warn(priv, "Failed to modify QP to RESET state\n");
ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
return 0;
}
int ipoib_ib_dev_open_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int ret;
ret = ipoib_init_qp(dev);
if (ret) {
ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret);
return -1;
}
ret = ipoib_ib_post_receives(dev);
if (ret) {
ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
goto out;
}
ret = ipoib_cm_dev_open(dev);
if (ret) {
ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret);
goto out;
}
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_napi_enable(dev);
return 0;
out:
return -1;
}
int ipoib_ib_dev_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_pkey_dev_check_presence(dev);
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
ipoib_warn(priv, "P_Key 0x%04x is %s\n", priv->pkey,
(!(priv->pkey & 0x7fff) ? "Invalid" : "not found"));
return -1;
}
ipoib_start_ah_reaper(priv);
if (priv->rn_ops->ndo_open(dev)) {
pr_warn("%s: Failed to open dev\n", dev->name);
goto dev_stop;
}
set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
return 0;
dev_stop:
ipoib_stop_ah_reaper(priv);
return -1;
}
void ipoib_ib_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
priv->rn_ops->ndo_stop(dev);
clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
ipoib_stop_ah_reaper(priv);
}
void ipoib_pkey_dev_check_presence(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
struct rdma_netdev *rn = netdev_priv(dev);
if (!(priv->pkey & 0x7fff) ||
ib_find_pkey(priv->ca, priv->port, priv->pkey,
&priv->pkey_index)) {
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
} else {
if (rn->set_id)
rn->set_id(dev, priv->pkey_index);
set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
}
}
void ipoib_ib_dev_up(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_pkey_dev_check_presence(dev);
if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
ipoib_dbg(priv, "PKEY is not assigned.\n");
return;
}
set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_mcast_start_thread(dev);
}
void ipoib_ib_dev_down(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg(priv, "downing ib_dev\n");
clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
netif_carrier_off(dev);
ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
ipoib_flush_paths(dev);
}
void ipoib_drain_cq(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int i, n;
/*
* We call completion handling routines that expect to be
* called from the BH-disabled NAPI poll context, so disable
* BHs here too.
*/
local_bh_disable();
do {
n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
for (i = 0; i < n; ++i) {
/*
* Convert any successful completions to flush
* errors to avoid passing packets up the
* stack after bringing the device down.
*/
if (priv->ibwc[i].status == IB_WC_SUCCESS)
priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR;
if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) {
if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
else
ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
} else {
pr_warn("%s: Got unexpected wqe id\n", __func__);
}
}
} while (n == IPOIB_NUM_WC);
while (poll_tx(priv))
; /* nothing */
local_bh_enable();
}
/*
* Take whatever value is in pkey index 0 and update priv->pkey.
* Returns 0 if the pkey value was changed.
*/
static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
{
int result;
u16 prev_pkey;
prev_pkey = priv->pkey;
result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
if (result) {
ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
priv->port, result);
return result;
}
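/* Set the full-membership bit on the P_Key read from index 0 */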
priv->pkey |= 0x8000;
if (prev_pkey != priv->pkey) {
ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
prev_pkey, priv->pkey);
/*
* Update the pkey in the broadcast address, while making sure to set
* the full membership bit, so that we join the right broadcast group.
*/
priv->dev->broadcast[8] = priv->pkey >> 8;
priv->dev->broadcast[9] = priv->pkey & 0xff;
return 0;
}
return 1;
}
/*
* Returns 0 if the pkey value was found in a different slot or is no longer present.
*/
static inline int update_child_pkey(struct ipoib_dev_priv *priv)
{
u16 old_index = priv->pkey_index;
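/* Reset the cached index and rescan: ipoib_pkey_dev_check_presence() refreshes pkey_index */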
priv->pkey_index = 0;
ipoib_pkey_dev_check_presence(priv->dev);
if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
(old_index == priv->pkey_index))
return 1;
return 0;
}
/*
* Returns true if the device address of the ipoib interface has changed and the
* new address is a valid one (i.e. in the gid table), false otherwise.
*/
static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
union ib_gid search_gid;
union ib_gid gid0;
int err;
u16 index;
u32 port;
bool ret = false;
if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
return false;
netif_addr_lock_bh(priv->dev);
/* The subnet prefix may have changed, update it now so we won't have
* to do it later
*/
priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
dev_addr_mod(priv->dev, 4, (u8 *)&gid0.global.subnet_prefix,
sizeof(gid0.global.subnet_prefix));
search_gid.global.subnet_prefix = gid0.global.subnet_prefix;
search_gid.global.interface_id = priv->local_gid.global.interface_id;
netif_addr_unlock_bh(priv->dev);
err = ib_find_gid(priv->ca, &search_gid, &port, &index);
netif_addr_lock_bh(priv->dev);
if (search_gid.global.interface_id !=
priv->local_gid.global.interface_id)
/* There was a change while we were looking up the gid, bail
* here and let the next work sort this out
*/
goto out;
/* The next section of code needs some background:
* Per IB spec the port GUID can't change if the HCA is powered on.
* port GUID is the basis for GID at index 0 which is the basis for
* the default device address of a ipoib interface.
*
* so it seems the flow should be:
* if user_changed_dev_addr && gid in gid tbl
* set bit dev_addr_set
* return true
* else
* return false
*
* The issue is that there are devices that don't follow the spec and
* change the port GUID while the HCA is powered on, so in order not
* to break userspace applications, we need to check whether the user
* wanted to control the device address, and we assume that if the
* user sets the device address back to be based on GID index 0, they
* no longer wish to control it.
*
* If IPOIB_FLAG_DEV_ADDR_SET is set, the user does not control the
* device address (IPOIB_FLAG_DEV_ADDR_CTRL is clear) and
* ib_find_gid() failed, it means the port GUID has changed and with
* it the GID at index 0, so we need to update priv->local_gid and
* priv->dev->dev_addr to reflect the new GID.
*/
if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) {
if (!err && port == priv->port) {
set_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
if (index == 0)
clear_bit(IPOIB_FLAG_DEV_ADDR_CTRL,
&priv->flags);
else
set_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags);
ret = true;
} else {
ret = false;
}
} else {
if (!err && port == priv->port) {
ret = true;
} else {
if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
memcpy(&priv->local_gid, &gid0,
sizeof(priv->local_gid));
dev_addr_mod(priv->dev, 4, (u8 *)&gid0,
sizeof(priv->local_gid));
ret = true;
}
}
}
out:
netif_addr_unlock_bh(priv->dev);
return ret;
}
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
enum ipoib_flush_level level,
int nesting)
{
struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev;
int result;
down_read_nested(&priv->vlan_rwsem, nesting);
/*
* Flush any child interfaces too -- they might be up even if
* the parent is down.
*/
list_for_each_entry(cpriv, &priv->child_intfs, list)
__ipoib_ib_dev_flush(cpriv, level, nesting + 1);
up_read(&priv->vlan_rwsem);
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags) &&
level != IPOIB_FLUSH_HEAVY) {
/* Make sure the dev_addr is set even if not flushing */
if (level == IPOIB_FLUSH_LIGHT)
ipoib_dev_addr_changed_valid(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
return;
}
if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
/* interface is down. update pkey and leave. */
if (level == IPOIB_FLUSH_HEAVY) {
if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
update_parent_pkey(priv);
else
update_child_pkey(priv);
} else if (level == IPOIB_FLUSH_LIGHT)
ipoib_dev_addr_changed_valid(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n");
return;
}
if (level == IPOIB_FLUSH_HEAVY) {
/* child devices chase their origin pkey value, while non-child
* (parent) devices should always take what is present in pkey index 0
*/
if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
result = update_child_pkey(priv);
if (result) {
/* restart QP only if P_Key index is changed */
ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
return;
}
} else {
result = update_parent_pkey(priv);
/* restart QP only if P_Key value changed */
if (result) {
ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
return;
}
}
}
if (level == IPOIB_FLUSH_LIGHT) {
int oper_up;
ipoib_mark_paths_invalid(dev);
/* Mark IPoIB operation as down to prevent races between the flush
* flow, which leaves the MCG, and on-the-fly joins that can happen
* during that time. The mcast restart task should deal with join
* requests we missed.
*/
oper_up = test_and_clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_mcast_dev_flush(dev);
if (oper_up)
set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
ipoib_reap_dead_ahs(priv);
}
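/* Flush levels are ordered, so both NORMAL and HEAVY bring the IB device down here */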
if (level >= IPOIB_FLUSH_NORMAL)
ipoib_ib_dev_down(dev);
if (level == IPOIB_FLUSH_HEAVY) {
if (test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags))
ipoib_ib_dev_stop(dev);
if (ipoib_ib_dev_open(dev))
return;
if (netif_queue_stopped(dev))
netif_start_queue(dev);
}
/*
* The device could have been brought down between the start and when
* we get here, don't bring it back up if it's not configured up
*/
if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) {
if (level >= IPOIB_FLUSH_NORMAL)
ipoib_ib_dev_up(dev);
if (ipoib_dev_addr_changed_valid(priv))
ipoib_mcast_restart_task(&priv->restart_task);
}
}
void ipoib_ib_dev_flush_light(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_light);
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT, 0);
}
void ipoib_ib_dev_flush_normal(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_normal);
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL, 0);
}
void ipoib_ib_dev_flush_heavy(struct work_struct *work)
{
struct ipoib_dev_priv *priv =
container_of(work, struct ipoib_dev_priv, flush_heavy);
rtnl_lock();
__ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY, 0);
rtnl_unlock();
}
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
ipoib_dbg(priv, "cleaning up ib_dev\n");
/*
* We must make sure there are no more (path) completions
* that may wish to touch priv fields that are no longer valid
*/
ipoib_flush_paths(dev);
ipoib_mcast_stop_thread(dev);
ipoib_mcast_dev_flush(dev);
/*
* None of our ah references are freed until after
* ipoib_mcast_dev_flush(), ipoib_flush_paths() and
* the neighbor garbage collection are stopped and reaped.
* That should all be done now, so make a final ah flush.
*/
ipoib_reap_dead_ahs(priv);
clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
priv->rn_ops->ndo_uninit(dev);
if (priv->pd) {
ib_dealloc_pd(priv->pd);
priv->pd = NULL;
}
}
| linux-master | drivers/infiniband/ulp/ipoib/ipoib_ib.c |
/*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
struct file_operations;
#include <linux/debugfs.h>
#include <linux/export.h>
#include "ipoib.h"
static struct dentry *ipoib_root;
static void format_gid(union ib_gid *gid, char *buf)
{
int i, n;
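/* Print the GID as eight colon-separated 16-bit hex groups */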
for (n = 0, i = 0; i < 8; ++i) {
n += sprintf(buf + n, "%x",
be16_to_cpu(((__be16 *) gid->raw)[i]));
if (i < 7)
buf[n++] = ':';
}
}
static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos)
{
struct ipoib_mcast_iter *iter;
loff_t n = *pos;
iter = ipoib_mcast_iter_init(file->private);
if (!iter)
return NULL;
while (n--) {
if (ipoib_mcast_iter_next(iter)) {
kfree(iter);
return NULL;
}
}
return iter;
}
static void *ipoib_mcg_seq_next(struct seq_file *file, void *iter_ptr,
loff_t *pos)
{
struct ipoib_mcast_iter *iter = iter_ptr;
(*pos)++;
if (ipoib_mcast_iter_next(iter)) {
kfree(iter);
return NULL;
}
return iter;
}
static void ipoib_mcg_seq_stop(struct seq_file *file, void *iter_ptr)
{
/* nothing for now */
}
static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
{
struct ipoib_mcast_iter *iter = iter_ptr;
char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
union ib_gid mgid;
unsigned long created;
unsigned int queuelen, complete, send_only;
if (!iter)
return 0;
ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen,
&complete, &send_only);
format_gid(&mgid, gid_buf);
seq_printf(file,
"GID: %s\n"
" created: %10ld\n"
" queuelen: %9d\n"
" complete: %9s\n"
" send_only: %8s\n"
"\n",
gid_buf, created, queuelen,
complete ? "yes" : "no",
send_only ? "yes" : "no");
return 0;
}
static const struct seq_operations ipoib_mcg_sops = {
.start = ipoib_mcg_seq_start,
.next = ipoib_mcg_seq_next,
.stop = ipoib_mcg_seq_stop,
.show = ipoib_mcg_seq_show,
};
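/* DEFINE_SEQ_ATTRIBUTE() generates ipoib_mcg_open() and ipoib_mcg_fops from ipoib_mcg_sops */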
DEFINE_SEQ_ATTRIBUTE(ipoib_mcg);
static void *ipoib_path_seq_start(struct seq_file *file, loff_t *pos)
{
struct ipoib_path_iter *iter;
loff_t n = *pos;
iter = ipoib_path_iter_init(file->private);
if (!iter)
return NULL;
while (n--) {
if (ipoib_path_iter_next(iter)) {
kfree(iter);
return NULL;
}
}
return iter;
}
static void *ipoib_path_seq_next(struct seq_file *file, void *iter_ptr,
loff_t *pos)
{
struct ipoib_path_iter *iter = iter_ptr;
(*pos)++;
if (ipoib_path_iter_next(iter)) {
kfree(iter);
return NULL;
}
return iter;
}
static void ipoib_path_seq_stop(struct seq_file *file, void *iter_ptr)
{
/* nothing for now */
}
static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
{
struct ipoib_path_iter *iter = iter_ptr;
char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
struct ipoib_path path;
int rate;
if (!iter)
return 0;
ipoib_path_iter_read(iter, &path);
format_gid(&path.pathrec.dgid, gid_buf);
seq_printf(file,
"GID: %s\n"
" complete: %6s\n",
gid_buf, sa_path_get_dlid(&path.pathrec) ? "yes" : "no");
if (sa_path_get_dlid(&path.pathrec)) {
rate = ib_rate_to_mbps(path.pathrec.rate);
seq_printf(file,
" DLID: 0x%04x\n"
" SL: %12d\n"
" rate: %8d.%d Gb/sec\n",
be32_to_cpu(sa_path_get_dlid(&path.pathrec)),
path.pathrec.sl,
rate / 1000, rate % 1000);
}
seq_putc(file, '\n');
return 0;
}
static const struct seq_operations ipoib_path_sops = {
.start = ipoib_path_seq_start,
.next = ipoib_path_seq_next,
.stop = ipoib_path_seq_stop,
.show = ipoib_path_seq_show,
};
DEFINE_SEQ_ATTRIBUTE(ipoib_path);
void ipoib_create_debug_files(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
char name[IFNAMSIZ + sizeof("_path")];
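/* The buffer is sized for the longer "_path" suffix, so it also fits "_mcg" */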
snprintf(name, sizeof(name), "%s_mcg", dev->name);
priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
ipoib_root, dev, &ipoib_mcg_fops);
snprintf(name, sizeof(name), "%s_path", dev->name);
priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
ipoib_root, dev, &ipoib_path_fops);
}
void ipoib_delete_debug_files(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
debugfs_remove(priv->mcg_dentry);
debugfs_remove(priv->path_dentry);
priv->mcg_dentry = priv->path_dentry = NULL;
}
void ipoib_register_debugfs(void)
{
ipoib_root = debugfs_create_dir("ipoib", NULL);
}
void ipoib_unregister_debugfs(void)
{
debugfs_remove(ipoib_root);
}
| linux-master | drivers/infiniband/ulp/ipoib/ipoib_fs.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* RDMA Network Block Driver
*
* Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
*/
#include "rtrs.h"
#include "rtrs-pri.h"
#include "rtrs-srv.h"
/*
* We include this last to have the helpers above available for the trace
* event implementations.
*/
#define CREATE_TRACE_POINTS
#include "rtrs-srv-trace.h"
| linux-master | drivers/infiniband/ulp/rtrs/rtrs-srv-trace.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDMA Transport Layer
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include "rtrs-pri.h"
#include "rtrs-srv.h"
#include "rtrs-log.h"
static void rtrs_srv_release(struct kobject *kobj)
{
struct rtrs_srv_path *srv_path;
srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
kfree(srv_path);
}
static struct kobj_type ktype = {
.sysfs_ops = &kobj_sysfs_ops,
.release = rtrs_srv_release,
};
static ssize_t rtrs_srv_disconnect_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
}
static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct rtrs_srv_path *srv_path;
struct rtrs_path *s;
char str[MAXHOSTNAMELEN];
srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
s = &srv_path->s;
if (!sysfs_streq(buf, "1")) {
rtrs_err(s, "%s: invalid value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, str,
sizeof(str));
rtrs_info(s, "disconnect for path %s requested\n", str);
/* first remove sysfs itself to avoid deadlock */
sysfs_remove_file_self(&srv_path->kobj, &attr->attr);
close_path(srv_path);
return count;
}
static struct kobj_attribute rtrs_srv_disconnect_attr =
__ATTR(disconnect, 0644,
rtrs_srv_disconnect_show, rtrs_srv_disconnect_store);
static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
struct rtrs_srv_path *srv_path;
struct rtrs_con *usr_con;
srv_path = container_of(kobj, typeof(*srv_path), kobj);
usr_con = srv_path->s.con[0];
return sysfs_emit(page, "%u\n", usr_con->cm_id->port_num);
}
static struct kobj_attribute rtrs_srv_hca_port_attr =
__ATTR(hca_port, 0444, rtrs_srv_hca_port_show, NULL);
static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
struct rtrs_srv_path *srv_path;
srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
return sysfs_emit(page, "%s\n", srv_path->s.dev->ib_dev->name);
}
static struct kobj_attribute rtrs_srv_hca_name_attr =
__ATTR(hca_name, 0444, rtrs_srv_hca_name_show, NULL);
static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
struct rtrs_srv_path *srv_path;
int cnt;
srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
cnt = sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr,
page, PAGE_SIZE);
return cnt + sysfs_emit_at(page, cnt, "\n");
}
static struct kobj_attribute rtrs_srv_src_addr_attr =
__ATTR(src_addr, 0444, rtrs_srv_src_addr_show, NULL);
static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
struct rtrs_srv_path *srv_path;
int len;
srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
len = sockaddr_to_str((struct sockaddr *)&srv_path->s.src_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
}
static struct kobj_attribute rtrs_srv_dst_addr_attr =
__ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL);
static struct attribute *rtrs_srv_path_attrs[] = {
&rtrs_srv_hca_name_attr.attr,
&rtrs_srv_hca_port_attr.attr,
&rtrs_srv_src_addr_attr.attr,
&rtrs_srv_dst_addr_attr.attr,
&rtrs_srv_disconnect_attr.attr,
NULL,
};
static const struct attribute_group rtrs_srv_path_attr_group = {
.attrs = rtrs_srv_path_attrs,
};
STAT_ATTR(struct rtrs_srv_stats, rdma,
rtrs_srv_stats_rdma_to_str,
rtrs_srv_reset_rdma_stats);
static struct attribute *rtrs_srv_stats_attrs[] = {
&rdma_attr.attr,
NULL,
};
static const struct attribute_group rtrs_srv_stats_attr_group = {
.attrs = rtrs_srv_stats_attrs,
};
static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
int err = 0;
mutex_lock(&srv->paths_mutex);
if (srv->dev_ref++) {
/*
* Device needs to be registered only on the first session
*/
goto unlock;
}
srv->dev.class = &rtrs_dev_class;
err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname);
if (err)
goto unlock;
/*
* Suppress user space notification until
* sysfs files are created
*/
dev_set_uevent_suppress(&srv->dev, true);
err = device_add(&srv->dev);
if (err) {
pr_err("device_add(): %d\n", err);
put_device(&srv->dev);
goto unlock;
}
srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
if (!srv->kobj_paths) {
err = -ENOMEM;
pr_err("kobject_create_and_add(): %d\n", err);
device_del(&srv->dev);
put_device(&srv->dev);
goto unlock;
}
dev_set_uevent_suppress(&srv->dev, false);
kobject_uevent(&srv->dev.kobj, KOBJ_ADD);
unlock:
mutex_unlock(&srv->paths_mutex);
return err;
}
static void
rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
mutex_lock(&srv->paths_mutex);
if (!--srv->dev_ref) {
kobject_put(srv->kobj_paths);
mutex_unlock(&srv->paths_mutex);
device_del(&srv->dev);
put_device(&srv->dev);
} else {
put_device(&srv->dev);
mutex_unlock(&srv->paths_mutex);
}
}
static void rtrs_srv_path_stats_release(struct kobject *kobj)
{
struct rtrs_srv_stats *stats;
stats = container_of(kobj, struct rtrs_srv_stats, kobj_stats);
free_percpu(stats->rdma_stats);
kfree(stats);
}
static struct kobj_type ktype_stats = {
.sysfs_ops = &kobj_sysfs_ops,
.release = rtrs_srv_path_stats_release,
};
static int rtrs_srv_create_stats_files(struct rtrs_srv_path *srv_path)
{
int err;
struct rtrs_path *s = &srv_path->s;
err = kobject_init_and_add(&srv_path->stats->kobj_stats, &ktype_stats,
&srv_path->kobj, "stats");
if (err) {
rtrs_err(s, "kobject_init_and_add(): %d\n", err);
kobject_put(&srv_path->stats->kobj_stats);
return err;
}
err = sysfs_create_group(&srv_path->stats->kobj_stats,
&rtrs_srv_stats_attr_group);
if (err) {
rtrs_err(s, "sysfs_create_group(): %d\n", err);
goto err;
}
return 0;
err:
kobject_del(&srv_path->stats->kobj_stats);
kobject_put(&srv_path->stats->kobj_stats);
return err;
}
int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_path *s = &srv_path->s;
char str[NAME_MAX];
int err;
struct rtrs_addr path = {
.src = &srv_path->s.dst_addr,
.dst = &srv_path->s.src_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
err = rtrs_srv_create_once_sysfs_root_folders(srv_path);
if (err)
return err;
err = kobject_init_and_add(&srv_path->kobj, &ktype, srv->kobj_paths,
"%s", str);
if (err) {
rtrs_err(s, "kobject_init_and_add(): %d\n", err);
goto destroy_root;
}
err = sysfs_create_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
if (err) {
rtrs_err(s, "sysfs_create_group(): %d\n", err);
goto put_kobj;
}
err = rtrs_srv_create_stats_files(srv_path);
if (err)
goto remove_group;
return 0;
remove_group:
sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
put_kobj:
kobject_del(&srv_path->kobj);
destroy_root:
kobject_put(&srv_path->kobj);
rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
return err;
}
void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
{
if (srv_path->stats->kobj_stats.state_in_sysfs) {
sysfs_remove_group(&srv_path->stats->kobj_stats,
&rtrs_srv_stats_attr_group);
kobject_del(&srv_path->stats->kobj_stats);
kobject_put(&srv_path->stats->kobj_stats);
}
if (srv_path->kobj.state_in_sysfs) {
sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
kobject_put(&srv_path->kobj);
rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
}
| linux-master | drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDMA Transport Layer
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include "rtrs-srv.h"
int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
{
if (enable) {
int cpu;
struct rtrs_srv_stats_rdma_stats *r;
for_each_possible_cpu(cpu) {
r = per_cpu_ptr(stats->rdma_stats, cpu);
memset(r, 0, sizeof(*r));
}
return 0;
}
return -EINVAL;
}
ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page)
{
int cpu;
struct rtrs_srv_stats_rdma_stats sum;
struct rtrs_srv_stats_rdma_stats *r;
memset(&sum, 0, sizeof(sum));
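/* Sum the per-CPU counters into a single snapshot */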
for_each_possible_cpu(cpu) {
r = per_cpu_ptr(stats->rdma_stats, cpu);
sum.dir[READ].cnt += r->dir[READ].cnt;
sum.dir[READ].size_total += r->dir[READ].size_total;
sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
}
return sysfs_emit(page, "%llu %llu %llu %llu\n",
sum.dir[READ].cnt, sum.dir[READ].size_total,
sum.dir[WRITE].cnt, sum.dir[WRITE].size_total);
}
| linux-master | drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDMA Transport Layer
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include "rtrs-pri.h"
#include "rtrs-clt.h"
#include "rtrs-log.h"
#define MIN_MAX_RECONN_ATT -1
#define MAX_MAX_RECONN_ATT 9999
static void rtrs_clt_path_release(struct kobject *kobj)
{
struct rtrs_clt_path *clt_path;
clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
free_path(clt_path);
}
static struct kobj_type ktype_sess = {
.sysfs_ops = &kobj_sysfs_ops,
.release = rtrs_clt_path_release
};
static void rtrs_clt_path_stats_release(struct kobject *kobj)
{
struct rtrs_clt_stats *stats;
stats = container_of(kobj, struct rtrs_clt_stats, kobj_stats);
free_percpu(stats->pcpu_stats);
kfree(stats);
}
static struct kobj_type ktype_stats = {
.sysfs_ops = &kobj_sysfs_ops,
.release = rtrs_clt_path_stats_release,
};
static ssize_t max_reconnect_attempts_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
dev);
return sysfs_emit(page, "%d\n",
rtrs_clt_get_max_reconnect_attempts(clt));
}
static ssize_t max_reconnect_attempts_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
int value;
int ret;
struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
dev);
ret = kstrtoint(buf, 10, &value);
if (ret) {
rtrs_err(clt, "%s: failed to convert string '%s' to int\n",
attr->attr.name, buf);
return ret;
}
if (value > MAX_MAX_RECONN_ATT ||
value < MIN_MAX_RECONN_ATT) {
rtrs_err(clt,
"%s: invalid range (provided: '%s', accepted: min: %d, max: %d)\n",
attr->attr.name, buf, MIN_MAX_RECONN_ATT,
MAX_MAX_RECONN_ATT);
return -EINVAL;
}
rtrs_clt_set_max_reconnect_attempts(clt, value);
return count;
}
static DEVICE_ATTR_RW(max_reconnect_attempts);
static ssize_t mpath_policy_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
struct rtrs_clt_sess *clt;
clt = container_of(dev, struct rtrs_clt_sess, dev);
switch (clt->mp_policy) {
case MP_POLICY_RR:
return sysfs_emit(page, "round-robin (RR: %d)\n",
clt->mp_policy);
case MP_POLICY_MIN_INFLIGHT:
return sysfs_emit(page, "min-inflight (MI: %d)\n",
clt->mp_policy);
case MP_POLICY_MIN_LATENCY:
return sysfs_emit(page, "min-latency (ML: %d)\n",
clt->mp_policy);
default:
return sysfs_emit(page, "Unknown (%d)\n", clt->mp_policy);
}
}
static ssize_t mpath_policy_store(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct rtrs_clt_sess *clt;
int value;
int ret;
size_t len = 0;
clt = container_of(dev, struct rtrs_clt_sess, dev);
ret = kstrtoint(buf, 10, &value);
if (!ret && (value == MP_POLICY_RR ||
value == MP_POLICY_MIN_INFLIGHT ||
value == MP_POLICY_MIN_LATENCY)) {
clt->mp_policy = value;
return count;
}
/* use the length to tell the short form "mi" apart from "min-latency", which also starts with "mi" */
len = strnlen(buf, NAME_MAX);
if (buf[len - 1] == '\n')
len--;
if (!strncasecmp(buf, "round-robin", 11) ||
(len == 2 && !strncasecmp(buf, "rr", 2)))
clt->mp_policy = MP_POLICY_RR;
else if (!strncasecmp(buf, "min-inflight", 12) ||
(len == 2 && !strncasecmp(buf, "mi", 2)))
clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
else if (!strncasecmp(buf, "min-latency", 11) ||
(len == 2 && !strncasecmp(buf, "ml", 2)))
clt->mp_policy = MP_POLICY_MIN_LATENCY;
else
return -EINVAL;
return count;
}
static DEVICE_ATTR_RW(mpath_policy);
static ssize_t add_path_show(struct device *dev,
struct device_attribute *attr, char *page)
{
return sysfs_emit(page,
"Usage: echo [<source addr>@]<destination addr> > %s\n\n*addr ::= [ ip:<ipv4|ipv6> | gid:<gid> ]\n",
attr->attr.name);
}
static ssize_t add_path_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct sockaddr_storage srcaddr, dstaddr;
struct rtrs_addr addr = {
.src = &srcaddr,
.dst = &dstaddr
};
struct rtrs_clt_sess *clt;
const char *nl;
size_t len;
int err;
clt = container_of(dev, struct rtrs_clt_sess, dev);
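/* Parse only up to a trailing newline, if the sysfs write included one */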
nl = strchr(buf, '\n');
if (nl)
len = nl - buf;
else
len = count;
err = rtrs_addr_to_sockaddr(buf, len, clt->port, &addr);
if (err)
return -EINVAL;
err = rtrs_clt_create_path_from_sysfs(clt, &addr);
if (err)
return err;
return count;
}
static DEVICE_ATTR_RW(add_path);
static ssize_t rtrs_clt_state_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
struct rtrs_clt_path *clt_path;
clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (clt_path->state == RTRS_CLT_CONNECTED)
return sysfs_emit(page, "connected\n");
return sysfs_emit(page, "disconnected\n");
}
static struct kobj_attribute rtrs_clt_state_attr =
__ATTR(state, 0444, rtrs_clt_state_show, NULL);
static ssize_t rtrs_clt_reconnect_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
}
static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct rtrs_clt_path *clt_path;
int ret;
clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
ret = rtrs_clt_reconnect_from_sysfs(clt_path);
if (ret)
return ret;
return count;
}
static struct kobj_attribute rtrs_clt_reconnect_attr =
__ATTR(reconnect, 0644, rtrs_clt_reconnect_show,
rtrs_clt_reconnect_store);
static ssize_t rtrs_clt_disconnect_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
}
static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct rtrs_clt_path *clt_path;
clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
rtrs_clt_close_conns(clt_path, true);
return count;
}
static struct kobj_attribute rtrs_clt_disconnect_attr =
__ATTR(disconnect, 0644, rtrs_clt_disconnect_show,
rtrs_clt_disconnect_store);
static ssize_t rtrs_clt_remove_path_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return sysfs_emit(buf, "Usage: echo 1 > %s\n", attr->attr.name);
}
static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
struct rtrs_clt_path *clt_path;
int ret;
clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
ret = rtrs_clt_remove_path_from_sysfs(clt_path, &attr->attr);
if (ret)
return ret;
return count;
}
static struct kobj_attribute rtrs_clt_remove_path_attr =
__ATTR(remove_path, 0644, rtrs_clt_remove_path_show,
rtrs_clt_remove_path_store);
STAT_ATTR(struct rtrs_clt_stats, cpu_migration_from,
rtrs_clt_stats_migration_from_cnt_to_str,
rtrs_clt_reset_cpu_migr_stats);
STAT_ATTR(struct rtrs_clt_stats, cpu_migration_to,
rtrs_clt_stats_migration_to_cnt_to_str,
rtrs_clt_reset_cpu_migr_stats);
STAT_ATTR(struct rtrs_clt_stats, reconnects,
rtrs_clt_stats_reconnects_to_str,
rtrs_clt_reset_reconnects_stat);
STAT_ATTR(struct rtrs_clt_stats, rdma,
rtrs_clt_stats_rdma_to_str,
rtrs_clt_reset_rdma_stats);
STAT_ATTR(struct rtrs_clt_stats, reset_all,
rtrs_clt_reset_all_help,
rtrs_clt_reset_all_stats);
static struct attribute *rtrs_clt_stats_attrs[] = {
&cpu_migration_from_attr.attr,
&cpu_migration_to_attr.attr,
&reconnects_attr.attr,
&rdma_attr.attr,
&reset_all_attr.attr,
NULL
};
static const struct attribute_group rtrs_clt_stats_attr_group = {
.attrs = rtrs_clt_stats_attrs,
};
static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
struct rtrs_clt_path *clt_path;
clt_path = container_of(kobj, typeof(*clt_path), kobj);
return sysfs_emit(page, "%u\n", clt_path->hca_port);
}
static struct kobj_attribute rtrs_clt_hca_port_attr =
__ATTR(hca_port, 0444, rtrs_clt_hca_port_show, NULL);
static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
struct rtrs_clt_path *clt_path;
clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
return sysfs_emit(page, "%s\n", clt_path->hca_name);
}
static struct kobj_attribute rtrs_clt_hca_name_attr =
__ATTR(hca_name, 0444, rtrs_clt_hca_name_show, NULL);
static ssize_t rtrs_clt_cur_latency_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
struct rtrs_clt_path *clt_path;
clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
return sysfs_emit(page, "%lld ns\n",
ktime_to_ns(clt_path->s.hb_cur_latency));
}
static struct kobj_attribute rtrs_clt_cur_latency_attr =
__ATTR(cur_latency, 0444, rtrs_clt_cur_latency_show, NULL);
static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
struct rtrs_clt_path *clt_path;
int len;
clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
len = sockaddr_to_str((struct sockaddr *)&clt_path->s.src_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
}
static struct kobj_attribute rtrs_clt_src_addr_attr =
__ATTR(src_addr, 0444, rtrs_clt_src_addr_show, NULL);
static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
struct rtrs_clt_path *clt_path;
int len;
clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
len = sockaddr_to_str((struct sockaddr *)&clt_path->s.dst_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
}
static struct kobj_attribute rtrs_clt_dst_addr_attr =
__ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL);
static struct attribute *rtrs_clt_path_attrs[] = {
&rtrs_clt_hca_name_attr.attr,
&rtrs_clt_hca_port_attr.attr,
&rtrs_clt_src_addr_attr.attr,
&rtrs_clt_dst_addr_attr.attr,
&rtrs_clt_state_attr.attr,
&rtrs_clt_reconnect_attr.attr,
&rtrs_clt_disconnect_attr.attr,
&rtrs_clt_remove_path_attr.attr,
&rtrs_clt_cur_latency_attr.attr,
NULL,
};
static const struct attribute_group rtrs_clt_path_attr_group = {
.attrs = rtrs_clt_path_attrs,
};
int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_sess *clt = clt_path->clt;
char str[NAME_MAX];
int err;
struct rtrs_addr path = {
.src = &clt_path->s.src_addr,
.dst = &clt_path->s.dst_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
err = kobject_init_and_add(&clt_path->kobj, &ktype_sess,
clt->kobj_paths,
"%s", str);
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
kobject_put(&clt_path->kobj);
return err;
}
err = sysfs_create_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
if (err) {
pr_err("sysfs_create_group(): %d\n", err);
goto put_kobj;
}
err = kobject_init_and_add(&clt_path->stats->kobj_stats, &ktype_stats,
&clt_path->kobj, "stats");
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
kobject_put(&clt_path->stats->kobj_stats);
goto remove_group;
}
err = sysfs_create_group(&clt_path->stats->kobj_stats,
&rtrs_clt_stats_attr_group);
if (err) {
pr_err("failed to create stats sysfs group, err: %d\n", err);
goto put_kobj_stats;
}
return 0;
put_kobj_stats:
kobject_del(&clt_path->stats->kobj_stats);
kobject_put(&clt_path->stats->kobj_stats);
remove_group:
sysfs_remove_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
put_kobj:
kobject_del(&clt_path->kobj);
kobject_put(&clt_path->kobj);
return err;
}
void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
const struct attribute *sysfs_self)
{
kobject_del(&clt_path->stats->kobj_stats);
kobject_put(&clt_path->stats->kobj_stats);
if (sysfs_self)
sysfs_remove_file_self(&clt_path->kobj, sysfs_self);
kobject_del(&clt_path->kobj);
}
static struct attribute *rtrs_clt_attrs[] = {
&dev_attr_max_reconnect_attempts.attr,
&dev_attr_mpath_policy.attr,
&dev_attr_add_path.attr,
NULL,
};
static const struct attribute_group rtrs_clt_attr_group = {
.attrs = rtrs_clt_attrs,
};
int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt)
{
return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
}
void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt)
{
sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
if (clt->kobj_paths) {
kobject_del(clt->kobj_paths);
kobject_put(clt->kobj_paths);
}
}
| linux-master | drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* RDMA Network Block Driver
*
* Copyright (c) 2022 1&1 IONOS SE. All rights reserved.
*/
#include "rtrs.h"
#include "rtrs-clt.h"
/*
* We include this last to have the helpers above available for the trace
* event implementations.
*/
#define CREATE_TRACE_POINTS
#include "rtrs-clt-trace.h"
| linux-master | drivers/infiniband/ulp/rtrs/rtrs-clt-trace.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDMA Transport Layer
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include <linux/module.h>
#include "rtrs-srv.h"
#include "rtrs-log.h"
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
#include "rtrs-srv-trace.h"
MODULE_DESCRIPTION("RDMA Transport Server");
MODULE_LICENSE("GPL");
/* Must be power of 2, see mask from mr->page_size in ib_sg_to_pages() */
#define DEFAULT_MAX_CHUNK_SIZE (128 << 10)
#define DEFAULT_SESS_QUEUE_DEPTH 512
#define MAX_HDR_SIZE PAGE_SIZE
static struct rtrs_rdma_dev_pd dev_pd;
const struct class rtrs_dev_class = {
.name = "rtrs-server",
};
static struct rtrs_srv_ib_ctx ib_ctx;
static int __read_mostly max_chunk_size = DEFAULT_MAX_CHUNK_SIZE;
static int __read_mostly sess_queue_depth = DEFAULT_SESS_QUEUE_DEPTH;
static bool always_invalidate = true;
module_param(always_invalidate, bool, 0444);
MODULE_PARM_DESC(always_invalidate,
"Invalidate memory registration for contiguous memory regions before accessing.");
module_param_named(max_chunk_size, max_chunk_size, int, 0444);
MODULE_PARM_DESC(max_chunk_size,
"Max size for each IO request, when change the unit is in byte (default: "
__stringify(DEFAULT_MAX_CHUNK_SIZE) "KB)");
module_param_named(sess_queue_depth, sess_queue_depth, int, 0444);
MODULE_PARM_DESC(sess_queue_depth,
"Number of buffers for pending I/O requests to allocate per session. Maximum: "
__stringify(MAX_SESS_QUEUE_DEPTH) " (default: "
__stringify(DEFAULT_SESS_QUEUE_DEPTH) ")");
static cpumask_t cq_affinity_mask = { CPU_BITS_ALL };
static struct workqueue_struct *rtrs_wq;
static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
{
return container_of(c, struct rtrs_srv_con, c);
}
static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
enum rtrs_srv_state new_state)
{
enum rtrs_srv_state old_state;
bool changed = false;
spin_lock_irq(&srv_path->state_lock);
old_state = srv_path->state;
switch (new_state) {
case RTRS_SRV_CONNECTED:
if (old_state == RTRS_SRV_CONNECTING)
changed = true;
break;
case RTRS_SRV_CLOSING:
if (old_state == RTRS_SRV_CONNECTING ||
old_state == RTRS_SRV_CONNECTED)
changed = true;
break;
case RTRS_SRV_CLOSED:
if (old_state == RTRS_SRV_CLOSING)
changed = true;
break;
default:
break;
}
if (changed)
srv_path->state = new_state;
spin_unlock_irq(&srv_path->state_lock);
return changed;
}
static void free_id(struct rtrs_srv_op *id)
{
if (!id)
return;
kfree(id);
}
static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
int i;
if (srv_path->ops_ids) {
for (i = 0; i < srv->queue_depth; i++)
free_id(srv_path->ops_ids[i]);
kfree(srv_path->ops_ids);
srv_path->ops_ids = NULL;
}
}
static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
static struct ib_cqe io_comp_cqe = {
.done = rtrs_srv_rdma_done
};
static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
{
struct rtrs_srv_path *srv_path = container_of(ref,
struct rtrs_srv_path,
ids_inflight_ref);
percpu_ref_exit(&srv_path->ids_inflight_ref);
complete(&srv_path->complete_done);
}
static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_op *id;
int i, ret;
srv_path->ops_ids = kcalloc(srv->queue_depth,
sizeof(*srv_path->ops_ids),
GFP_KERNEL);
if (!srv_path->ops_ids)
goto err;
for (i = 0; i < srv->queue_depth; ++i) {
id = kzalloc(sizeof(*id), GFP_KERNEL);
if (!id)
goto err;
srv_path->ops_ids[i] = id;
}
ret = percpu_ref_init(&srv_path->ids_inflight_ref,
rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
if (ret) {
pr_err("Percpu reference init failed\n");
goto err;
}
init_completion(&srv_path->complete_done);
return 0;
err:
rtrs_srv_free_ops_ids(srv_path);
return -ENOMEM;
}
static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path)
{
percpu_ref_get(&srv_path->ids_inflight_ref);
}
static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path)
{
percpu_ref_put(&srv_path->ids_inflight_ref);
}
static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "REG MR failed: %s\n",
ib_wc_status_msg(wc->status));
close_path(srv_path);
return;
}
}
static struct ib_cqe local_reg_cqe = {
.done = rtrs_srv_reg_mr_done
};
static int rdma_write_sg(struct rtrs_srv_op *id)
{
struct rtrs_path *s = id->con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id];
struct rtrs_srv_mr *srv_mr;
struct ib_send_wr inv_wr;
struct ib_rdma_wr imm_wr;
struct ib_rdma_wr *wr = NULL;
enum ib_send_flags flags;
size_t sg_cnt;
int err, offset;
bool need_inval;
u32 rkey = 0;
struct ib_reg_wr rwr;
struct ib_sge *plist;
struct ib_sge list;
sg_cnt = le16_to_cpu(id->rd_msg->sg_cnt);
need_inval = le16_to_cpu(id->rd_msg->flags) & RTRS_MSG_NEED_INVAL_F;
if (sg_cnt != 1)
return -EINVAL;
offset = 0;
wr = &id->tx_wr;
plist = &id->tx_sg;
plist->addr = dma_addr + offset;
plist->length = le32_to_cpu(id->rd_msg->desc[0].len);
/* WR will fail with length error
* if this is 0
*/
if (plist->length == 0) {
rtrs_err(s, "Invalid RDMA-Write sg list length 0\n");
return -EINVAL;
}
plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
offset += plist->length;
wr->wr.sg_list = plist;
wr->wr.num_sge = 1;
wr->remote_addr = le64_to_cpu(id->rd_msg->desc[0].addr);
wr->rkey = le32_to_cpu(id->rd_msg->desc[0].key);
if (rkey == 0)
rkey = wr->rkey;
else
/* Only one key is actually used */
WARN_ON_ONCE(rkey != wr->rkey);
wr->wr.opcode = IB_WR_RDMA_WRITE;
wr->wr.wr_cqe = &io_comp_cqe;
wr->wr.ex.imm_data = 0;
wr->wr.send_flags = 0;
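/*
 * Chain the work requests: the RDMA write first, then an optional MR
 * re-registration and/or send-with-invalidate, and finally the IMM
 * notification to the client.
 */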
if (need_inval && always_invalidate) {
wr->wr.next = &rwr.wr;
rwr.wr.next = &inv_wr;
inv_wr.next = &imm_wr.wr;
} else if (always_invalidate) {
wr->wr.next = &rwr.wr;
rwr.wr.next = &imm_wr.wr;
} else if (need_inval) {
wr->wr.next = &inv_wr;
inv_wr.next = &imm_wr.wr;
} else {
wr->wr.next = &imm_wr.wr;
}
/*
* From time to time we have to post signaled sends,
* or the send queue will fill up and only a QP reset can help.
*/
flags = (atomic_inc_return(&id->con->c.wr_cnt) % s->signal_interval) ?
0 : IB_SEND_SIGNALED;
if (need_inval) {
inv_wr.sg_list = NULL;
inv_wr.num_sge = 0;
inv_wr.opcode = IB_WR_SEND_WITH_INV;
inv_wr.wr_cqe = &io_comp_cqe;
inv_wr.send_flags = 0;
inv_wr.ex.invalidate_rkey = rkey;
}
imm_wr.wr.next = NULL;
if (always_invalidate) {
struct rtrs_msg_rkey_rsp *msg;
srv_mr = &srv_path->mrs[id->msg_id];
rwr.wr.opcode = IB_WR_REG_MR;
rwr.wr.wr_cqe = &local_reg_cqe;
rwr.wr.num_sge = 0;
rwr.mr = srv_mr->mr;
rwr.wr.send_flags = 0;
rwr.key = srv_mr->mr->rkey;
rwr.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE);
msg = srv_mr->iu->buf;
msg->buf_id = cpu_to_le16(id->msg_id);
msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
imm_wr.wr.sg_list = &list;
imm_wr.wr.num_sge = 1;
imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
imm_wr.wr.sg_list = NULL;
imm_wr.wr.num_sge = 0;
imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
}
imm_wr.wr.send_flags = flags;
imm_wr.wr.ex.imm_data = cpu_to_be32(rtrs_to_io_rsp_imm(id->msg_id,
0, need_inval));
imm_wr.wr.wr_cqe = &io_comp_cqe;
ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr,
offset, DMA_BIDIRECTIONAL);
err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
if (err)
rtrs_err(s,
"Posting RDMA-Write-Request to QP failed, err: %d\n",
err);
return err;
}
/**
* send_io_resp_imm() - respond to client with empty IMM on failed READ/WRITE
* requests or on successful WRITE request.
* @con: the connection to send back result
* @id: the id associated with the IO
* @errno: the error number of the IO.
*
* Return 0 on success, errno otherwise.
*/
static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
int errno)
{
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
struct ib_send_wr inv_wr, *wr = NULL;
struct ib_rdma_wr imm_wr;
struct ib_reg_wr rwr;
struct rtrs_srv_mr *srv_mr;
bool need_inval = false;
enum ib_send_flags flags;
u32 imm;
int err;
if (id->dir == READ) {
struct rtrs_msg_rdma_read *rd_msg = id->rd_msg;
size_t sg_cnt;
need_inval = le16_to_cpu(rd_msg->flags) &
RTRS_MSG_NEED_INVAL_F;
sg_cnt = le16_to_cpu(rd_msg->sg_cnt);
if (need_inval) {
if (sg_cnt) {
inv_wr.wr_cqe = &io_comp_cqe;
inv_wr.sg_list = NULL;
inv_wr.num_sge = 0;
inv_wr.opcode = IB_WR_SEND_WITH_INV;
inv_wr.send_flags = 0;
/* Only one key is actually used */
inv_wr.ex.invalidate_rkey =
le32_to_cpu(rd_msg->desc[0].key);
} else {
WARN_ON_ONCE(1);
need_inval = false;
}
}
}
trace_send_io_resp_imm(id, need_inval, always_invalidate, errno);
if (need_inval && always_invalidate) {
wr = &inv_wr;
inv_wr.next = &rwr.wr;
rwr.wr.next = &imm_wr.wr;
} else if (always_invalidate) {
wr = &rwr.wr;
rwr.wr.next = &imm_wr.wr;
} else if (need_inval) {
wr = &inv_wr;
inv_wr.next = &imm_wr.wr;
} else {
wr = &imm_wr.wr;
}
/*
* From time to time we have to post signaled sends,
* or the send queue will fill up and only a QP reset can help.
*/
flags = (atomic_inc_return(&con->c.wr_cnt) % s->signal_interval) ?
0 : IB_SEND_SIGNALED;
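/* Pack msg_id, the error code and the need-invalidate flag into the 32-bit immediate */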
imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
imm_wr.wr.next = NULL;
if (always_invalidate) {
struct ib_sge list;
struct rtrs_msg_rkey_rsp *msg;
srv_mr = &srv_path->mrs[id->msg_id];
rwr.wr.next = &imm_wr.wr;
rwr.wr.opcode = IB_WR_REG_MR;
rwr.wr.wr_cqe = &local_reg_cqe;
rwr.wr.num_sge = 0;
rwr.wr.send_flags = 0;
rwr.mr = srv_mr->mr;
rwr.key = srv_mr->mr->rkey;
rwr.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE);
msg = srv_mr->iu->buf;
msg->buf_id = cpu_to_le16(id->msg_id);
msg->type = cpu_to_le16(RTRS_MSG_RKEY_RSP);
msg->rkey = cpu_to_le32(srv_mr->mr->rkey);
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
imm_wr.wr.sg_list = &list;
imm_wr.wr.num_sge = 1;
imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
imm_wr.wr.sg_list = NULL;
imm_wr.wr.num_sge = 0;
imm_wr.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM;
}
imm_wr.wr.send_flags = flags;
imm_wr.wr.wr_cqe = &io_comp_cqe;
imm_wr.wr.ex.imm_data = cpu_to_be32(imm);
err = ib_post_send(id->con->c.qp, wr, NULL);
if (err)
rtrs_err_rl(s, "Posting RDMA-Reply to QP failed, err: %d\n",
err);
return err;
}
void close_path(struct rtrs_srv_path *srv_path)
{
if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
queue_work(rtrs_wq, &srv_path->close_work);
WARN_ON(srv_path->state != RTRS_SRV_CLOSING);
}
static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
{
switch (state) {
case RTRS_SRV_CONNECTING:
return "RTRS_SRV_CONNECTING";
case RTRS_SRV_CONNECTED:
return "RTRS_SRV_CONNECTED";
case RTRS_SRV_CLOSING:
return "RTRS_SRV_CLOSING";
case RTRS_SRV_CLOSED:
return "RTRS_SRV_CLOSED";
default:
return "UNKNOWN";
}
}
/**
* rtrs_srv_resp_rdma() - Finish an RDMA request
*
* @id: Internal RTRS operation identifier
* @status: Response Code sent to the other side for this operation.
* 0 = success, < 0 = error
* Context: any
*
* Finish an RDMA operation. A message is sent to the client and the
* corresponding memory areas will be released.
*/
bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
{
struct rtrs_srv_path *srv_path;
struct rtrs_srv_con *con;
struct rtrs_path *s;
int err;
if (WARN_ON(!id))
return true;
con = id->con;
s = con->c.path;
srv_path = to_srv_path(s);
id->status = status;
if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Sending I/O response failed, server path %s is disconnected, path state %s\n",
kobject_name(&srv_path->kobj),
rtrs_srv_state_str(srv_path->state));
goto out;
}
if (always_invalidate) {
struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id];
ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
}
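/* Send queue is full: park the response on the wait list instead of posting it now */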
if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) {
rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n",
kobject_name(&srv_path->kobj),
con->c.cid);
atomic_add(1, &con->c.sq_wr_avail);
spin_lock(&con->rsp_wr_wait_lock);
list_add_tail(&id->wait_list, &con->rsp_wr_wait_list);
spin_unlock(&con->rsp_wr_wait_lock);
return false;
}
if (status || id->dir == WRITE || !id->rd_msg->sg_cnt)
err = send_io_resp_imm(con, id, status);
else
err = rdma_write_sg(id);
if (err) {
rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err,
kobject_name(&srv_path->kobj));
close_path(srv_path);
}
out:
rtrs_srv_put_ops_ids(srv_path);
return true;
}
EXPORT_SYMBOL(rtrs_srv_resp_rdma);
/**
* rtrs_srv_set_sess_priv() - Set private pointer in rtrs_srv.
* @srv: Session pointer
* @priv: The private pointer that is associated with the session.
*/
void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv)
{
srv->priv = priv;
}
EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
{
int i;
for (i = 0; i < srv_path->mrs_num; i++) {
struct rtrs_srv_mr *srv_mr;
srv_mr = &srv_path->mrs[i];
rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
ib_dereg_mr(srv_mr->mr);
ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
sg_free_table(&srv_mr->sgt);
}
kfree(srv_path->mrs);
}
static int map_cont_bufs(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_path *ss = &srv_path->s;
int i, err, mrs_num;
unsigned int chunk_bits;
int chunks_per_mr = 1;
struct ib_mr *mr;
struct sg_table *sgt;
/*
* Here we map queue_depth chunks to MRs. First we have to
* figure out how many chunks we can map per MR.
*/
if (always_invalidate) {
/*
* in order to invalidate each chunk of memory, we need
* more memory regions.
*/
mrs_num = srv->queue_depth;
} else {
chunks_per_mr =
srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
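/* Use as few MRs as the HCA allows, then spread the chunks evenly across them */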
mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
}
srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL);
if (!srv_path->mrs)
return -ENOMEM;
for (srv_path->mrs_num = 0; srv_path->mrs_num < mrs_num;
srv_path->mrs_num++) {
struct rtrs_srv_mr *srv_mr = &srv_path->mrs[srv_path->mrs_num];
struct scatterlist *s;
int nr, nr_sgt, chunks;
sgt = &srv_mr->sgt;
chunks = chunks_per_mr * srv_path->mrs_num;
if (!always_invalidate)
chunks_per_mr = min_t(int, chunks_per_mr,
srv->queue_depth - chunks);
err = sg_alloc_table(sgt, chunks_per_mr, GFP_KERNEL);
if (err)
goto err;
for_each_sg(sgt->sgl, s, chunks_per_mr, i)
sg_set_page(s, srv->chunks[chunks + i],
max_chunk_size, 0);
nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
if (!nr_sgt) {
err = -EINVAL;
goto free_sg;
}
mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
nr_sgt);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
goto unmap_sg;
}
nr = ib_map_mr_sg(mr, sgt->sgl, nr_sgt,
NULL, max_chunk_size);
if (nr != nr_sgt) {
err = nr < 0 ? nr : -EINVAL;
goto dereg_mr;
}
if (always_invalidate) {
srv_mr->iu = rtrs_iu_alloc(1,
sizeof(struct rtrs_msg_rkey_rsp),
GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_TO_DEVICE, rtrs_srv_rdma_done);
if (!srv_mr->iu) {
err = -ENOMEM;
rtrs_err(ss, "rtrs_iu_alloc(), err: %d\n", err);
goto dereg_mr;
}
}
/* Eventually dma addr for each chunk can be cached */
for_each_sg(sgt->sgl, s, nr_sgt, i)
srv_path->dma_addr[chunks + i] = sg_dma_address(s);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
srv_mr->mr = mr;
}
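/* chunk_bits: bits needed to encode any chunk id; mem_bits is what remains of the immediate payload */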
chunk_bits = ilog2(srv->queue_depth - 1) + 1;
srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
return 0;
dereg_mr:
ib_dereg_mr(mr);
unmap_sg:
ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
free_sg:
sg_free_table(sgt);
err:
unmap_cont_bufs(srv_path);
return err;
}
static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
{
close_path(to_srv_path(c->path));
}
static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path)
{
rtrs_init_hb(&srv_path->s, &io_comp_cqe,
RTRS_HB_INTERVAL_MS,
RTRS_HB_MISSED_MAX,
rtrs_srv_hb_err_handler,
rtrs_wq);
}
static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path)
{
rtrs_start_hb(&srv_path->s);
}
static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path)
{
rtrs_stop_hb(&srv_path->s);
}
static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Sess info response send failed: %s\n",
ib_wc_status_msg(wc->status));
close_path(srv_path);
return;
}
WARN_ON(wc->opcode != IB_WC_SEND);
}
static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
int up;
mutex_lock(&srv->paths_ev_mutex);
up = ++srv->paths_up;
if (up == 1)
ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_CONNECTED, NULL);
mutex_unlock(&srv->paths_ev_mutex);
/* Mark session as established */
srv_path->established = true;
}
static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
if (!srv_path->established)
return;
srv_path->established = false;
mutex_lock(&srv->paths_ev_mutex);
WARN_ON(!srv->paths_up);
if (--srv->paths_up == 0)
ctx->ops.link_ev(srv, RTRS_SRV_LINK_EV_DISCONNECTED, srv->priv);
mutex_unlock(&srv->paths_ev_mutex);
}
static bool exist_pathname(struct rtrs_srv_ctx *ctx,
const char *pathname, const uuid_t *path_uuid)
{
struct rtrs_srv_sess *srv;
struct rtrs_srv_path *srv_path;
bool found = false;
mutex_lock(&ctx->srv_mutex);
list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
mutex_lock(&srv->paths_mutex);
/* Skip our own session: a client with the same uuid may add another path under the same sessname */
if (uuid_equal(&srv->paths_uuid, path_uuid)) {
mutex_unlock(&srv->paths_mutex);
continue;
}
list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
if (strlen(srv_path->s.sessname) == strlen(pathname) &&
!strcmp(srv_path->s.sessname, pathname)) {
found = true;
break;
}
}
mutex_unlock(&srv->paths_mutex);
if (found)
break;
}
mutex_unlock(&ctx->srv_mutex);
return found;
}
static int post_recv_path(struct rtrs_srv_path *srv_path);
static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
static int process_info_req(struct rtrs_srv_con *con,
struct rtrs_msg_info_req *msg)
{
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
struct ib_send_wr *reg_wr = NULL;
struct rtrs_msg_info_rsp *rsp;
struct rtrs_iu *tx_iu;
struct ib_reg_wr *rwr;
int mri, err;
size_t tx_sz;
err = post_recv_path(srv_path);
if (err) {
rtrs_err(s, "post_recv_path(), err: %d\n", err);
return err;
}
if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) {
rtrs_err(s, "pathname cannot contain / and .\n");
return -EINVAL;
}
if (exist_pathname(srv_path->srv->ctx,
msg->pathname, &srv_path->srv->paths_uuid)) {
rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname);
return -EPERM;
}
strscpy(srv_path->s.sessname, msg->pathname,
sizeof(srv_path->s.sessname));
rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL);
if (!rwr)
return -ENOMEM;
tx_sz = sizeof(*rsp);
tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num;
tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
if (!tx_iu) {
err = -ENOMEM;
goto rwr_free;
}
rsp = tx_iu->buf;
rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num);
for (mri = 0; mri < srv_path->mrs_num; mri++) {
struct ib_mr *mr = srv_path->mrs[mri].mr;
rsp->desc[mri].addr = cpu_to_le64(mr->iova);
rsp->desc[mri].key = cpu_to_le32(mr->rkey);
rsp->desc[mri].len = cpu_to_le32(mr->length);
/*
* Fill in reg MR request and chain them *backwards*
*/
rwr[mri].wr.next = mri ? &rwr[mri - 1].wr : NULL;
rwr[mri].wr.opcode = IB_WR_REG_MR;
rwr[mri].wr.wr_cqe = &local_reg_cqe;
rwr[mri].wr.num_sge = 0;
rwr[mri].wr.send_flags = 0;
rwr[mri].mr = mr;
rwr[mri].key = mr->rkey;
rwr[mri].access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE);
reg_wr = &rwr[mri].wr;
}
err = rtrs_srv_create_path_files(srv_path);
if (err)
goto iu_free;
kobject_get(&srv_path->kobj);
get_device(&srv_path->srv->dev);
rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
rtrs_srv_start_hb(srv_path);
/*
	 * We do not count the number of established connections at this
	 * point; we rely on the client, which sends the info request only
	 * after all of its connections have been successfully established.
	 * Thus, simply notify the listener with the proper event if we are
	 * the first path.
*/
rtrs_srv_path_up(srv_path);
ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
tx_iu->dma_addr,
tx_iu->size, DMA_TO_DEVICE);
/* Send info response */
err = rtrs_iu_post_send(&con->c, tx_iu, tx_sz, reg_wr);
if (err) {
rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
iu_free:
rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1);
}
rwr_free:
kfree(rwr);
return err;
}
static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_msg_info_req *msg;
struct rtrs_iu *iu;
int err;
WARN_ON(con->c.cid);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Sess info request receive failed: %s\n",
ib_wc_status_msg(wc->status));
goto close;
}
WARN_ON(wc->opcode != IB_WC_RECV);
if (wc->byte_len < sizeof(*msg)) {
rtrs_err(s, "Sess info request is malformed: size %d\n",
wc->byte_len);
goto close;
}
ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) {
rtrs_err(s, "Sess info request is malformed: type %d\n",
le16_to_cpu(msg->type));
goto close;
}
err = process_info_req(con, msg);
if (err)
goto close;
out:
rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
return;
close:
close_path(srv_path);
goto out;
}
static int post_recv_info_req(struct rtrs_srv_con *con)
{
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_iu *rx_iu;
int err;
rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_FROM_DEVICE, rtrs_srv_info_req_done);
if (!rx_iu)
return -ENOMEM;
/* Prepare for getting info response */
err = rtrs_iu_post_recv(&con->c, rx_iu);
if (err) {
rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1);
return err;
}
return 0;
}
static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
{
int i, err;
for (i = 0; i < q_size; i++) {
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err)
return err;
}
return 0;
}
static int post_recv_path(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_path *s = &srv_path->s;
size_t q_size;
int err, cid;
for (cid = 0; cid < srv_path->s.con_num; cid++) {
if (cid == 0)
q_size = SERVICE_CON_QUEUE_DEPTH;
else
q_size = srv->queue_depth;
err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
if (err) {
rtrs_err(s, "post_recv_io(), err: %d\n", err);
return err;
}
}
return 0;
}
static void process_read(struct rtrs_srv_con *con,
struct rtrs_msg_rdma_read *msg,
u32 buf_id, u32 off)
{
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
struct rtrs_srv_op *id;
size_t usr_len, data_len;
void *data;
int ret;
if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Processing read request failed, session is disconnected, sess state %s\n",
rtrs_srv_state_str(srv_path->state));
return;
}
if (msg->sg_cnt != 1 && msg->sg_cnt != 0) {
rtrs_err_rl(s,
"Processing read request failed, invalid message\n");
return;
}
rtrs_srv_get_ops_ids(srv_path);
rtrs_srv_update_rdma_stats(srv_path->stats, off, READ);
id = srv_path->ops_ids[buf_id];
id->con = con;
id->dir = READ;
id->msg_id = buf_id;
id->rd_msg = msg;
usr_len = le16_to_cpu(msg->usr_len);
data_len = off - usr_len;
data = page_address(srv->chunks[buf_id]);
ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
data + data_len, usr_len);
if (ret) {
rtrs_err_rl(s,
"Processing read request failed, user module cb reported for msg_id %d, err: %d\n",
buf_id, ret);
goto send_err_msg;
}
return;
send_err_msg:
ret = send_io_resp_imm(con, id, ret);
if (ret < 0) {
rtrs_err_rl(s,
"Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
buf_id, ret);
close_path(srv_path);
}
rtrs_srv_put_ops_ids(srv_path);
}
static void process_write(struct rtrs_srv_con *con,
struct rtrs_msg_rdma_write *req,
u32 buf_id, u32 off)
{
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
struct rtrs_srv_op *id;
size_t data_len, usr_len;
void *data;
int ret;
if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Processing write request failed, session is disconnected, sess state %s\n",
rtrs_srv_state_str(srv_path->state));
return;
}
rtrs_srv_get_ops_ids(srv_path);
rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE);
id = srv_path->ops_ids[buf_id];
id->con = con;
id->dir = WRITE;
id->msg_id = buf_id;
usr_len = le16_to_cpu(req->usr_len);
data_len = off - usr_len;
data = page_address(srv->chunks[buf_id]);
ret = ctx->ops.rdma_ev(srv->priv, id, data, data_len,
data + data_len, usr_len);
if (ret) {
rtrs_err_rl(s,
"Processing write request failed, user module callback reports err: %d\n",
ret);
goto send_err_msg;
}
return;
send_err_msg:
ret = send_io_resp_imm(con, id, ret);
if (ret < 0) {
rtrs_err_rl(s,
"Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
buf_id, ret);
close_path(srv_path);
}
rtrs_srv_put_ops_ids(srv_path);
}
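/*
 * Editor's note (illustrative, not part of the original source): layout of
 * one receive chunk as assumed by process_read() and process_write() above.
 * The client RDMA-writes the payload and the user message back to back into
 * the chunk; 'off', decoded from the immediate data, is the total number of
 * bytes written:
 *
 *   page_address(srv->chunks[buf_id])
 *   |<--------------------- off -------------------->|
 *   +--------------------------+----------------------+-----------+
 *   |      data (data_len)     |  user msg (usr_len)  |  unused   |
 *   +--------------------------+----------------------+-----------+
 *
 *   data_len = off - usr_len;
 *   ctx->ops.rdma_ev(srv->priv, id, data, data_len, data + data_len, usr_len);
 */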
static void process_io_req(struct rtrs_srv_con *con, void *msg,
u32 id, u32 off)
{
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_msg_rdma_hdr *hdr;
unsigned int type;
ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev,
srv_path->dma_addr[id],
max_chunk_size, DMA_BIDIRECTIONAL);
hdr = msg;
type = le16_to_cpu(hdr->type);
switch (type) {
case RTRS_MSG_WRITE:
process_write(con, msg, id, off);
break;
case RTRS_MSG_READ:
process_read(con, msg, id, off);
break;
default:
rtrs_err(s,
"Processing I/O request failed, unknown message type received: 0x%02x\n",
type);
goto err;
}
return;
err:
close_path(srv_path);
}
static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_mr *mr =
container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_srv_sess *srv = srv_path->srv;
u32 msg_id, off;
void *data;
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
close_path(srv_path);
}
msg_id = mr->msg_id;
off = mr->msg_off;
data = page_address(srv->chunks[msg_id]) + off;
process_io_req(con, data, msg_id, off);
}
static int rtrs_srv_inv_rkey(struct rtrs_srv_con *con,
struct rtrs_srv_mr *mr)
{
struct ib_send_wr wr = {
.opcode = IB_WR_LOCAL_INV,
.wr_cqe = &mr->inv_cqe,
.send_flags = IB_SEND_SIGNALED,
.ex.invalidate_rkey = mr->mr->rkey,
};
mr->inv_cqe.done = rtrs_srv_inv_rkey_done;
return ib_post_send(con->c.qp, &wr, NULL);
}
static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
{
spin_lock(&con->rsp_wr_wait_lock);
while (!list_empty(&con->rsp_wr_wait_list)) {
struct rtrs_srv_op *id;
int ret;
id = list_entry(con->rsp_wr_wait_list.next,
struct rtrs_srv_op, wait_list);
list_del(&id->wait_list);
spin_unlock(&con->rsp_wr_wait_lock);
ret = rtrs_srv_resp_rdma(id, id->status);
spin_lock(&con->rsp_wr_wait_lock);
if (!ret) {
list_add(&id->wait_list, &con->rsp_wr_wait_list);
break;
}
}
spin_unlock(&con->rsp_wr_wait_lock);
}
static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
struct rtrs_path *s = con->c.path;
struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_srv_sess *srv = srv_path->srv;
u32 imm_type, imm_payload;
int err;
if (wc->status != IB_WC_SUCCESS) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
rtrs_err(s,
"%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
ib_wc_status_msg(wc->status), wc->wr_cqe,
wc->opcode, wc->vendor_err, wc->byte_len);
close_path(srv_path);
}
return;
}
switch (wc->opcode) {
case IB_WC_RECV_RDMA_WITH_IMM:
/*
* post_recv() RDMA write completions of IO reqs (read/write)
* and hb
*/
if (WARN_ON(wc->wr_cqe != &io_comp_cqe))
return;
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
close_path(srv_path);
break;
}
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
&imm_type, &imm_payload);
if (imm_type == RTRS_IO_REQ_IMM) {
u32 msg_id, off;
void *data;
msg_id = imm_payload >> srv_path->mem_bits;
off = imm_payload & ((1 << srv_path->mem_bits) - 1);
if (msg_id >= srv->queue_depth || off >= max_chunk_size) {
rtrs_err(s, "Wrong msg_id %u, off %u\n",
msg_id, off);
close_path(srv_path);
return;
}
if (always_invalidate) {
struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id];
mr->msg_off = off;
mr->msg_id = msg_id;
err = rtrs_srv_inv_rkey(con, mr);
if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n",
err);
close_path(srv_path);
break;
}
} else {
data = page_address(srv->chunks[msg_id]) + off;
process_io_req(con, data, msg_id, off);
}
} else if (imm_type == RTRS_HB_MSG_IMM) {
WARN_ON(con->c.cid);
rtrs_send_hb_ack(&srv_path->s);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
srv_path->s.hb_missed_cnt = 0;
} else {
rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
}
break;
case IB_WC_RDMA_WRITE:
case IB_WC_SEND:
/*
* post_send() RDMA write completions of IO reqs (read/write)
* and hb.
*/
atomic_add(s->signal_interval, &con->c.sq_wr_avail);
if (!list_empty_careful(&con->rsp_wr_wait_list))
rtrs_rdma_process_wr_wait_list(con);
break;
default:
rtrs_wrn(s, "Unexpected WC type: %d\n", wc->opcode);
return;
}
}
/**
 * rtrs_srv_get_path_name() - Get the session (path) name of the first connected path.
* @srv: Session
* @pathname: Pathname buffer
 * @len: Length of pathname buffer
*/
int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname,
size_t len)
{
struct rtrs_srv_path *srv_path;
int err = -ENOTCONN;
mutex_lock(&srv->paths_mutex);
list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
if (srv_path->state != RTRS_SRV_CONNECTED)
continue;
strscpy(pathname, srv_path->s.sessname,
min_t(size_t, sizeof(srv_path->s.sessname), len));
err = 0;
break;
}
mutex_unlock(&srv->paths_mutex);
return err;
}
EXPORT_SYMBOL(rtrs_srv_get_path_name);
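/*
 * Editor's sketch (not part of the original file): a minimal example of how
 * a ULP might use the path-name and queue-depth getters from this file,
 * e.g. from one of its own sysfs show() callbacks.  demo_show_path() is
 * hypothetical; only rtrs_srv_get_path_name() and rtrs_srv_get_queue_depth()
 * come from this file.
 */
static ssize_t demo_show_path(struct rtrs_srv_sess *srv, char *page)
{
	char name[NAME_MAX];

	if (rtrs_srv_get_path_name(srv, name, sizeof(name)))
		return sysfs_emit(page, "<no connected path>\n");

	return sysfs_emit(page, "%s (queue depth %d)\n",
			  name, rtrs_srv_get_queue_depth(srv));
}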
/**
* rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
* @srv: Session
*/
int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv)
{
return srv->queue_depth;
}
EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
static int find_next_bit_ring(struct rtrs_srv_path *srv_path)
{
struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
int v;
v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask);
if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
v = cpumask_first(&cq_affinity_mask);
return v;
}
static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path)
{
srv_path->cur_cq_vector = find_next_bit_ring(srv_path);
return srv_path->cur_cq_vector;
}
static void rtrs_srv_dev_release(struct device *dev)
{
struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess,
dev);
kfree(srv);
}
static void free_srv(struct rtrs_srv_sess *srv)
{
int i;
WARN_ON(refcount_read(&srv->refcount));
for (i = 0; i < srv->queue_depth; i++)
__free_pages(srv->chunks[i], get_order(max_chunk_size));
kfree(srv->chunks);
mutex_destroy(&srv->paths_mutex);
mutex_destroy(&srv->paths_ev_mutex);
/* last put to release the srv structure */
put_device(&srv->dev);
}
static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
const uuid_t *paths_uuid,
bool first_conn)
{
struct rtrs_srv_sess *srv;
int i;
mutex_lock(&ctx->srv_mutex);
list_for_each_entry(srv, &ctx->srv_list, ctx_list) {
if (uuid_equal(&srv->paths_uuid, paths_uuid) &&
refcount_inc_not_zero(&srv->refcount)) {
mutex_unlock(&ctx->srv_mutex);
return srv;
}
}
mutex_unlock(&ctx->srv_mutex);
/*
* If this request is not the first connection request from the
* client for this session then fail and return error.
*/
if (!first_conn) {
pr_err_ratelimited("Error: Not the first connection request for this session\n");
return ERR_PTR(-ENXIO);
}
/* need to allocate a new srv */
srv = kzalloc(sizeof(*srv), GFP_KERNEL);
if (!srv)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&srv->paths_list);
mutex_init(&srv->paths_mutex);
mutex_init(&srv->paths_ev_mutex);
uuid_copy(&srv->paths_uuid, paths_uuid);
srv->queue_depth = sess_queue_depth;
srv->ctx = ctx;
device_initialize(&srv->dev);
srv->dev.release = rtrs_srv_dev_release;
srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
GFP_KERNEL);
if (!srv->chunks)
goto err_free_srv;
for (i = 0; i < srv->queue_depth; i++) {
srv->chunks[i] = alloc_pages(GFP_KERNEL,
get_order(max_chunk_size));
if (!srv->chunks[i])
goto err_free_chunks;
}
refcount_set(&srv->refcount, 1);
mutex_lock(&ctx->srv_mutex);
list_add(&srv->ctx_list, &ctx->srv_list);
mutex_unlock(&ctx->srv_mutex);
return srv;
err_free_chunks:
while (i--)
__free_pages(srv->chunks[i], get_order(max_chunk_size));
kfree(srv->chunks);
err_free_srv:
kfree(srv);
return ERR_PTR(-ENOMEM);
}
static void put_srv(struct rtrs_srv_sess *srv)
{
if (refcount_dec_and_test(&srv->refcount)) {
struct rtrs_srv_ctx *ctx = srv->ctx;
WARN_ON(srv->dev.kobj.state_in_sysfs);
mutex_lock(&ctx->srv_mutex);
list_del(&srv->ctx_list);
mutex_unlock(&ctx->srv_mutex);
free_srv(srv);
}
}
static void __add_path_to_srv(struct rtrs_srv_sess *srv,
struct rtrs_srv_path *srv_path)
{
list_add_tail(&srv_path->s.entry, &srv->paths_list);
srv->paths_num++;
WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
}
static void del_path_from_srv(struct rtrs_srv_path *srv_path)
{
struct rtrs_srv_sess *srv = srv_path->srv;
if (WARN_ON(!srv))
return;
mutex_lock(&srv->paths_mutex);
list_del(&srv_path->s.entry);
WARN_ON(!srv->paths_num);
srv->paths_num--;
mutex_unlock(&srv->paths_mutex);
}
/* Returns 0 if the addresses are equal (callers check !sockaddr_cmp()), non-zero if they differ, -ENOENT on an unknown address family */
static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
{
switch (a->sa_family) {
case AF_IB:
return memcmp(&((struct sockaddr_ib *)a)->sib_addr,
&((struct sockaddr_ib *)b)->sib_addr,
sizeof(struct ib_addr)) &&
(b->sa_family == AF_IB);
case AF_INET:
return memcmp(&((struct sockaddr_in *)a)->sin_addr,
&((struct sockaddr_in *)b)->sin_addr,
sizeof(struct in_addr)) &&
(b->sa_family == AF_INET);
case AF_INET6:
return memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
&((struct sockaddr_in6 *)b)->sin6_addr,
sizeof(struct in6_addr)) &&
(b->sa_family == AF_INET6);
default:
return -ENOENT;
}
}
static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv,
struct rdma_addr *addr)
{
struct rtrs_srv_path *srv_path;
list_for_each_entry(srv_path, &srv->paths_list, s.entry)
if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr,
(struct sockaddr *)&addr->dst_addr) &&
!sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr,
(struct sockaddr *)&addr->src_addr))
return true;
return false;
}
static void free_path(struct rtrs_srv_path *srv_path)
{
if (srv_path->kobj.state_in_sysfs) {
kobject_del(&srv_path->kobj);
kobject_put(&srv_path->kobj);
} else {
free_percpu(srv_path->stats->rdma_stats);
kfree(srv_path->stats);
kfree(srv_path);
}
}
static void rtrs_srv_close_work(struct work_struct *work)
{
struct rtrs_srv_path *srv_path;
struct rtrs_srv_con *con;
int i;
srv_path = container_of(work, typeof(*srv_path), close_work);
rtrs_srv_destroy_path_files(srv_path);
rtrs_srv_stop_hb(srv_path);
for (i = 0; i < srv_path->s.con_num; i++) {
if (!srv_path->s.con[i])
continue;
con = to_srv_con(srv_path->s.con[i]);
rdma_disconnect(con->c.cm_id);
ib_drain_qp(con->c.qp);
}
/*
* Degrade ref count to the usual model with a single shared
* atomic_t counter
*/
percpu_ref_kill(&srv_path->ids_inflight_ref);
	/* Wait for all inflight requests to complete */
wait_for_completion(&srv_path->complete_done);
/* Notify upper layer if we are the last path */
rtrs_srv_path_down(srv_path);
unmap_cont_bufs(srv_path);
rtrs_srv_free_ops_ids(srv_path);
for (i = 0; i < srv_path->s.con_num; i++) {
if (!srv_path->s.con[i])
continue;
con = to_srv_con(srv_path->s.con[i]);
rtrs_cq_qp_destroy(&con->c);
rdma_destroy_id(con->c.cm_id);
kfree(con);
}
rtrs_ib_dev_put(srv_path->s.dev);
del_path_from_srv(srv_path);
put_srv(srv_path->srv);
srv_path->srv = NULL;
rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);
kfree(srv_path->dma_addr);
kfree(srv_path->s.con);
free_path(srv_path);
}
static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path,
struct rdma_cm_id *cm_id)
{
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_msg_conn_rsp msg;
struct rdma_conn_param param;
int err;
param = (struct rdma_conn_param) {
.rnr_retry_count = 7,
.private_data = &msg,
.private_data_len = sizeof(msg),
};
msg = (struct rtrs_msg_conn_rsp) {
.magic = cpu_to_le16(RTRS_MAGIC),
.version = cpu_to_le16(RTRS_PROTO_VER),
.queue_depth = cpu_to_le16(srv->queue_depth),
.max_io_size = cpu_to_le32(max_chunk_size - MAX_HDR_SIZE),
.max_hdr_size = cpu_to_le32(MAX_HDR_SIZE),
};
if (always_invalidate)
msg.flags = cpu_to_le32(RTRS_MSG_NEW_RKEY_F);
err = rdma_accept(cm_id, ¶m);
if (err)
pr_err("rdma_accept(), err: %d\n", err);
return err;
}
static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
{
struct rtrs_msg_conn_rsp msg;
int err;
msg = (struct rtrs_msg_conn_rsp) {
.magic = cpu_to_le16(RTRS_MAGIC),
.version = cpu_to_le16(RTRS_PROTO_VER),
.errno = cpu_to_le16(errno),
};
err = rdma_reject(cm_id, &msg, sizeof(msg), IB_CM_REJ_CONSUMER_DEFINED);
if (err)
pr_err("rdma_reject(), err: %d\n", err);
/* Bounce errno back */
return errno;
}
static struct rtrs_srv_path *
__find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid)
{
struct rtrs_srv_path *srv_path;
list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
if (uuid_equal(&srv_path->s.uuid, sess_uuid))
return srv_path;
}
return NULL;
}
static int create_con(struct rtrs_srv_path *srv_path,
struct rdma_cm_id *cm_id,
unsigned int cid)
{
struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_path *s = &srv_path->s;
struct rtrs_srv_con *con;
u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
int err, cq_vector;
con = kzalloc(sizeof(*con), GFP_KERNEL);
if (!con) {
err = -ENOMEM;
goto err;
}
spin_lock_init(&con->rsp_wr_wait_lock);
INIT_LIST_HEAD(&con->rsp_wr_wait_list);
con->c.cm_id = cm_id;
con->c.path = &srv_path->s;
con->c.cid = cid;
atomic_set(&con->c.wr_cnt, 1);
wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr;
if (con->c.cid == 0) {
/*
* All receive and all send (each requiring invalidate)
* + 2 for drain and heartbeat
*/
max_send_wr = min_t(int, wr_limit,
SERVICE_CON_QUEUE_DEPTH * 2 + 2);
max_recv_wr = max_send_wr;
s->signal_interval = min_not_zero(srv->queue_depth,
(size_t)SERVICE_CON_QUEUE_DEPTH);
} else {
		/* when always_invalidate is enabled, we need linv+rinv+mr+imm */
if (always_invalidate)
max_send_wr =
min_t(int, wr_limit,
srv->queue_depth * (1 + 4) + 1);
else
max_send_wr =
min_t(int, wr_limit,
srv->queue_depth * (1 + 2) + 1);
max_recv_wr = srv->queue_depth + 1;
}
cq_num = max_send_wr + max_recv_wr;
atomic_set(&con->c.sq_wr_avail, max_send_wr);
cq_vector = rtrs_srv_get_next_cq_vector(srv_path);
/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num,
max_send_wr, max_recv_wr,
IB_POLL_WORKQUEUE);
if (err) {
rtrs_err(s, "rtrs_cq_qp_create(), err: %d\n", err);
goto free_con;
}
if (con->c.cid == 0) {
err = post_recv_info_req(con);
if (err)
goto free_cqqp;
}
WARN_ON(srv_path->s.con[cid]);
srv_path->s.con[cid] = &con->c;
/*
* Change context from server to current connection. The other
* way is to use cm_id->qp->qp_context, which does not work on OFED.
*/
cm_id->context = &con->c;
return 0;
free_cqqp:
rtrs_cq_qp_destroy(&con->c);
free_con:
kfree(con);
err:
return err;
}
static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
struct rdma_cm_id *cm_id,
unsigned int con_num,
unsigned int recon_cnt,
const uuid_t *uuid)
{
struct rtrs_srv_path *srv_path;
int err = -ENOMEM;
char str[NAME_MAX];
struct rtrs_addr path;
if (srv->paths_num >= MAX_PATHS_NUM) {
err = -ECONNRESET;
goto err;
}
if (__is_path_w_addr_exists(srv, &cm_id->route.addr)) {
err = -EEXIST;
pr_err("Path with same addr exists\n");
goto err;
}
srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL);
if (!srv_path)
goto err;
srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL);
if (!srv_path->stats)
goto err_free_sess;
srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats);
if (!srv_path->stats->rdma_stats)
goto err_free_stats;
srv_path->stats->srv_path = srv_path;
srv_path->dma_addr = kcalloc(srv->queue_depth,
sizeof(*srv_path->dma_addr),
GFP_KERNEL);
if (!srv_path->dma_addr)
goto err_free_percpu;
srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
GFP_KERNEL);
if (!srv_path->s.con)
goto err_free_dma_addr;
srv_path->state = RTRS_SRV_CONNECTING;
srv_path->srv = srv;
srv_path->cur_cq_vector = -1;
srv_path->s.dst_addr = cm_id->route.addr.dst_addr;
srv_path->s.src_addr = cm_id->route.addr.src_addr;
/* temporary until receiving session-name from client */
path.src = &srv_path->s.src_addr;
path.dst = &srv_path->s.dst_addr;
rtrs_addr_to_str(&path, str, sizeof(str));
strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname));
srv_path->s.con_num = con_num;
srv_path->s.irq_con_num = con_num;
srv_path->s.recon_cnt = recon_cnt;
uuid_copy(&srv_path->s.uuid, uuid);
spin_lock_init(&srv_path->state_lock);
INIT_WORK(&srv_path->close_work, rtrs_srv_close_work);
rtrs_srv_init_hb(srv_path);
srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
if (!srv_path->s.dev) {
err = -ENOMEM;
goto err_free_con;
}
err = map_cont_bufs(srv_path);
if (err)
goto err_put_dev;
err = rtrs_srv_alloc_ops_ids(srv_path);
if (err)
goto err_unmap_bufs;
__add_path_to_srv(srv, srv_path);
return srv_path;
err_unmap_bufs:
unmap_cont_bufs(srv_path);
err_put_dev:
rtrs_ib_dev_put(srv_path->s.dev);
err_free_con:
kfree(srv_path->s.con);
err_free_dma_addr:
kfree(srv_path->dma_addr);
err_free_percpu:
free_percpu(srv_path->stats->rdma_stats);
err_free_stats:
kfree(srv_path->stats);
err_free_sess:
kfree(srv_path);
err:
return ERR_PTR(err);
}
static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
const struct rtrs_msg_conn_req *msg,
size_t len)
{
struct rtrs_srv_ctx *ctx = cm_id->context;
struct rtrs_srv_path *srv_path;
struct rtrs_srv_sess *srv;
u16 version, con_num, cid;
u16 recon_cnt;
int err = -ECONNRESET;
if (len < sizeof(*msg)) {
pr_err("Invalid RTRS connection request\n");
goto reject_w_err;
}
if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
pr_err("Invalid RTRS magic\n");
goto reject_w_err;
}
version = le16_to_cpu(msg->version);
if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
pr_err("Unsupported major RTRS version: %d, expected %d\n",
version >> 8, RTRS_PROTO_VER_MAJOR);
goto reject_w_err;
}
con_num = le16_to_cpu(msg->cid_num);
if (con_num > 4096) {
/* Sanity check */
pr_err("Too many connections requested: %d\n", con_num);
goto reject_w_err;
}
cid = le16_to_cpu(msg->cid);
if (cid >= con_num) {
/* Sanity check */
pr_err("Incorrect cid: %d >= %d\n", cid, con_num);
goto reject_w_err;
}
recon_cnt = le16_to_cpu(msg->recon_cnt);
srv = get_or_create_srv(ctx, &msg->paths_uuid, msg->first_conn);
if (IS_ERR(srv)) {
err = PTR_ERR(srv);
pr_err("get_or_create_srv(), error %d\n", err);
goto reject_w_err;
}
mutex_lock(&srv->paths_mutex);
srv_path = __find_path(srv, &msg->sess_uuid);
if (srv_path) {
struct rtrs_path *s = &srv_path->s;
/* Session already holds a reference */
put_srv(srv);
if (srv_path->state != RTRS_SRV_CONNECTING) {
rtrs_err(s, "Session in wrong state: %s\n",
rtrs_srv_state_str(srv_path->state));
mutex_unlock(&srv->paths_mutex);
goto reject_w_err;
}
/*
* Sanity checks
*/
if (con_num != s->con_num || cid >= s->con_num) {
rtrs_err(s, "Incorrect request: %d, %d\n",
cid, con_num);
mutex_unlock(&srv->paths_mutex);
goto reject_w_err;
}
if (s->con[cid]) {
rtrs_err(s, "Connection already exists: %d\n",
cid);
mutex_unlock(&srv->paths_mutex);
goto reject_w_err;
}
} else {
srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt,
&msg->sess_uuid);
if (IS_ERR(srv_path)) {
mutex_unlock(&srv->paths_mutex);
put_srv(srv);
err = PTR_ERR(srv_path);
pr_err("RTRS server session allocation failed: %d\n", err);
goto reject_w_err;
}
}
err = create_con(srv_path, cm_id, cid);
if (err) {
rtrs_err((&srv_path->s), "create_con(), error %d\n", err);
rtrs_rdma_do_reject(cm_id, err);
/*
* Since session has other connections we follow normal way
* through workqueue, but still return an error to tell cma.c
* to call rdma_destroy_id() for current connection.
*/
goto close_and_return_err;
}
err = rtrs_rdma_do_accept(srv_path, cm_id);
if (err) {
rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err);
rtrs_rdma_do_reject(cm_id, err);
/*
* Since current connection was successfully added to the
* session we follow normal way through workqueue to close the
* session, thus return 0 to tell cma.c we call
* rdma_destroy_id() ourselves.
*/
err = 0;
goto close_and_return_err;
}
mutex_unlock(&srv->paths_mutex);
return 0;
reject_w_err:
return rtrs_rdma_do_reject(cm_id, err);
close_and_return_err:
mutex_unlock(&srv->paths_mutex);
close_path(srv_path);
return err;
}
static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{
struct rtrs_srv_path *srv_path = NULL;
struct rtrs_path *s = NULL;
struct rtrs_con *c = NULL;
if (ev->event == RDMA_CM_EVENT_CONNECT_REQUEST)
/*
* In case of error cma.c will destroy cm_id,
* see cma_process_remove()
*/
return rtrs_rdma_connect(cm_id, ev->param.conn.private_data,
ev->param.conn.private_data_len);
c = cm_id->context;
s = c->path;
srv_path = to_srv_path(s);
switch (ev->event) {
case RDMA_CM_EVENT_ESTABLISHED:
/* Nothing here */
break;
case RDMA_CM_EVENT_REJECTED:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
rtrs_err(s, "CM error (CM event: %s, err: %d)\n",
rdma_event_msg(ev->event), ev->status);
fallthrough;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
close_path(srv_path);
break;
default:
pr_err("Ignoring unexpected CM event %s, err %d\n",
rdma_event_msg(ev->event), ev->status);
break;
}
return 0;
}
static struct rdma_cm_id *rtrs_srv_cm_init(struct rtrs_srv_ctx *ctx,
struct sockaddr *addr,
enum rdma_ucm_port_space ps)
{
struct rdma_cm_id *cm_id;
int ret;
cm_id = rdma_create_id(&init_net, rtrs_srv_rdma_cm_handler,
ctx, ps, IB_QPT_RC);
if (IS_ERR(cm_id)) {
ret = PTR_ERR(cm_id);
pr_err("Creating id for RDMA connection failed, err: %d\n",
ret);
goto err_out;
}
ret = rdma_bind_addr(cm_id, addr);
if (ret) {
pr_err("Binding RDMA address failed, err: %d\n", ret);
goto err_cm;
}
ret = rdma_listen(cm_id, 64);
if (ret) {
pr_err("Listening on RDMA connection failed, err: %d\n",
ret);
goto err_cm;
}
return cm_id;
err_cm:
rdma_destroy_id(cm_id);
err_out:
return ERR_PTR(ret);
}
static int rtrs_srv_rdma_init(struct rtrs_srv_ctx *ctx, u16 port)
{
struct sockaddr_in6 sin = {
.sin6_family = AF_INET6,
.sin6_addr = IN6ADDR_ANY_INIT,
.sin6_port = htons(port),
};
struct sockaddr_ib sib = {
.sib_family = AF_IB,
.sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port),
.sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL),
.sib_pkey = cpu_to_be16(0xffff),
};
struct rdma_cm_id *cm_ip, *cm_ib;
int ret;
/*
* We accept both IPoIB and IB connections, so we need to keep
* two cm id's, one for each socket type and port space.
* If the cm initialization of one of the id's fails, we abort
* everything.
*/
cm_ip = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sin, RDMA_PS_TCP);
if (IS_ERR(cm_ip))
return PTR_ERR(cm_ip);
cm_ib = rtrs_srv_cm_init(ctx, (struct sockaddr *)&sib, RDMA_PS_IB);
if (IS_ERR(cm_ib)) {
ret = PTR_ERR(cm_ib);
goto free_cm_ip;
}
ctx->cm_id_ip = cm_ip;
ctx->cm_id_ib = cm_ib;
return 0;
free_cm_ip:
rdma_destroy_id(cm_ip);
return ret;
}
static struct rtrs_srv_ctx *alloc_srv_ctx(struct rtrs_srv_ops *ops)
{
struct rtrs_srv_ctx *ctx;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return NULL;
ctx->ops = *ops;
mutex_init(&ctx->srv_mutex);
INIT_LIST_HEAD(&ctx->srv_list);
return ctx;
}
static void free_srv_ctx(struct rtrs_srv_ctx *ctx)
{
WARN_ON(!list_empty(&ctx->srv_list));
mutex_destroy(&ctx->srv_mutex);
kfree(ctx);
}
static int rtrs_srv_add_one(struct ib_device *device)
{
struct rtrs_srv_ctx *ctx;
int ret = 0;
mutex_lock(&ib_ctx.ib_dev_mutex);
if (ib_ctx.ib_dev_count)
goto out;
/*
* Since our CM IDs are NOT bound to any ib device we will create them
* only once
*/
ctx = ib_ctx.srv_ctx;
ret = rtrs_srv_rdma_init(ctx, ib_ctx.port);
if (ret) {
/*
* We errored out here.
* According to the ib code, if we encounter an error here then the
* error code is ignored, and no more calls to our ops are made.
*/
pr_err("Failed to initialize RDMA connection");
goto err_out;
}
out:
/*
	 * Keep track of the number of ib devices added
*/
ib_ctx.ib_dev_count++;
err_out:
mutex_unlock(&ib_ctx.ib_dev_mutex);
return ret;
}
static void rtrs_srv_remove_one(struct ib_device *device, void *client_data)
{
struct rtrs_srv_ctx *ctx;
mutex_lock(&ib_ctx.ib_dev_mutex);
ib_ctx.ib_dev_count--;
if (ib_ctx.ib_dev_count)
goto out;
/*
* Since our CM IDs are NOT bound to any ib device we will remove them
* only once, when the last device is removed
*/
ctx = ib_ctx.srv_ctx;
rdma_destroy_id(ctx->cm_id_ip);
rdma_destroy_id(ctx->cm_id_ib);
out:
mutex_unlock(&ib_ctx.ib_dev_mutex);
}
static struct ib_client rtrs_srv_client = {
.name = "rtrs_server",
.add = rtrs_srv_add_one,
.remove = rtrs_srv_remove_one
};
/**
* rtrs_srv_open() - open RTRS server context
* @ops: callback functions
* @port: port to listen on
*
* Creates server context with specified callbacks.
*
 * Returns a valid pointer on success, otherwise an ERR_PTR().
*/
struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
{
struct rtrs_srv_ctx *ctx;
int err;
ctx = alloc_srv_ctx(ops);
if (!ctx)
return ERR_PTR(-ENOMEM);
mutex_init(&ib_ctx.ib_dev_mutex);
ib_ctx.srv_ctx = ctx;
ib_ctx.port = port;
err = ib_register_client(&rtrs_srv_client);
if (err) {
free_srv_ctx(ctx);
return ERR_PTR(err);
}
return ctx;
}
EXPORT_SYMBOL(rtrs_srv_open);
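/*
 * Editor's sketch (not part of the original file): minimal server bring-up
 * with the API exported above, loosely modelled on how a ULP such as the
 * RNBD server wires it up.  The callback prototypes are inferred from the
 * way ctx->ops.link_ev() and ctx->ops.rdma_ev() are invoked in this file and
 * should be verified against the public rtrs.h header; the enum name
 * rtrs_srv_link_ev and all demo_* identifiers are assumptions.
 */
static int demo_link_ev(struct rtrs_srv_sess *srv,
			enum rtrs_srv_link_ev ev, void *priv)
{
	/* First-path-up / last-path-down notification for a session. */
	return 0;
}

static int demo_rdma_ev(void *priv, struct rtrs_srv_op *id,
			void *data, size_t datalen,
			const void *usr, size_t usrlen)
{
	/*
	 * Process the I/O described by @data/@usr, then (possibly later and
	 * from another context) answer with rtrs_srv_resp_rdma(id, err).
	 */
	return 0;
}

static struct rtrs_srv_ops demo_ops = {
	.link_ev = demo_link_ev,
	.rdma_ev = demo_rdma_ev,
};

static struct rtrs_srv_ctx *demo_srv_ctx;

static int demo_srv_start(void)
{
	demo_srv_ctx = rtrs_srv_open(&demo_ops, 1234 /* listen port */);
	return PTR_ERR_OR_ZERO(demo_srv_ctx);
}

static void demo_srv_stop(void)
{
	rtrs_srv_close(demo_srv_ctx);
}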
static void close_paths(struct rtrs_srv_sess *srv)
{
struct rtrs_srv_path *srv_path;
mutex_lock(&srv->paths_mutex);
list_for_each_entry(srv_path, &srv->paths_list, s.entry)
close_path(srv_path);
mutex_unlock(&srv->paths_mutex);
}
static void close_ctx(struct rtrs_srv_ctx *ctx)
{
struct rtrs_srv_sess *srv;
mutex_lock(&ctx->srv_mutex);
list_for_each_entry(srv, &ctx->srv_list, ctx_list)
close_paths(srv);
mutex_unlock(&ctx->srv_mutex);
flush_workqueue(rtrs_wq);
}
/**
* rtrs_srv_close() - close RTRS server context
* @ctx: pointer to server context
*
* Closes RTRS server context with all client sessions.
*/
void rtrs_srv_close(struct rtrs_srv_ctx *ctx)
{
ib_unregister_client(&rtrs_srv_client);
mutex_destroy(&ib_ctx.ib_dev_mutex);
close_ctx(ctx);
free_srv_ctx(ctx);
}
EXPORT_SYMBOL(rtrs_srv_close);
static int check_module_params(void)
{
if (sess_queue_depth < 1 || sess_queue_depth > MAX_SESS_QUEUE_DEPTH) {
pr_err("Invalid sess_queue_depth value %d, has to be >= %d, <= %d.\n",
sess_queue_depth, 1, MAX_SESS_QUEUE_DEPTH);
return -EINVAL;
}
if (max_chunk_size < MIN_CHUNK_SIZE || !is_power_of_2(max_chunk_size)) {
pr_err("Invalid max_chunk_size value %d, has to be >= %d and should be power of two.\n",
max_chunk_size, MIN_CHUNK_SIZE);
return -EINVAL;
}
/*
* Check if IB immediate data size is enough to hold the mem_id and the
* offset inside the memory chunk
*/
if ((ilog2(sess_queue_depth - 1) + 1) +
(ilog2(max_chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) {
pr_err("RDMA immediate size (%db) not enough to encode %d buffers of size %dB. Reduce 'sess_queue_depth' or 'max_chunk_size' parameters.\n",
MAX_IMM_PAYL_BITS, sess_queue_depth, max_chunk_size);
return -EINVAL;
}
return 0;
}
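/*
 * Editor's note (illustrative, not part of the original source): a worked
 * example of the immediate-payload budget checked above.  Assuming, purely
 * for illustration, sess_queue_depth = 512 and max_chunk_size = 128 KiB
 * (both are module parameters, so real values may differ):
 *
 *   bits for the buffer id: ilog2(512 - 1)    + 1 =  9
 *   bits for the offset:    ilog2(131072 - 1) + 1 = 17
 *   total:                                          26 bits
 *
 * which must not exceed MAX_IMM_PAYL_BITS.  The same split is used when
 * decoding the immediate data in rtrs_srv_rdma_done():
 *
 *   msg_id = imm_payload >> srv_path->mem_bits;
 *   off    = imm_payload & ((1 << srv_path->mem_bits) - 1);
 */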
static int __init rtrs_server_init(void)
{
int err;
pr_info("Loading module %s, proto %s: (max_chunk_size: %d (pure IO %ld, headers %ld) , sess_queue_depth: %d, always_invalidate: %d)\n",
KBUILD_MODNAME, RTRS_PROTO_VER_STRING,
max_chunk_size, max_chunk_size - MAX_HDR_SIZE, MAX_HDR_SIZE,
sess_queue_depth, always_invalidate);
rtrs_rdma_dev_pd_init(0, &dev_pd);
err = check_module_params();
if (err) {
pr_err("Failed to load module, invalid module parameters, err: %d\n",
err);
return err;
}
err = class_register(&rtrs_dev_class);
if (err)
goto out_err;
rtrs_wq = alloc_workqueue("rtrs_server_wq", 0, 0);
if (!rtrs_wq) {
err = -ENOMEM;
goto out_dev_class;
}
return 0;
out_dev_class:
class_unregister(&rtrs_dev_class);
out_err:
return err;
}
static void __exit rtrs_server_exit(void)
{
destroy_workqueue(rtrs_wq);
class_unregister(&rtrs_dev_class);
rtrs_rdma_dev_pd_deinit(&dev_pd);
}
module_init(rtrs_server_init);
module_exit(rtrs_server_exit);
| linux-master | drivers/infiniband/ulp/rtrs/rtrs-srv.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDMA Transport Layer
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include <linux/module.h>
#include <linux/inet.h>
#include "rtrs-pri.h"
#include "rtrs-log.h"
MODULE_DESCRIPTION("RDMA Transport Core");
MODULE_LICENSE("GPL");
struct rtrs_iu *rtrs_iu_alloc(u32 iu_num, size_t size, gfp_t gfp_mask,
struct ib_device *dma_dev,
enum dma_data_direction dir,
void (*done)(struct ib_cq *cq, struct ib_wc *wc))
{
struct rtrs_iu *ius, *iu;
int i;
ius = kcalloc(iu_num, sizeof(*ius), gfp_mask);
if (!ius)
return NULL;
for (i = 0; i < iu_num; i++) {
iu = &ius[i];
iu->direction = dir;
iu->buf = kzalloc(size, gfp_mask);
if (!iu->buf)
goto err;
iu->dma_addr = ib_dma_map_single(dma_dev, iu->buf, size, dir);
if (ib_dma_mapping_error(dma_dev, iu->dma_addr)) {
kfree(iu->buf);
goto err;
}
iu->cqe.done = done;
iu->size = size;
}
return ius;
err:
rtrs_iu_free(ius, dma_dev, i);
return NULL;
}
EXPORT_SYMBOL_GPL(rtrs_iu_alloc);
void rtrs_iu_free(struct rtrs_iu *ius, struct ib_device *ibdev, u32 queue_num)
{
struct rtrs_iu *iu;
int i;
if (!ius)
return;
for (i = 0; i < queue_num; i++) {
iu = &ius[i];
ib_dma_unmap_single(ibdev, iu->dma_addr, iu->size, iu->direction);
kfree(iu->buf);
}
kfree(ius);
}
EXPORT_SYMBOL_GPL(rtrs_iu_free);
int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
{
struct rtrs_path *path = con->path;
struct ib_recv_wr wr;
struct ib_sge list;
list.addr = iu->dma_addr;
list.length = iu->size;
list.lkey = path->dev->ib_pd->local_dma_lkey;
if (list.length == 0) {
rtrs_wrn(con->path,
"Posting receive work request failed, sg list is empty\n");
return -EINVAL;
}
wr = (struct ib_recv_wr) {
.wr_cqe = &iu->cqe,
.sg_list = &list,
.num_sge = 1,
};
return ib_post_recv(con->qp, &wr, NULL);
}
EXPORT_SYMBOL_GPL(rtrs_iu_post_recv);
int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe)
{
struct ib_recv_wr wr;
wr = (struct ib_recv_wr) {
.wr_cqe = cqe,
};
return ib_post_recv(con->qp, &wr, NULL);
}
EXPORT_SYMBOL_GPL(rtrs_post_recv_empty);
static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head,
struct ib_send_wr *wr, struct ib_send_wr *tail)
{
if (head) {
struct ib_send_wr *next = head;
while (next->next)
next = next->next;
next->next = wr;
} else {
head = wr;
}
if (tail)
wr->next = tail;
return ib_post_send(qp, head, NULL);
}
int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
struct ib_send_wr *head)
{
struct rtrs_path *path = con->path;
struct ib_send_wr wr;
struct ib_sge list;
if (WARN_ON(size == 0))
return -EINVAL;
list.addr = iu->dma_addr;
list.length = size;
list.lkey = path->dev->ib_pd->local_dma_lkey;
wr = (struct ib_send_wr) {
.wr_cqe = &iu->cqe,
.sg_list = &list,
.num_sge = 1,
.opcode = IB_WR_SEND,
.send_flags = IB_SEND_SIGNALED,
};
return rtrs_post_send(con->qp, head, &wr, NULL);
}
EXPORT_SYMBOL_GPL(rtrs_iu_post_send);
int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
struct ib_sge *sge, unsigned int num_sge,
u32 rkey, u64 rdma_addr, u32 imm_data,
enum ib_send_flags flags,
struct ib_send_wr *head,
struct ib_send_wr *tail)
{
struct ib_rdma_wr wr;
int i;
wr = (struct ib_rdma_wr) {
.wr.wr_cqe = &iu->cqe,
.wr.sg_list = sge,
.wr.num_sge = num_sge,
.rkey = rkey,
.remote_addr = rdma_addr,
.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
.wr.ex.imm_data = cpu_to_be32(imm_data),
.wr.send_flags = flags,
};
/*
* If one of the sges has 0 size, the operation will fail with a
* length error
*/
for (i = 0; i < num_sge; i++)
if (WARN_ONCE(sge[i].length == 0, "sg %d is zero length\n", i))
return -EINVAL;
return rtrs_post_send(con->qp, head, &wr.wr, tail);
}
EXPORT_SYMBOL_GPL(rtrs_iu_post_rdma_write_imm);
static int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con,
struct ib_cqe *cqe,
u32 imm_data,
struct ib_send_wr *head)
{
struct ib_rdma_wr wr;
struct rtrs_path *path = con->path;
enum ib_send_flags sflags;
atomic_dec_if_positive(&con->sq_wr_avail);
sflags = (atomic_inc_return(&con->wr_cnt) % path->signal_interval) ?
0 : IB_SEND_SIGNALED;
wr = (struct ib_rdma_wr) {
.wr.wr_cqe = cqe,
.wr.send_flags = sflags,
.wr.opcode = IB_WR_RDMA_WRITE_WITH_IMM,
.wr.ex.imm_data = cpu_to_be32(imm_data),
};
return rtrs_post_send(con->qp, head, &wr.wr, NULL);
}
static void qp_event_handler(struct ib_event *ev, void *ctx)
{
struct rtrs_con *con = ctx;
switch (ev->event) {
case IB_EVENT_COMM_EST:
rtrs_info(con->path, "QP event %s (%d) received\n",
ib_event_msg(ev->event), ev->event);
rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
break;
default:
rtrs_info(con->path, "Unhandled QP event %s (%d) received\n",
ib_event_msg(ev->event), ev->event);
break;
}
}
static bool is_pollqueue(struct rtrs_con *con)
{
return con->cid >= con->path->irq_con_num;
}
static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
enum ib_poll_context poll_ctx)
{
struct rdma_cm_id *cm_id = con->cm_id;
struct ib_cq *cq;
if (is_pollqueue(con))
cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector,
poll_ctx);
else
cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
if (IS_ERR(cq)) {
rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n",
PTR_ERR(cq));
return PTR_ERR(cq);
}
con->cq = cq;
con->nr_cqe = nr_cqe;
return 0;
}
static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
u32 max_send_wr, u32 max_recv_wr, u32 max_sge)
{
struct ib_qp_init_attr init_attr = {NULL};
struct rdma_cm_id *cm_id = con->cm_id;
int ret;
init_attr.cap.max_send_wr = max_send_wr;
init_attr.cap.max_recv_wr = max_recv_wr;
init_attr.cap.max_recv_sge = 1;
init_attr.event_handler = qp_event_handler;
init_attr.qp_context = con;
init_attr.cap.max_send_sge = max_sge;
init_attr.qp_type = IB_QPT_RC;
init_attr.send_cq = con->cq;
init_attr.recv_cq = con->cq;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
ret = rdma_create_qp(cm_id, pd, &init_attr);
if (ret) {
rtrs_err(con->path, "Creating QP failed, err: %d\n", ret);
return ret;
}
con->qp = cm_id->qp;
return ret;
}
static void destroy_cq(struct rtrs_con *con)
{
if (con->cq) {
if (is_pollqueue(con))
ib_free_cq(con->cq);
else
ib_cq_pool_put(con->cq, con->nr_cqe);
}
con->cq = NULL;
}
int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
u32 max_send_sge, int cq_vector, int nr_cqe,
u32 max_send_wr, u32 max_recv_wr,
enum ib_poll_context poll_ctx)
{
int err;
err = create_cq(con, cq_vector, nr_cqe, poll_ctx);
if (err)
return err;
err = create_qp(con, path->dev->ib_pd, max_send_wr, max_recv_wr,
max_send_sge);
if (err) {
destroy_cq(con);
return err;
}
con->path = path;
return 0;
}
EXPORT_SYMBOL_GPL(rtrs_cq_qp_create);
void rtrs_cq_qp_destroy(struct rtrs_con *con)
{
if (con->qp) {
rdma_destroy_qp(con->cm_id);
con->qp = NULL;
}
destroy_cq(con);
}
EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
static void schedule_hb(struct rtrs_path *path)
{
queue_delayed_work(path->hb_wq, &path->hb_dwork,
msecs_to_jiffies(path->hb_interval_ms));
}
void rtrs_send_hb_ack(struct rtrs_path *path)
{
struct rtrs_con *usr_con = path->con[0];
u32 imm;
int err;
imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
NULL);
if (err) {
rtrs_err(path, "send HB ACK failed, errno: %d\n", err);
path->hb_err_handler(usr_con);
return;
}
}
EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);
static void hb_work(struct work_struct *work)
{
struct rtrs_con *usr_con;
struct rtrs_path *path;
u32 imm;
int err;
path = container_of(to_delayed_work(work), typeof(*path), hb_dwork);
usr_con = path->con[0];
if (path->hb_missed_cnt > path->hb_missed_max) {
rtrs_err(path, "HB missed max reached.\n");
path->hb_err_handler(usr_con);
return;
}
if (path->hb_missed_cnt++) {
/* Reschedule work without sending hb */
schedule_hb(path);
return;
}
path->hb_last_sent = ktime_get();
imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
NULL);
if (err) {
rtrs_err(path, "HB send failed, errno: %d\n", err);
path->hb_err_handler(usr_con);
return;
}
schedule_hb(path);
}
void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
unsigned int interval_ms, unsigned int missed_max,
void (*err_handler)(struct rtrs_con *con),
struct workqueue_struct *wq)
{
path->hb_cqe = cqe;
path->hb_interval_ms = interval_ms;
path->hb_err_handler = err_handler;
path->hb_wq = wq;
path->hb_missed_max = missed_max;
path->hb_missed_cnt = 0;
INIT_DELAYED_WORK(&path->hb_dwork, hb_work);
}
EXPORT_SYMBOL_GPL(rtrs_init_hb);
void rtrs_start_hb(struct rtrs_path *path)
{
schedule_hb(path);
}
EXPORT_SYMBOL_GPL(rtrs_start_hb);
void rtrs_stop_hb(struct rtrs_path *path)
{
cancel_delayed_work_sync(&path->hb_dwork);
path->hb_missed_cnt = 0;
}
EXPORT_SYMBOL_GPL(rtrs_stop_hb);
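/*
 * Editor's sketch (not part of the original file): typical wiring of the
 * heartbeat helpers exported above.  The interval and missed-max values are
 * placeholders rather than the transport's real defaults, and all demo_*
 * identifiers are hypothetical.
 */
static void demo_hb_done(struct ib_cq *cq, struct ib_wc *wc)
{
	/* Completion of the RDMA-write-with-imm that carried the heartbeat. */
}

static struct ib_cqe demo_hb_cqe = { .done = demo_hb_done };

static void demo_hb_err(struct rtrs_con *con)
{
	/* e.g. trigger path close / error recovery on the owning path */
}

static void demo_setup_hb(struct rtrs_path *path, struct workqueue_struct *wq)
{
	rtrs_init_hb(path, &demo_hb_cqe, 5000 /* ms */, 5 /* missed max */,
		     demo_hb_err, wq);
	rtrs_start_hb(path);
	/* ... and on path teardown: rtrs_stop_hb(path); */
}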
static int rtrs_str_gid_to_sockaddr(const char *addr, size_t len,
short port, struct sockaddr_storage *dst)
{
struct sockaddr_ib *dst_ib = (struct sockaddr_ib *)dst;
int ret;
/*
* We can use some of the IPv6 functions since GID is a valid
* IPv6 address format
*/
ret = in6_pton(addr, len, dst_ib->sib_addr.sib_raw, '\0', NULL);
if (ret == 0)
return -EINVAL;
dst_ib->sib_family = AF_IB;
/*
* Use the same TCP server port number as the IB service ID
* on the IB port space range
*/
dst_ib->sib_sid = cpu_to_be64(RDMA_IB_IP_PS_IB | port);
dst_ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
dst_ib->sib_pkey = cpu_to_be16(0xffff);
return 0;
}
/**
* rtrs_str_to_sockaddr() - Convert rtrs address string to sockaddr
* @addr: String representation of an addr (IPv4, IPv6 or IB GID):
* - "ip:192.168.1.1"
* - "ip:fe80::200:5aee:feaa:20a2"
* - "gid:fe80::200:5aee:feaa:20a2"
* @len: String address length
* @port: Destination port
* @dst: Destination sockaddr structure
*
* Returns 0 if conversion successful. Non-zero on error.
*/
static int rtrs_str_to_sockaddr(const char *addr, size_t len,
u16 port, struct sockaddr_storage *dst)
{
if (strncmp(addr, "gid:", 4) == 0) {
return rtrs_str_gid_to_sockaddr(addr + 4, len - 4, port, dst);
} else if (strncmp(addr, "ip:", 3) == 0) {
char port_str[8];
char *cpy;
int err;
snprintf(port_str, sizeof(port_str), "%u", port);
cpy = kstrndup(addr + 3, len - 3, GFP_KERNEL);
err = cpy ? inet_pton_with_scope(&init_net, AF_UNSPEC,
cpy, port_str, dst) : -ENOMEM;
kfree(cpy);
return err;
}
return -EPROTONOSUPPORT;
}
/**
* sockaddr_to_str() - convert sockaddr to a string.
 * @addr: the sockaddr structure to be converted.
* @buf: string containing socket addr.
* @len: string length.
*
* The return value is the number of characters written into buf not
 * including the trailing '\0'. If len == 0 the function returns 0.
*/
int sockaddr_to_str(const struct sockaddr *addr, char *buf, size_t len)
{
switch (addr->sa_family) {
case AF_IB:
return scnprintf(buf, len, "gid:%pI6",
&((struct sockaddr_ib *)addr)->sib_addr.sib_raw);
case AF_INET:
return scnprintf(buf, len, "ip:%pI4",
&((struct sockaddr_in *)addr)->sin_addr);
case AF_INET6:
return scnprintf(buf, len, "ip:%pI6c",
&((struct sockaddr_in6 *)addr)->sin6_addr);
}
return scnprintf(buf, len, "<invalid address family>");
}
EXPORT_SYMBOL(sockaddr_to_str);
/**
* rtrs_addr_to_str() - convert rtrs_addr to a string "src@dst"
* @addr: the rtrs_addr structure to be converted
* @buf: string containing source and destination addr of a path
 * separated by '@', e.g. "ip:1.1.1.1@ip:1.1.1.2".
* @len: string length
*
* The return value is the number of characters written into buf not
* including the trailing '\0'.
*/
int rtrs_addr_to_str(const struct rtrs_addr *addr, char *buf, size_t len)
{
int cnt;
cnt = sockaddr_to_str((struct sockaddr *)addr->src,
buf, len);
cnt += scnprintf(buf + cnt, len - cnt, "@");
sockaddr_to_str((struct sockaddr *)addr->dst,
buf + cnt, len - cnt);
return cnt;
}
EXPORT_SYMBOL(rtrs_addr_to_str);
/**
* rtrs_addr_to_sockaddr() - convert path string "src,dst" or "src@dst"
 * to sockaddresses
* @str: string containing source and destination addr of a path
* separated by ',' or '@' I.e. "ip:1.1.1.1,ip:1.1.1.2" or
* "ip:1.1.1.1@ip:1.1.1.2". If str contains only one address it's
* considered to be destination.
* @len: string length
* @port: Destination port number.
* @addr: will be set to the source/destination address or to NULL
* if str doesn't contain any source address.
*
* Returns zero if conversion successful. Non-zero otherwise.
*/
int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
struct rtrs_addr *addr)
{
const char *d;
d = strchr(str, ',');
if (!d)
d = strchr(str, '@');
if (d) {
if (rtrs_str_to_sockaddr(str, d - str, 0, addr->src))
return -EINVAL;
d += 1;
len -= d - str;
str = d;
} else {
addr->src = NULL;
}
return rtrs_str_to_sockaddr(str, len, port, addr->dst);
}
EXPORT_SYMBOL(rtrs_addr_to_sockaddr);
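/*
 * Editor's sketch (not part of the original file): a round trip through the
 * two address helpers above.  demo_parse_path() is hypothetical; the
 * "ip:...@ip:..." string follows the formats documented for
 * rtrs_str_to_sockaddr() and rtrs_addr_to_sockaddr().
 */
static void demo_parse_path(void)
{
	struct sockaddr_storage ssrc, sdst;
	struct rtrs_addr addr = { .src = &ssrc, .dst = &sdst };
	const char *spec = "ip:192.168.1.1@ip:192.168.1.2";
	char buf[128];

	if (rtrs_addr_to_sockaddr(spec, strlen(spec), 1234 /* port */, &addr))
		return;

	rtrs_addr_to_str(&addr, buf, sizeof(buf));
	pr_info("parsed path: %s\n", buf);
}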
void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
struct rtrs_rdma_dev_pd *pool)
{
INIT_LIST_HEAD(&pool->list);
mutex_init(&pool->mutex);
pool->pd_flags = pd_flags;
}
EXPORT_SYMBOL(rtrs_rdma_dev_pd_init);
void rtrs_rdma_dev_pd_deinit(struct rtrs_rdma_dev_pd *pool)
{
mutex_destroy(&pool->mutex);
WARN_ON(!list_empty(&pool->list));
}
EXPORT_SYMBOL(rtrs_rdma_dev_pd_deinit);
static void dev_free(struct kref *ref)
{
struct rtrs_rdma_dev_pd *pool;
struct rtrs_ib_dev *dev;
dev = container_of(ref, typeof(*dev), ref);
pool = dev->pool;
mutex_lock(&pool->mutex);
list_del(&dev->entry);
mutex_unlock(&pool->mutex);
ib_dealloc_pd(dev->ib_pd);
kfree(dev);
}
int rtrs_ib_dev_put(struct rtrs_ib_dev *dev)
{
return kref_put(&dev->ref, dev_free);
}
EXPORT_SYMBOL(rtrs_ib_dev_put);
static int rtrs_ib_dev_get(struct rtrs_ib_dev *dev)
{
return kref_get_unless_zero(&dev->ref);
}
struct rtrs_ib_dev *
rtrs_ib_dev_find_or_add(struct ib_device *ib_dev,
struct rtrs_rdma_dev_pd *pool)
{
struct rtrs_ib_dev *dev;
mutex_lock(&pool->mutex);
list_for_each_entry(dev, &pool->list, entry) {
if (dev->ib_dev->node_guid == ib_dev->node_guid &&
rtrs_ib_dev_get(dev))
goto out_unlock;
}
mutex_unlock(&pool->mutex);
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
goto out_err;
kref_init(&dev->ref);
dev->pool = pool;
dev->ib_dev = ib_dev;
dev->ib_pd = ib_alloc_pd(ib_dev, pool->pd_flags);
if (IS_ERR(dev->ib_pd))
goto out_free_dev;
if (pool->ops && pool->ops->init && pool->ops->init(dev))
goto out_free_pd;
mutex_lock(&pool->mutex);
list_add(&dev->entry, &pool->list);
out_unlock:
mutex_unlock(&pool->mutex);
return dev;
out_free_pd:
ib_dealloc_pd(dev->ib_pd);
out_free_dev:
kfree(dev);
out_err:
return NULL;
}
EXPORT_SYMBOL(rtrs_ib_dev_find_or_add);
| linux-master | drivers/infiniband/ulp/rtrs/rtrs.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDMA Transport Layer
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include "rtrs-clt.h"
void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_clt_stats *stats = clt_path->stats;
struct rtrs_clt_stats_pcpu *s;
int cpu;
cpu = raw_smp_processor_id();
s = get_cpu_ptr(stats->pcpu_stats);
if (con->cpu != cpu) {
s->cpu_migr.to++;
		/* Careful here, we overwrite the s pointer */
s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
atomic_inc(&s->cpu_migr.from);
}
put_cpu_ptr(stats->pcpu_stats);
}
void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
{
this_cpu_inc(stats->pcpu_stats->rdma.failover_cnt);
}
int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
struct rtrs_clt_stats_pcpu *s;
size_t used;
int cpu;
used = 0;
for_each_possible_cpu(cpu) {
s = per_cpu_ptr(stats->pcpu_stats, cpu);
used += sysfs_emit_at(buf, used, "%d ",
atomic_read(&s->cpu_migr.from));
}
used += sysfs_emit_at(buf, used, "\n");
return used;
}
int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
struct rtrs_clt_stats_pcpu *s;
size_t used;
int cpu;
used = 0;
for_each_possible_cpu(cpu) {
s = per_cpu_ptr(stats->pcpu_stats, cpu);
used += sysfs_emit_at(buf, used, "%d ", s->cpu_migr.to);
}
used += sysfs_emit_at(buf, used, "\n");
return used;
}
int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf)
{
return sysfs_emit(buf, "%d %d\n", stats->reconnects.successful_cnt,
stats->reconnects.fail_cnt);
}
ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page)
{
struct rtrs_clt_stats_rdma sum;
struct rtrs_clt_stats_rdma *r;
int cpu;
memset(&sum, 0, sizeof(sum));
for_each_possible_cpu(cpu) {
r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;
sum.dir[READ].cnt += r->dir[READ].cnt;
sum.dir[READ].size_total += r->dir[READ].size_total;
sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
sum.failover_cnt += r->failover_cnt;
}
return sysfs_emit(page, "%llu %llu %llu %llu %u %llu\n",
sum.dir[READ].cnt, sum.dir[READ].size_total,
sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
atomic_read(&stats->inflight), sum.failover_cnt);
}
ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, char *page)
{
return sysfs_emit(page, "echo 1 to reset all statistics\n");
}
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
{
struct rtrs_clt_stats_pcpu *s;
int cpu;
if (!enable)
return -EINVAL;
for_each_possible_cpu(cpu) {
s = per_cpu_ptr(stats->pcpu_stats, cpu);
memset(&s->rdma, 0, sizeof(s->rdma));
}
return 0;
}
int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
{
struct rtrs_clt_stats_pcpu *s;
int cpu;
if (!enable)
return -EINVAL;
for_each_possible_cpu(cpu) {
s = per_cpu_ptr(stats->pcpu_stats, cpu);
memset(&s->cpu_migr, 0, sizeof(s->cpu_migr));
}
return 0;
}
int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
{
if (!enable)
return -EINVAL;
memset(&stats->reconnects, 0, sizeof(stats->reconnects));
return 0;
}
int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
{
if (enable) {
rtrs_clt_reset_rdma_stats(s, enable);
rtrs_clt_reset_cpu_migr_stats(s, enable);
rtrs_clt_reset_reconnects_stat(s, enable);
atomic_set(&s->inflight, 0);
return 0;
}
return -EINVAL;
}
static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
size_t size, int d)
{
this_cpu_inc(stats->pcpu_stats->rdma.dir[d].cnt);
this_cpu_add(stats->pcpu_stats->rdma.dir[d].size_total, size);
}
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
struct rtrs_clt_con *con = req->con;
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_clt_stats *stats = clt_path->stats;
unsigned int len;
len = req->usr_len + req->data_len;
rtrs_clt_update_rdma_stats(stats, len, dir);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_inc(&stats->inflight);
}
int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
{
stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
if (!stats->pcpu_stats)
return -ENOMEM;
/*
* successful_cnt will be set to 0 after session
* is established for the first time
*/
stats->reconnects.successful_cnt = -1;
return 0;
}
| linux-master | drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* RDMA Transport Layer
*
* Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
* Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
* Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
*/
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/random.h>
#include "rtrs-clt.h"
#include "rtrs-log.h"
#include "rtrs-clt-trace.h"
#define RTRS_CONNECT_TIMEOUT_MS 30000
/*
* Wait a bit before trying to reconnect after a failure
 * in order to give the server time to finish cleaning up;
 * reconnecting too early leads to "false positive" failed
 * reconnect attempts.
*/
#define RTRS_RECONNECT_BACKOFF 1000
/*
* Wait for additional random time between 0 and 8 seconds
* before starting to reconnect to avoid clients reconnecting
* all at once in case of a major network outage
*/
#define RTRS_RECONNECT_SEED 8
#define FIRST_CONN 0x01
/* limit to 128 * 4k = 512k max IO */
#define RTRS_MAX_SEGMENTS 128
MODULE_DESCRIPTION("RDMA Transport Client");
MODULE_LICENSE("GPL");
static const struct rtrs_rdma_dev_pd_ops dev_pd_ops;
static struct rtrs_rdma_dev_pd dev_pd = {
.ops = &dev_pd_ops
};
static struct workqueue_struct *rtrs_wq;
static const struct class rtrs_clt_dev_class = {
.name = "rtrs-client",
};
static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
{
struct rtrs_clt_path *clt_path;
bool connected = false;
rcu_read_lock();
list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED) {
connected = true;
break;
}
rcu_read_unlock();
return connected;
}
static struct rtrs_permit *
__rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type)
{
size_t max_depth = clt->queue_depth;
struct rtrs_permit *permit;
int bit;
/*
* Adapted from null_blk get_tag(). Callers from different cpus may
* grab the same bit, since find_first_zero_bit is not atomic.
* But then the test_and_set_bit_lock will fail for all the
* callers but one, so that they will loop again.
* This way an explicit spinlock is not required.
*/
do {
bit = find_first_zero_bit(clt->permits_map, max_depth);
if (bit >= max_depth)
return NULL;
} while (test_and_set_bit_lock(bit, clt->permits_map));
permit = get_permit(clt, bit);
WARN_ON(permit->mem_id != bit);
permit->cpu_id = raw_smp_processor_id();
permit->con_type = con_type;
return permit;
}
static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt,
struct rtrs_permit *permit)
{
clear_bit_unlock(permit->mem_id, clt->permits_map);
}
/**
* rtrs_clt_get_permit() - allocates permit for future RDMA operation
* @clt: Current session
* @con_type: Type of connection to use with the permit
* @can_wait: Wait type
*
* Description:
 * Allocates a permit for a subsequent RDMA operation. The permit is
 * used to preallocate all resources and to propagate memory pressure
 * to the caller early.
*
* Context:
 * Can sleep if @can_wait == RTRS_PERMIT_WAIT
*/
struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt,
enum rtrs_clt_con_type con_type,
enum wait_type can_wait)
{
struct rtrs_permit *permit;
DEFINE_WAIT(wait);
permit = __rtrs_get_permit(clt, con_type);
if (permit || !can_wait)
return permit;
do {
prepare_to_wait(&clt->permits_wait, &wait,
TASK_UNINTERRUPTIBLE);
permit = __rtrs_get_permit(clt, con_type);
if (permit)
break;
io_schedule();
} while (1);
finish_wait(&clt->permits_wait, &wait);
return permit;
}
EXPORT_SYMBOL(rtrs_clt_get_permit);
/**
* rtrs_clt_put_permit() - puts allocated permit
* @clt: Current session
* @permit: Permit to be freed
*
* Context:
* Does not matter
*/
void rtrs_clt_put_permit(struct rtrs_clt_sess *clt,
struct rtrs_permit *permit)
{
if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
return;
__rtrs_put_permit(clt, permit);
/*
* rtrs_clt_get_permit() adds itself to the &clt->permits_wait list
* before calling schedule(). So if rtrs_clt_get_permit() is sleeping
* it must have added itself to &clt->permits_wait before
* __rtrs_put_permit() finished.
* Hence it is safe to guard wake_up() with a waitqueue_active() test.
*/
if (waitqueue_active(&clt->permits_wait))
wake_up(&clt->permits_wait);
}
EXPORT_SYMBOL(rtrs_clt_put_permit);
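/*
 * Editor's sketch (not part of the original file): the usual get/put pattern
 * for permits as seen from a ULP.  demo_submit() is a hypothetical stand-in
 * for the real request-submission path; only rtrs_clt_get_permit() and
 * rtrs_clt_put_permit() come from this file.
 */
static int demo_submit(struct rtrs_clt_sess *clt, struct rtrs_permit *permit)
{
	/* Placeholder for issuing an RDMA request under this permit. */
	return 0;
}

static int demo_with_permit(struct rtrs_clt_sess *clt)
{
	struct rtrs_permit *permit;
	int err;

	permit = rtrs_clt_get_permit(clt, RTRS_IO_CON, RTRS_PERMIT_WAIT);
	if (!permit)	/* NULL is only possible in the no-wait case */
		return -EBUSY;

	err = demo_submit(clt, permit);
	/*
	 * Put the permit back once the operation has completed (here
	 * immediately, since demo_submit() does nothing).
	 */
	rtrs_clt_put_permit(clt, permit);
	return err;
}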
/**
* rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
* @clt_path: client path pointer
* @permit: permit for the allocation of the RDMA buffer
* Note:
 * I/O connections start from 1; connection 0 is for user messages.
*/
static
struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path,
struct rtrs_permit *permit)
{
int id = 0;
if (permit->con_type == RTRS_IO_CON)
id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1;
return to_clt_con(clt_path->s.con[id]);
}
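/*
 * Example of the mapping above: with irq_con_num == 5 (four IO
 * connections plus user connection 0), an IO permit taken on CPU 6 maps
 * to connection (6 % 4) + 1 == 3, while user-message permits always map
 * to connection 0.
 */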
/**
 * rtrs_clt_change_state() - change the session state through the session
 * state machine.
*
* @clt_path: client path to change the state of.
* @new_state: state to change to.
*
 * Returns true if the path state was changed to the new state, otherwise
 * returns false.
*
* Locks:
 * state_wq lock must be held.
*/
static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state new_state)
{
enum rtrs_clt_state old_state;
bool changed = false;
lockdep_assert_held(&clt_path->state_wq.lock);
old_state = clt_path->state;
switch (new_state) {
case RTRS_CLT_CONNECTING:
switch (old_state) {
case RTRS_CLT_RECONNECTING:
changed = true;
fallthrough;
default:
break;
}
break;
case RTRS_CLT_RECONNECTING:
switch (old_state) {
case RTRS_CLT_CONNECTED:
case RTRS_CLT_CONNECTING_ERR:
case RTRS_CLT_CLOSED:
changed = true;
fallthrough;
default:
break;
}
break;
case RTRS_CLT_CONNECTED:
switch (old_state) {
case RTRS_CLT_CONNECTING:
changed = true;
fallthrough;
default:
break;
}
break;
case RTRS_CLT_CONNECTING_ERR:
switch (old_state) {
case RTRS_CLT_CONNECTING:
changed = true;
fallthrough;
default:
break;
}
break;
case RTRS_CLT_CLOSING:
switch (old_state) {
case RTRS_CLT_CONNECTING:
case RTRS_CLT_CONNECTING_ERR:
case RTRS_CLT_RECONNECTING:
case RTRS_CLT_CONNECTED:
changed = true;
fallthrough;
default:
break;
}
break;
case RTRS_CLT_CLOSED:
switch (old_state) {
case RTRS_CLT_CLOSING:
changed = true;
fallthrough;
default:
break;
}
break;
case RTRS_CLT_DEAD:
switch (old_state) {
case RTRS_CLT_CLOSED:
changed = true;
fallthrough;
default:
break;
}
break;
default:
break;
}
if (changed) {
clt_path->state = new_state;
wake_up_locked(&clt_path->state_wq);
}
return changed;
}
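/*
 * Allowed transitions implemented by rtrs_clt_change_state() above,
 * summarized as a reading aid (new state  <-  accepted old states):
 *
 *	CONNECTING      <-  RECONNECTING
 *	CONNECTED       <-  CONNECTING
 *	CONNECTING_ERR  <-  CONNECTING
 *	RECONNECTING    <-  CONNECTED, CONNECTING_ERR, CLOSED
 *	CLOSING         <-  CONNECTING, CONNECTING_ERR, RECONNECTING, CONNECTED
 *	CLOSED          <-  CLOSING
 *	DEAD            <-  CLOSED
 */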
static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state old_state,
enum rtrs_clt_state new_state)
{
bool changed = false;
spin_lock_irq(&clt_path->state_wq.lock);
if (clt_path->state == old_state)
changed = rtrs_clt_change_state(clt_path, new_state);
spin_unlock_irq(&clt_path->state_wq.lock);
return changed;
}
static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path);
static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
trace_rtrs_rdma_error_recovery(clt_path);
if (rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTED,
RTRS_CLT_RECONNECTING)) {
queue_work(rtrs_wq, &clt_path->err_recovery_work);
} else {
/*
* Error can happen just on establishing new connection,
* so notify waiter with error state, waiter is responsible
* for cleaning the rest and reconnect if needed.
*/
rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTING,
RTRS_CLT_CONNECTING_ERR);
}
}
static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
}
static struct ib_cqe fast_reg_cqe = {
.done = rtrs_clt_fast_reg_done
};
static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
bool notify, bool can_wait);
static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_io_req *req =
container_of(wc->wr_cqe, typeof(*req), inv_cqe);
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
req->need_inv = false;
if (req->need_inv_comp)
complete(&req->inv_comp);
else
/* Complete request from INV callback */
complete_rdma_req(req, req->inv_errno, true, false);
}
static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
{
struct rtrs_clt_con *con = req->con;
struct ib_send_wr wr = {
.opcode = IB_WR_LOCAL_INV,
.wr_cqe = &req->inv_cqe,
.send_flags = IB_SEND_SIGNALED,
.ex.invalidate_rkey = req->mr->rkey,
};
req->inv_cqe.done = rtrs_clt_inv_rkey_done;
return ib_post_send(con->c.qp, &wr, NULL);
}
static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
bool notify, bool can_wait)
{
struct rtrs_clt_con *con = req->con;
struct rtrs_clt_path *clt_path;
int err;
if (WARN_ON(!req->in_use))
return;
if (WARN_ON(!req->con))
return;
clt_path = to_clt_path(con->c.path);
if (req->sg_cnt) {
if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
/*
 * We are here to invalidate read requests
 * ourselves. In the normal scenario the server
 * sends INV for all read requests, but since we
 * got here anyway, one of two things happened:
 *
 * 1. this is a failover, when errno != 0
 *    and can_wait == 1,
 *
 * 2. something went totally wrong and the
 *    server forgot to send INV, so we
 *    have to do it ourselves.
*/
if (can_wait) {
req->need_inv_comp = true;
} else {
/* This should be IO path, so always notify */
WARN_ON(!notify);
/* Save errno for INV callback */
req->inv_errno = errno;
}
refcount_inc(&req->ref);
err = rtrs_inv_rkey(req);
if (err) {
rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
req->mr->rkey, err);
} else if (can_wait) {
wait_for_completion(&req->inv_comp);
} else {
/*
* Something went wrong, so request will be
* completed from INV callback.
*/
WARN_ON_ONCE(1);
return;
}
if (!refcount_dec_and_test(&req->ref))
return;
}
ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
}
if (!refcount_dec_and_test(&req->ref))
return;
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&clt_path->stats->inflight);
req->in_use = false;
req->con = NULL;
if (errno) {
rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
errno, kobject_name(&clt_path->kobj), clt_path->hca_name,
clt_path->hca_port, notify);
}
if (notify)
req->conf(req->priv, errno);
}
static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
struct rtrs_clt_io_req *req,
struct rtrs_rbuf *rbuf, u32 off,
u32 imm, struct ib_send_wr *wr)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
enum ib_send_flags flags;
struct ib_sge sge;
if (!req->sg_size) {
rtrs_wrn(con->c.path,
"Doing RDMA Write failed, no data supplied\n");
return -EINVAL;
}
/* user data and user message in the first list element */
sge.addr = req->iu->dma_addr;
sge.length = req->sg_size;
sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
/*
* From time to time we have to post signalled sends,
 * or the send queue will fill up and only a QP reset can help.
*/
flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
0 : IB_SEND_SIGNALED;
ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
req->iu->dma_addr,
req->sg_size, DMA_TO_DEVICE);
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
rbuf->rkey, rbuf->addr + off,
imm, flags, wr, NULL);
}
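/*
 * Note on the signalling scheme in rtrs_post_send_rdma() above: flags is
 * IB_SEND_SIGNALED only when wr_cnt hits a multiple of s.signal_interval,
 * so e.g. with signal_interval == 32 only every 32nd work request
 * generates a send completion, which is enough to keep the send queue
 * from silently filling up.
 */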
static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
s16 errno, bool w_inval)
{
struct rtrs_clt_io_req *req;
if (WARN_ON(msg_id >= clt_path->queue_depth))
return;
req = &clt_path->reqs[msg_id];
/* Drop need_inv if server responded with send with invalidation */
req->need_inv &= !w_inval;
complete_rdma_req(req, errno, true, false);
}
static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
{
struct rtrs_iu *iu;
int err;
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu,
cqe);
err = rtrs_iu_post_recv(&con->c, iu);
if (err) {
rtrs_err(con->c.path, "post iu failed %d\n", err);
rtrs_rdma_error_recovery(con);
}
}
static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_msg_rkey_rsp *msg;
u32 imm_type, imm_payload;
bool w_inval = false;
struct rtrs_iu *iu;
u32 buf_id;
int err;
WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
if (wc->byte_len < sizeof(*msg)) {
rtrs_err(con->c.path, "rkey response is malformed: size %d\n",
wc->byte_len);
goto out;
}
ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) {
rtrs_err(clt_path->clt,
"rkey response is malformed: type %d\n",
le16_to_cpu(msg->type));
goto out;
}
buf_id = le16_to_cpu(msg->buf_id);
if (WARN_ON(buf_id >= clt_path->queue_depth))
goto out;
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
if (imm_type == RTRS_IO_RSP_IMM ||
imm_type == RTRS_IO_RSP_W_INV_IMM) {
u32 msg_id;
w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
if (WARN_ON(buf_id != msg_id))
goto out;
clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
process_io_rsp(clt_path, msg_id, err, w_inval);
}
ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
return rtrs_clt_recv_done(con, wc);
out:
rtrs_rdma_error_recovery(con);
}
static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc);
static struct ib_cqe io_comp_cqe = {
.done = rtrs_clt_rdma_done
};
/*
* Post x2 empty WRs: first is for this RDMA with IMM,
* second is for RECV with INV, which happened earlier.
*/
static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
{
struct ib_recv_wr wr_arr[2], *wr;
int i;
memset(wr_arr, 0, sizeof(wr_arr));
for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
wr = &wr_arr[i];
wr->wr_cqe = cqe;
if (i)
/* Chain backwards */
wr->next = &wr_arr[i - 1];
}
return ib_post_recv(con->qp, wr, NULL);
}
static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
u32 imm_type, imm_payload;
bool w_inval = false;
int err;
if (wc->status != IB_WC_SUCCESS) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
rtrs_err(clt_path->clt, "RDMA failed: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
return;
}
rtrs_clt_update_wc_stats(con);
switch (wc->opcode) {
case IB_WC_RECV_RDMA_WITH_IMM:
/*
* post_recv() RDMA write completions of IO reqs (read/write)
* and hb
*/
if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done))
return;
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
&imm_type, &imm_payload);
if (imm_type == RTRS_IO_RSP_IMM ||
imm_type == RTRS_IO_RSP_W_INV_IMM) {
u32 msg_id;
w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
process_io_rsp(clt_path, msg_id, err, w_inval);
} else if (imm_type == RTRS_HB_MSG_IMM) {
WARN_ON(con->c.cid);
rtrs_send_hb_ack(&clt_path->s);
if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
clt_path->s.hb_missed_cnt = 0;
clt_path->s.hb_cur_latency =
ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else {
rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
imm_type);
}
if (w_inval)
/*
* Post x2 empty WRs: first is for this RDMA with IMM,
* second is for RECV with INV, which happened earlier.
*/
err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe);
else
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n",
err);
rtrs_rdma_error_recovery(con);
}
break;
case IB_WC_RECV:
/*
* Key invalidations from server side
*/
WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
wc->wc_flags & IB_WC_WITH_IMM));
WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
return rtrs_clt_recv_done(con, wc);
return rtrs_clt_rkey_rsp_done(con, wc);
}
break;
case IB_WC_RDMA_WRITE:
/*
* post_send() RDMA write completions of IO reqs (read/write)
* and hb.
*/
break;
default:
rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
return;
}
}
static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
{
int err, i;
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
for (i = 0; i < q_size; i++) {
if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
struct rtrs_iu *iu = &con->rsp_ius[i];
err = rtrs_iu_post_recv(&con->c, iu);
} else {
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
}
if (err)
return err;
}
return 0;
}
static int post_recv_path(struct rtrs_clt_path *clt_path)
{
size_t q_size = 0;
int err, cid;
for (cid = 0; cid < clt_path->s.con_num; cid++) {
if (cid == 0)
q_size = SERVICE_CON_QUEUE_DEPTH;
else
q_size = clt_path->queue_depth;
/*
* x2 for RDMA read responses + FR key invalidations,
* RDMA writes do not require any FR registrations.
*/
q_size *= 2;
err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
if (err) {
rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n",
err);
return err;
}
}
return 0;
}
struct path_it {
int i;
struct list_head skip_list;
struct rtrs_clt_sess *clt;
struct rtrs_clt_path *(*next_path)(struct path_it *it);
};
/*
* rtrs_clt_get_next_path_or_null - get clt path from the list or return NULL
* @head: the head for the list.
* @clt_path: The element to take the next clt_path from.
*
 * The next clt path is returned in round-robin fashion, i.e. the head is
 * skipped; if the list is observed as empty, NULL is returned.
*
* This function may safely run concurrently with the _rcu list-mutation
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
*/
static inline struct rtrs_clt_path *
rtrs_clt_get_next_path_or_null(struct list_head *head, struct rtrs_clt_path *clt_path)
{
return list_next_or_null_rcu(head, &clt_path->s.entry, typeof(*clt_path), s.entry) ?:
list_next_or_null_rcu(head,
READ_ONCE((&clt_path->s.entry)->next),
typeof(*clt_path), s.entry);
}
/**
* get_next_path_rr() - Returns path in round-robin fashion.
* @it: the path pointer
*
* Related to @MP_POLICY_RR
*
* Locks:
 * rcu_read_lock() must be held.
*/
static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
{
struct rtrs_clt_path __rcu **ppcpu_path;
struct rtrs_clt_path *path;
struct rtrs_clt_sess *clt;
clt = it->clt;
/*
* Here we use two RCU objects: @paths_list and @pcpu_path
* pointer. See rtrs_clt_remove_path_from_arr() for details
* how that is handled.
*/
ppcpu_path = this_cpu_ptr(clt->pcpu_path);
path = rcu_dereference(*ppcpu_path);
if (!path)
path = list_first_or_null_rcu(&clt->paths_list,
typeof(*path), s.entry);
else
path = rtrs_clt_get_next_path_or_null(&clt->paths_list, path);
rcu_assign_pointer(*ppcpu_path, path);
return path;
}
/**
* get_next_path_min_inflight() - Returns path with minimal inflight count.
* @it: the path pointer
*
* Related to @MP_POLICY_MIN_INFLIGHT
*
* Locks:
 * rcu_read_lock() must be held.
*/
static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it)
{
struct rtrs_clt_path *min_path = NULL;
struct rtrs_clt_sess *clt = it->clt;
struct rtrs_clt_path *clt_path;
int min_inflight = INT_MAX;
int inflight;
list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
continue;
inflight = atomic_read(&clt_path->stats->inflight);
if (inflight < min_inflight) {
min_inflight = inflight;
min_path = clt_path;
}
}
/*
* add the path to the skip list, so that next time we can get
* a different one
*/
if (min_path)
list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
return min_path;
}
/**
* get_next_path_min_latency() - Returns path with minimal latency.
* @it: the path pointer
*
* Return: a path with the lowest latency or NULL if all paths are tried
*
* Locks:
 * rcu_read_lock() must be held.
*
* Related to @MP_POLICY_MIN_LATENCY
*
 * This DOES skip already-tried paths.
 * A skip-list is used to skip a path that has been tried but failed.
 * It will try the minimum-latency path, then the second minimum-latency
 * path, and so on, and finally returns NULL once all paths have been
 * tried. Therefore the caller MUST check whether the returned path is
 * NULL and trigger the IO error in that case.
*/
static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it)
{
struct rtrs_clt_path *min_path = NULL;
struct rtrs_clt_sess *clt = it->clt;
struct rtrs_clt_path *clt_path;
ktime_t min_latency = KTIME_MAX;
ktime_t latency;
list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
continue;
latency = clt_path->s.hb_cur_latency;
if (latency < min_latency) {
min_latency = latency;
min_path = clt_path;
}
}
/*
* add the path to the skip list, so that next time we can get
* a different one
*/
if (min_path)
list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list);
return min_path;
}
static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt)
{
INIT_LIST_HEAD(&it->skip_list);
it->clt = clt;
it->i = 0;
if (clt->mp_policy == MP_POLICY_RR)
it->next_path = get_next_path_rr;
else if (clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
it->next_path = get_next_path_min_inflight;
else
it->next_path = get_next_path_min_latency;
}
static inline void path_it_deinit(struct path_it *it)
{
struct list_head *skip, *tmp;
/*
* The skip_list is used only for the MIN_INFLIGHT and MIN_LATENCY policies.
 * We need to remove paths from it, so that the next IO can insert
 * paths (->mp_skip_entry) into the skip_list again.
*/
list_for_each_safe(skip, tmp, &it->skip_list)
list_del_init(skip);
}
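/*
 * The path iterator above is used in the following pattern (see
 * rtrs_clt_failover_req() below for a real caller):
 *
 *	rcu_read_lock();
 *	for (path_it_init(&it, clt);
 *	     (path = it.next_path(&it)) && it.i < it.clt->paths_num;
 *	     it.i++) {
 *		... try to use @path ...
 *	}
 *	path_it_deinit(&it);
 *	rcu_read_unlock();
 */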
/**
* rtrs_clt_init_req() - Initialize an rtrs_clt_io_req holding information
* about an inflight IO.
* The user buffer holding user control message (not data) is copied into
* the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
* also hold the control message of rtrs.
* @req: an io request holding information about IO.
* @clt_path: client path
 * @conf: confirmation callback function to notify the upper layer.
* @permit: permit for allocation of RDMA remote buffer
* @priv: private pointer
* @vec: kernel vector containing control message
* @usr_len: length of the user message
 * @sg: scatter list for IO data
 * @sg_cnt: number of scatter list entries
* @data_len: length of the IO data
* @dir: direction of the IO.
*/
static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
struct rtrs_clt_path *clt_path,
void (*conf)(void *priv, int errno),
struct rtrs_permit *permit, void *priv,
const struct kvec *vec, size_t usr_len,
struct scatterlist *sg, size_t sg_cnt,
size_t data_len, int dir)
{
struct iov_iter iter;
size_t len;
req->permit = permit;
req->in_use = true;
req->usr_len = usr_len;
req->data_len = data_len;
req->sglist = sg;
req->sg_cnt = sg_cnt;
req->priv = priv;
req->dir = dir;
req->con = rtrs_permit_to_clt_con(clt_path, permit);
req->conf = conf;
req->need_inv = false;
req->need_inv_comp = false;
req->inv_errno = 0;
refcount_set(&req->ref, 1);
req->mp_policy = clt_path->clt->mp_policy;
iov_iter_kvec(&iter, ITER_SOURCE, vec, 1, usr_len);
len = _copy_from_iter(req->iu->buf, usr_len, &iter);
WARN_ON(len != usr_len);
reinit_completion(&req->inv_comp);
}
static struct rtrs_clt_io_req *
rtrs_clt_get_req(struct rtrs_clt_path *clt_path,
void (*conf)(void *priv, int errno),
struct rtrs_permit *permit, void *priv,
const struct kvec *vec, size_t usr_len,
struct scatterlist *sg, size_t sg_cnt,
size_t data_len, int dir)
{
struct rtrs_clt_io_req *req;
req = &clt_path->reqs[permit->mem_id];
rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len,
sg, sg_cnt, data_len, dir);
return req;
}
static struct rtrs_clt_io_req *
rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
struct rtrs_clt_io_req *fail_req)
{
struct rtrs_clt_io_req *req;
struct kvec vec = {
.iov_base = fail_req->iu->buf,
.iov_len = fail_req->usr_len
};
req = &alive_path->reqs[fail_req->permit->mem_id];
rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit,
fail_req->priv, &vec, fail_req->usr_len,
fail_req->sglist, fail_req->sg_cnt,
fail_req->data_len, fail_req->dir);
return req;
}
static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
struct rtrs_clt_io_req *req,
struct rtrs_rbuf *rbuf, bool fr_en,
u32 count, u32 size, u32 imm,
struct ib_send_wr *wr,
struct ib_send_wr *tail)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct ib_sge *sge = req->sge;
enum ib_send_flags flags;
struct scatterlist *sg;
size_t num_sge;
int i;
struct ib_send_wr *ptail = NULL;
if (fr_en) {
i = 0;
sge[i].addr = req->mr->iova;
sge[i].length = req->mr->length;
sge[i].lkey = req->mr->lkey;
i++;
num_sge = 2;
ptail = tail;
} else {
for_each_sg(req->sglist, sg, count, i) {
sge[i].addr = sg_dma_address(sg);
sge[i].length = sg_dma_len(sg);
sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
}
num_sge = 1 + count;
}
sge[i].addr = req->iu->dma_addr;
sge[i].length = size;
sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
/*
* From time to time we have to post signalled sends,
 * or the send queue will fill up and only a QP reset can help.
*/
flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
0 : IB_SEND_SIGNALED;
ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
req->iu->dma_addr,
size, DMA_TO_DEVICE);
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
rbuf->rkey, rbuf->addr, imm,
flags, wr, ptail);
}
static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
{
int nr;
/* Align the MR to a 4K page size to match the block virt boundary */
nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K);
if (nr != count)
return nr < 0 ? nr : -EINVAL;
ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey));
return nr;
}
static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
{
struct rtrs_clt_con *con = req->con;
struct rtrs_path *s = con->c.path;
struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rtrs_msg_rdma_write *msg;
struct rtrs_rbuf *rbuf;
int ret, count = 0;
u32 imm, buf_id;
struct ib_reg_wr rwr;
struct ib_send_wr inv_wr;
struct ib_send_wr *wr = NULL;
bool fr_en = false;
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
if (tsize > clt_path->chunk_size) {
rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
tsize, clt_path->chunk_size);
return -EMSGSIZE;
}
if (req->sg_cnt) {
count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
if (!count) {
rtrs_wrn(s, "Write request failed, map failed\n");
return -EINVAL;
}
}
/* put rtrs msg after sg and user message */
msg = req->iu->buf + req->usr_len;
msg->type = cpu_to_le16(RTRS_MSG_WRITE);
msg->usr_len = cpu_to_le16(req->usr_len);
/* rtrs message on server side will be after user data and message */
imm = req->permit->mem_off + req->data_len + req->usr_len;
imm = rtrs_to_io_req_imm(imm);
buf_id = req->permit->mem_id;
req->sg_size = tsize;
rbuf = &clt_path->rbufs[buf_id];
if (count) {
ret = rtrs_map_sg_fr(req, count);
if (ret < 0) {
rtrs_err_rl(s,
"Write request failed, failed to map fast reg. data, err: %d\n",
ret);
ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
return ret;
}
inv_wr = (struct ib_send_wr) {
.opcode = IB_WR_LOCAL_INV,
.wr_cqe = &req->inv_cqe,
.send_flags = IB_SEND_SIGNALED,
.ex.invalidate_rkey = req->mr->rkey,
};
req->inv_cqe.done = rtrs_clt_inv_rkey_done;
rwr = (struct ib_reg_wr) {
.wr.opcode = IB_WR_REG_MR,
.wr.wr_cqe = &fast_reg_cqe,
.mr = req->mr,
.key = req->mr->rkey,
.access = (IB_ACCESS_LOCAL_WRITE),
};
wr = &rwr.wr;
fr_en = true;
refcount_inc(&req->ref);
}
/*
 * Update stats now; after the request is successfully sent it is no
 * longer safe to touch it.
*/
rtrs_clt_update_all_stats(req, WRITE);
ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count,
req->usr_len + sizeof(*msg),
imm, wr, &inv_wr);
if (ret) {
rtrs_err_rl(s,
"Write request failed: error=%d path=%s [%s:%u]\n",
ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&clt_path->stats->inflight);
if (req->sg_cnt)
ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
}
return ret;
}
static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
{
struct rtrs_clt_con *con = req->con;
struct rtrs_path *s = con->c.path;
struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rtrs_msg_rdma_read *msg;
struct rtrs_ib_dev *dev = clt_path->s.dev;
struct ib_reg_wr rwr;
struct ib_send_wr *wr = NULL;
int ret, count = 0;
u32 imm, buf_id;
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
if (tsize > clt_path->chunk_size) {
rtrs_wrn(s,
"Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
tsize, clt_path->chunk_size);
return -EMSGSIZE;
}
if (req->sg_cnt) {
count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt,
req->dir);
if (!count) {
rtrs_wrn(s,
"Read request failed, dma map failed\n");
return -EINVAL;
}
}
/* put our message into req->iu->buf after the user message */
msg = req->iu->buf + req->usr_len;
msg->type = cpu_to_le16(RTRS_MSG_READ);
msg->usr_len = cpu_to_le16(req->usr_len);
if (count) {
ret = rtrs_map_sg_fr(req, count);
if (ret < 0) {
rtrs_err_rl(s,
"Read request failed, failed to map fast reg. data, err: %d\n",
ret);
ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt,
req->dir);
return ret;
}
rwr = (struct ib_reg_wr) {
.wr.opcode = IB_WR_REG_MR,
.wr.wr_cqe = &fast_reg_cqe,
.mr = req->mr,
.key = req->mr->rkey,
.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE),
};
wr = &rwr.wr;
msg->sg_cnt = cpu_to_le16(1);
msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F);
msg->desc[0].addr = cpu_to_le64(req->mr->iova);
msg->desc[0].key = cpu_to_le32(req->mr->rkey);
msg->desc[0].len = cpu_to_le32(req->mr->length);
/* Further invalidation is required */
req->need_inv = !!RTRS_MSG_NEED_INVAL_F;
} else {
msg->sg_cnt = 0;
msg->flags = 0;
}
/*
* rtrs message will be after the space reserved for disk data and
* user message
*/
imm = req->permit->mem_off + req->data_len + req->usr_len;
imm = rtrs_to_io_req_imm(imm);
buf_id = req->permit->mem_id;
req->sg_size = sizeof(*msg);
req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc);
req->sg_size += req->usr_len;
/*
 * Update stats now; after the request is successfully sent it is no
 * longer safe to touch it.
*/
rtrs_clt_update_all_stats(req, READ);
ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
req->data_len, imm, wr);
if (ret) {
rtrs_err_rl(s,
"Read request failed: error=%d path=%s [%s:%u]\n",
ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
atomic_dec(&clt_path->stats->inflight);
req->need_inv = false;
if (req->sg_cnt)
ib_dma_unmap_sg(dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
}
return ret;
}
/**
* rtrs_clt_failover_req() - Try to find an active path for a failed request
* @clt: clt context
* @fail_req: a failed io request.
*/
static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt,
struct rtrs_clt_io_req *fail_req)
{
struct rtrs_clt_path *alive_path;
struct rtrs_clt_io_req *req;
int err = -ECONNABORTED;
struct path_it it;
rcu_read_lock();
for (path_it_init(&it, clt);
(alive_path = it.next_path(&it)) && it.i < it.clt->paths_num;
it.i++) {
if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED)
continue;
req = rtrs_clt_get_copy_req(alive_path, fail_req);
if (req->dir == DMA_TO_DEVICE)
err = rtrs_clt_write_req(req);
else
err = rtrs_clt_read_req(req);
if (err) {
req->in_use = false;
continue;
}
/* Success path */
rtrs_clt_inc_failover_cnt(alive_path->stats);
break;
}
path_it_deinit(&it);
rcu_read_unlock();
return err;
}
static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_sess *clt = clt_path->clt;
struct rtrs_clt_io_req *req;
int i, err;
if (!clt_path->reqs)
return;
for (i = 0; i < clt_path->queue_depth; ++i) {
req = &clt_path->reqs[i];
if (!req->in_use)
continue;
/*
 * Safely (without notification) complete the failed request.
 * After completion this request is still usable and can
 * be failed over to another path.
*/
complete_rdma_req(req, -ECONNABORTED, false, true);
err = rtrs_clt_failover_req(clt, req);
if (err)
/* Failover failed, notify anyway */
req->conf(req->priv, err);
}
}
static void free_path_reqs(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_io_req *req;
int i;
if (!clt_path->reqs)
return;
for (i = 0; i < clt_path->queue_depth; ++i) {
req = &clt_path->reqs[i];
if (req->mr)
ib_dereg_mr(req->mr);
kfree(req->sge);
rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1);
}
kfree(clt_path->reqs);
clt_path->reqs = NULL;
}
static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_io_req *req;
int i, err = -ENOMEM;
clt_path->reqs = kcalloc(clt_path->queue_depth,
sizeof(*clt_path->reqs),
GFP_KERNEL);
if (!clt_path->reqs)
return -ENOMEM;
for (i = 0; i < clt_path->queue_depth; ++i) {
req = &clt_path->reqs[i];
req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
clt_path->s.dev->ib_dev,
DMA_TO_DEVICE,
rtrs_clt_rdma_done);
if (!req->iu)
goto out;
req->sge = kcalloc(2, sizeof(*req->sge), GFP_KERNEL);
if (!req->sge)
goto out;
req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd,
IB_MR_TYPE_MEM_REG,
clt_path->max_pages_per_mr);
if (IS_ERR(req->mr)) {
err = PTR_ERR(req->mr);
req->mr = NULL;
pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n",
clt_path->max_pages_per_mr);
goto out;
}
init_completion(&req->inv_comp);
}
return 0;
out:
free_path_reqs(clt_path);
return err;
}
static int alloc_permits(struct rtrs_clt_sess *clt)
{
unsigned int chunk_bits;
int err, i;
clt->permits_map = bitmap_zalloc(clt->queue_depth, GFP_KERNEL);
if (!clt->permits_map) {
err = -ENOMEM;
goto out_err;
}
clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL);
if (!clt->permits) {
err = -ENOMEM;
goto err_map;
}
chunk_bits = ilog2(clt->queue_depth - 1) + 1;
for (i = 0; i < clt->queue_depth; i++) {
struct rtrs_permit *permit;
permit = get_permit(clt, i);
permit->mem_id = i;
permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits);
}
return 0;
err_map:
bitmap_free(clt->permits_map);
clt->permits_map = NULL;
out_err:
return err;
}
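/*
 * A worked example of the permit layout set up by alloc_permits() above
 * (a sketch; MAX_IMM_PAYL_BITS is defined elsewhere, 28 is assumed here):
 * with queue_depth == 512, chunk_bits == ilog2(511) + 1 == 9, so permit i
 * gets mem_off = i << (28 - 9), leaving the low 19 bits of the immediate
 * payload for the offset within the chunk.
 */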
static void free_permits(struct rtrs_clt_sess *clt)
{
if (clt->permits_map)
wait_event(clt->permits_wait,
bitmap_empty(clt->permits_map, clt->queue_depth));
bitmap_free(clt->permits_map);
clt->permits_map = NULL;
kfree(clt->permits);
clt->permits = NULL;
}
static void query_fast_reg_mode(struct rtrs_clt_path *clt_path)
{
struct ib_device *ib_dev;
u64 max_pages_per_mr;
int mr_page_shift;
ib_dev = clt_path->s.dev->ib_dev;
/*
* Use the smallest page size supported by the HCA, down to a
* minimum of 4096 bytes. We're unlikely to build large sglists
* out of smaller entries.
*/
mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
max_pages_per_mr = ib_dev->attrs.max_mr_size;
do_div(max_pages_per_mr, (1ull << mr_page_shift));
clt_path->max_pages_per_mr =
min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr,
ib_dev->attrs.max_fast_reg_page_list_len);
clt_path->clt->max_segments =
min(clt_path->max_pages_per_mr, clt_path->clt->max_segments);
}
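/*
 * Example of the clamping done by query_fast_reg_mode() above (a sketch,
 * device attributes vary): if the HCA supports 4K as its smallest page
 * size (mr_page_shift == 12) and reports max_mr_size of 4GB, then
 * max_pages_per_mr starts at 4GB >> 12 == 1M pages and is further limited
 * by max_fast_reg_page_list_len and the path's current max_pages_per_mr.
 */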
static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state new_state,
enum rtrs_clt_state *old_state)
{
bool changed;
spin_lock_irq(&clt_path->state_wq.lock);
if (old_state)
*old_state = clt_path->state;
changed = rtrs_clt_change_state(clt_path, new_state);
spin_unlock_irq(&clt_path->state_wq.lock);
return changed;
}
static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
{
struct rtrs_clt_con *con = container_of(c, typeof(*con), c);
rtrs_rdma_error_recovery(con);
}
static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
{
rtrs_init_hb(&clt_path->s, &io_comp_cqe,
RTRS_HB_INTERVAL_MS,
RTRS_HB_MISSED_MAX,
rtrs_clt_hb_err_handler,
rtrs_wq);
}
static void rtrs_clt_reconnect_work(struct work_struct *work);
static void rtrs_clt_close_work(struct work_struct *work);
static void rtrs_clt_err_recovery_work(struct work_struct *work)
{
struct rtrs_clt_path *clt_path;
struct rtrs_clt_sess *clt;
int delay_ms;
clt_path = container_of(work, struct rtrs_clt_path, err_recovery_work);
clt = clt_path->clt;
delay_ms = clt->reconnect_delay_sec * 1000;
rtrs_clt_stop_and_destroy_conns(clt_path);
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
msecs_to_jiffies(delay_ms +
get_random_u32_below(RTRS_RECONNECT_SEED)));
}
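/*
 * The delay queued above is reconnect_delay_sec * 1000 ms plus a random
 * jitter below RTRS_RECONNECT_SEED ms, so that multiple paths do not all
 * hit the server at the same instant. E.g. assuming a seed of 8 ms (the
 * actual value is defined elsewhere) and reconnect_delay_sec == 30, the
 * reconnect work runs 30000..30007 ms later.
 */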
static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
const struct rtrs_addr *path,
size_t con_num, u32 nr_poll_queues)
{
struct rtrs_clt_path *clt_path;
int err = -ENOMEM;
int cpu;
size_t total_con;
clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL);
if (!clt_path)
goto err;
/*
 * irq-mode connections + poll-mode connections,
 * +1: extra connection for user messages
*/
total_con = con_num + nr_poll_queues + 1;
clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
GFP_KERNEL);
if (!clt_path->s.con)
goto err_free_path;
clt_path->s.con_num = total_con;
clt_path->s.irq_con_num = con_num + 1;
clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL);
if (!clt_path->stats)
goto err_free_con;
mutex_init(&clt_path->init_mutex);
uuid_gen(&clt_path->s.uuid);
memcpy(&clt_path->s.dst_addr, path->dst,
rdma_addr_size((struct sockaddr *)path->dst));
/*
* rdma_resolve_addr() passes src_addr to cma_bind_addr, which
 * checks that the sa_family is non-zero. If the user passed src_addr=NULL,
 * sess->src_addr will contain only zeros, which is fine.
*/
if (path->src)
memcpy(&clt_path->s.src_addr, path->src,
rdma_addr_size((struct sockaddr *)path->src));
strscpy(clt_path->s.sessname, clt->sessname,
sizeof(clt_path->s.sessname));
clt_path->clt = clt;
clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS;
init_waitqueue_head(&clt_path->state_wq);
clt_path->state = RTRS_CLT_CONNECTING;
atomic_set(&clt_path->connected_cnt, 0);
INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
INIT_WORK(&clt_path->err_recovery_work, rtrs_clt_err_recovery_work);
INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
rtrs_clt_init_hb(clt_path);
clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
if (!clt_path->mp_skip_entry)
goto err_free_stats;
for_each_possible_cpu(cpu)
INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));
err = rtrs_clt_init_stats(clt_path->stats);
if (err)
goto err_free_percpu;
return clt_path;
err_free_percpu:
free_percpu(clt_path->mp_skip_entry);
err_free_stats:
kfree(clt_path->stats);
err_free_con:
kfree(clt_path->s.con);
err_free_path:
kfree(clt_path);
err:
return ERR_PTR(err);
}
void free_path(struct rtrs_clt_path *clt_path)
{
free_percpu(clt_path->mp_skip_entry);
mutex_destroy(&clt_path->init_mutex);
kfree(clt_path->s.con);
kfree(clt_path->rbufs);
kfree(clt_path);
}
static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
{
struct rtrs_clt_con *con;
con = kzalloc(sizeof(*con), GFP_KERNEL);
if (!con)
return -ENOMEM;
/* Map first two connections to the first CPU */
con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
con->c.cid = cid;
con->c.path = &clt_path->s;
/* Align with the server side, initialize wr_cnt as 1 */
atomic_set(&con->c.wr_cnt, 1);
mutex_init(&con->con_mutex);
clt_path->s.con[cid] = &con->c;
return 0;
}
static void destroy_con(struct rtrs_clt_con *con)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
clt_path->s.con[con->c.cid] = NULL;
mutex_destroy(&con->con_mutex);
kfree(con);
}
static int create_con_cq_qp(struct rtrs_clt_con *con)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
int err, cq_vector;
struct rtrs_msg_rkey_rsp *rsp;
lockdep_assert_held(&con->con_mutex);
if (con->c.cid == 0) {
max_send_sge = 1;
/* We must be the first here */
if (WARN_ON(clt_path->s.dev))
return -EINVAL;
/*
* The whole session uses device from user connection.
* Be careful not to close user connection before ib dev
* is gracefully put.
*/
clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
&dev_pd);
if (!clt_path->s.dev) {
rtrs_wrn(clt_path->clt,
"rtrs_ib_dev_find_get_or_add(): no memory\n");
return -ENOMEM;
}
clt_path->s.dev_ref = 1;
query_fast_reg_mode(clt_path);
wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
/*
 * Two completions (request + registration) for each send,
 * two for each recv if always_invalidate is set on the server
 * or one for each recv otherwise,
 * + 2 for drain and heartbeat
 * in case the qp gets into an error state.
*/
max_send_wr =
min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
max_recv_wr = max_send_wr;
} else {
/*
* Here we assume that session members are correctly set.
* This is always true if user connection (cid == 0) is
* established first.
*/
if (WARN_ON(!clt_path->s.dev))
return -EINVAL;
if (WARN_ON(!clt_path->queue_depth))
return -EINVAL;
wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
/* Shared between connections */
clt_path->s.dev_ref++;
max_send_wr = min_t(int, wr_limit,
/* QD * (REQ + RSP + FR REGS or INVS) + drain */
clt_path->queue_depth * 3 + 1);
max_recv_wr = min_t(int, wr_limit,
clt_path->queue_depth * 3 + 1);
max_send_sge = 2;
}
atomic_set(&con->c.sq_wr_avail, max_send_wr);
cq_num = max_send_wr + max_recv_wr;
/* alloc iu to recv new rkey reply when server reports flags set */
if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
GFP_KERNEL,
clt_path->s.dev->ib_dev,
DMA_FROM_DEVICE,
rtrs_clt_rdma_done);
if (!con->rsp_ius)
return -ENOMEM;
con->queue_num = cq_num;
}
cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
if (con->c.cid >= clt_path->s.irq_con_num)
err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_DIRECT);
else
err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_SOFTIRQ);
/*
* In case of error we do not bother to clean previous allocations,
* since destroy_con_cq_qp() must be called.
*/
return err;
}
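/*
 * WR budget example for create_con_cq_qp() above: for an IO connection
 * (cid != 0) with queue_depth == 512 and wr_limit above that,
 * max_send_wr == max_recv_wr == 512 * 3 + 1 == 1537 (request, response
 * and FR registration/invalidation per slot, plus one drain WR), so
 * cq_num == 3074 completion queue entries are provisioned.
 */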
static void destroy_con_cq_qp(struct rtrs_clt_con *con)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
/*
 * Be careful here: destroy_con_cq_qp() can be called even if
 * create_con_cq_qp() failed, see comments there.
*/
lockdep_assert_held(&con->con_mutex);
rtrs_cq_qp_destroy(&con->c);
if (con->rsp_ius) {
rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
con->queue_num);
con->rsp_ius = NULL;
con->queue_num = 0;
}
if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) {
rtrs_ib_dev_put(clt_path->s.dev);
clt_path->s.dev = NULL;
}
}
static void stop_cm(struct rtrs_clt_con *con)
{
rdma_disconnect(con->c.cm_id);
if (con->c.qp)
ib_drain_qp(con->c.qp);
}
static void destroy_cm(struct rtrs_clt_con *con)
{
rdma_destroy_id(con->c.cm_id);
con->c.cm_id = NULL;
}
static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
{
struct rtrs_path *s = con->c.path;
int err;
mutex_lock(&con->con_mutex);
err = create_con_cq_qp(con);
mutex_unlock(&con->con_mutex);
if (err) {
rtrs_err(s, "create_con_cq_qp(), err: %d\n", err);
return err;
}
err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS);
if (err)
rtrs_err(s, "Resolving route failed, err: %d\n", err);
return err;
}
static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_clt_sess *clt = clt_path->clt;
struct rtrs_msg_conn_req msg;
struct rdma_conn_param param;
int err;
param = (struct rdma_conn_param) {
.retry_count = 7,
.rnr_retry_count = 7,
.private_data = &msg,
.private_data_len = sizeof(msg),
};
msg = (struct rtrs_msg_conn_req) {
.magic = cpu_to_le16(RTRS_MAGIC),
.version = cpu_to_le16(RTRS_PROTO_VER),
.cid = cpu_to_le16(con->c.cid),
.cid_num = cpu_to_le16(clt_path->s.con_num),
.recon_cnt = cpu_to_le16(clt_path->s.recon_cnt),
};
msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0;
uuid_copy(&msg.sess_uuid, &clt_path->s.uuid);
uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
err = rdma_connect_locked(con->c.cm_id, ¶m);
if (err)
rtrs_err(clt, "rdma_connect_locked(): %d\n", err);
return err;
}
static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
struct rdma_cm_event *ev)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_clt_sess *clt = clt_path->clt;
const struct rtrs_msg_conn_rsp *msg;
u16 version, queue_depth;
int errno;
u8 len;
msg = ev->param.conn.private_data;
len = ev->param.conn.private_data_len;
if (len < sizeof(*msg)) {
rtrs_err(clt, "Invalid RTRS connection response\n");
return -ECONNRESET;
}
if (le16_to_cpu(msg->magic) != RTRS_MAGIC) {
rtrs_err(clt, "Invalid RTRS magic\n");
return -ECONNRESET;
}
version = le16_to_cpu(msg->version);
if (version >> 8 != RTRS_PROTO_VER_MAJOR) {
rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n",
version >> 8, RTRS_PROTO_VER_MAJOR);
return -ECONNRESET;
}
errno = le16_to_cpu(msg->errno);
if (errno) {
rtrs_err(clt, "Invalid RTRS message: errno %d\n",
errno);
return -ECONNRESET;
}
if (con->c.cid == 0) {
queue_depth = le16_to_cpu(msg->queue_depth);
if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) {
rtrs_err(clt, "Error: queue depth changed\n");
/*
* Stop any more reconnection attempts
*/
clt_path->reconnect_attempts = -1;
rtrs_err(clt,
"Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
return -ECONNRESET;
}
if (!clt_path->rbufs) {
clt_path->rbufs = kcalloc(queue_depth,
sizeof(*clt_path->rbufs),
GFP_KERNEL);
if (!clt_path->rbufs)
return -ENOMEM;
}
clt_path->queue_depth = queue_depth;
clt_path->s.signal_interval = min_not_zero(queue_depth,
(unsigned short) SERVICE_CON_QUEUE_DEPTH);
clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
clt_path->max_io_size = le32_to_cpu(msg->max_io_size);
clt_path->flags = le32_to_cpu(msg->flags);
clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size;
/*
 * Global IO size is always a minimum.
 * If during a reconnection the server sends us a slightly higher
 * value, the client does not care and keeps using the cached minimum.
 *
 * Since we can have several sessions (paths) re-establishing
 * connections in parallel, use a lock.
*/
mutex_lock(&clt->paths_mutex);
clt->queue_depth = clt_path->queue_depth;
clt->max_io_size = min_not_zero(clt_path->max_io_size,
clt->max_io_size);
mutex_unlock(&clt->paths_mutex);
/*
* Cache the hca_port and hca_name for sysfs
*/
clt_path->hca_port = con->c.cm_id->port_num;
scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name),
clt_path->s.dev->ib_dev->name);
clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
/* set for_new_clt, to allow future reconnect on any path */
clt_path->for_new_clt = 1;
}
return 0;
}
static inline void flag_success_on_conn(struct rtrs_clt_con *con)
{
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
atomic_inc(&clt_path->connected_cnt);
con->cm_err = 1;
}
static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
struct rdma_cm_event *ev)
{
struct rtrs_path *s = con->c.path;
const struct rtrs_msg_conn_rsp *msg;
const char *rej_msg;
int status, errno;
u8 data_len;
status = ev->status;
rej_msg = rdma_reject_msg(con->c.cm_id, status);
msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len);
if (msg && data_len >= sizeof(*msg)) {
errno = (int16_t)le16_to_cpu(msg->errno);
if (errno == -EBUSY)
rtrs_err(s,
"Previous session is still exists on the server, please reconnect later\n");
else
rtrs_err(s,
"Connect rejected: status %d (%s), rtrs errno %d\n",
status, rej_msg, errno);
} else {
rtrs_err(s,
"Connect rejected but with malformed message: status %d (%s)\n",
status, rej_msg);
}
return -ECONNRESET;
}
void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
{
trace_rtrs_clt_close_conns(clt_path);
if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
queue_work(rtrs_wq, &clt_path->close_work);
if (wait)
flush_work(&clt_path->close_work);
}
static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
{
if (con->cm_err == 1) {
struct rtrs_clt_path *clt_path;
clt_path = to_clt_path(con->c.path);
if (atomic_dec_and_test(&clt_path->connected_cnt))
wake_up(&clt_path->state_wq);
}
con->cm_err = cm_err;
}
static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{
struct rtrs_clt_con *con = cm_id->context;
struct rtrs_path *s = con->c.path;
struct rtrs_clt_path *clt_path = to_clt_path(s);
int cm_err = 0;
switch (ev->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
cm_err = rtrs_rdma_addr_resolved(con);
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
cm_err = rtrs_rdma_route_resolved(con);
break;
case RDMA_CM_EVENT_ESTABLISHED:
cm_err = rtrs_rdma_conn_established(con, ev);
if (!cm_err) {
/*
* Report success and wake up. Here we abuse state_wq,
* i.e. wake up without state change, but we set cm_err.
*/
flag_success_on_conn(con);
wake_up(&clt_path->state_wq);
return 0;
}
break;
case RDMA_CM_EVENT_REJECTED:
cm_err = rtrs_rdma_conn_rejected(con, ev);
break;
case RDMA_CM_EVENT_DISCONNECTED:
/* No message for disconnecting */
cm_err = -ECONNRESET;
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
rdma_event_msg(ev->event), ev->status);
cm_err = -ECONNRESET;
break;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
rtrs_wrn(s, "CM error (CM event: %s, err: %d)\n",
rdma_event_msg(ev->event), ev->status);
cm_err = -EHOSTUNREACH;
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
/*
* Device removal is a special case. Queue close and return 0.
*/
rtrs_clt_close_conns(clt_path, false);
return 0;
default:
rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
rdma_event_msg(ev->event), ev->status);
cm_err = -ECONNRESET;
break;
}
if (cm_err) {
/*
 * A CM error makes sense only while the connection is being
 * established; in other cases we rely on the normal reconnect procedure.
*/
flag_error_on_conn(con, cm_err);
rtrs_rdma_error_recovery(con);
}
return 0;
}
/* The caller should do the cleanup in case of error */
static int create_cm(struct rtrs_clt_con *con)
{
struct rtrs_path *s = con->c.path;
struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rdma_cm_id *cm_id;
int err;
cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
clt_path->s.dst_addr.ss_family == AF_IB ?
RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id)) {
err = PTR_ERR(cm_id);
rtrs_err(s, "Failed to create CM ID, err: %d\n", err);
return err;
}
con->c.cm_id = cm_id;
con->cm_err = 0;
/* allow the port to be reused */
err = rdma_set_reuseaddr(cm_id, 1);
if (err != 0) {
rtrs_err(s, "Set address reuse failed, err: %d\n", err);
return err;
}
err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
(struct sockaddr *)&clt_path->s.dst_addr,
RTRS_CONNECT_TIMEOUT_MS);
if (err) {
rtrs_err(s, "Failed to resolve address, err: %d\n", err);
return err;
}
/*
 * Combine connection status and session events. We wait for either of
 * two possible cases: cm_err is set to something meaningful, or the
 * session state was changed to an error state by device removal.
*/
err = wait_event_interruptible_timeout(
clt_path->state_wq,
con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
if (err == 0 || err == -ERESTARTSYS) {
if (err == 0)
err = -ETIMEDOUT;
/* Timedout or interrupted */
return err;
}
if (con->cm_err < 0)
return con->cm_err;
if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
/* Device removal */
return -ECONNABORTED;
return 0;
}
static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_sess *clt = clt_path->clt;
int up;
/*
* We can fire RECONNECTED event only when all paths were
* connected on rtrs_clt_open(), then each was disconnected
* and the first one connected again. That's why this nasty
 * game with the counter value.
*/
mutex_lock(&clt->paths_ev_mutex);
up = ++clt->paths_up;
/*
 * Here it is safe to access paths_num directly since the up counter
 * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is
 * in progress, thus path removals are impossible.
*/
if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num)
clt->paths_up = clt->paths_num;
else if (up == 1)
clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED);
mutex_unlock(&clt->paths_ev_mutex);
/* Mark session as established */
clt_path->established = true;
clt_path->reconnect_attempts = 0;
clt_path->stats->reconnects.successful_cnt++;
}
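/*
 * Illustration of the counter game above (a sketch, assuming paths_up is
 * initialized above MAX_PATHS_NUM by rtrs_clt_open(), which is not shown
 * here): with two paths, the initial connections push paths_up past
 * MAX_PATHS_NUM without firing RECONNECTED; once up equals
 * MAX_PATHS_NUM + paths_num the counter is clamped to paths_num. Only
 * after all paths have gone down (paths_up == 0, DISCONNECTED fired) does
 * the first path coming back yield up == 1 and hence RECONNECTED.
 */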
static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_sess *clt = clt_path->clt;
if (!clt_path->established)
return;
clt_path->established = false;
mutex_lock(&clt->paths_ev_mutex);
WARN_ON(!clt->paths_up);
if (--clt->paths_up == 0)
clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED);
mutex_unlock(&clt->paths_ev_mutex);
}
static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_con *con;
unsigned int cid;
WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED);
/*
* Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
* exactly in between. Start destroying after it finishes.
*/
mutex_lock(&clt_path->init_mutex);
mutex_unlock(&clt_path->init_mutex);
/*
* All IO paths must observe !CONNECTED state before we
* free everything.
*/
synchronize_rcu();
rtrs_stop_hb(&clt_path->s);
/*
 * The order is utterly crucial: first disconnect and complete all
 * rdma requests with an error (thus setting in_use=false for requests),
 * then fail outstanding requests checking in_use for each, and
 * eventually notify the upper layer about session disconnection.
*/
for (cid = 0; cid < clt_path->s.con_num; cid++) {
if (!clt_path->s.con[cid])
break;
con = to_clt_con(clt_path->s.con[cid]);
stop_cm(con);
}
fail_all_outstanding_reqs(clt_path);
free_path_reqs(clt_path);
rtrs_clt_path_down(clt_path);
/*
 * Wait for a graceful shutdown, namely when the peer side invokes
 * rdma_disconnect(). 'connected_cnt' is decremented only on
 * CM events, thus if the other side has crashed and hb has detected
 * something is wrong, here we will be stuck for exactly timeout ms,
 * since CM does not fire anything. That is fine, we are not in a
 * hurry.
*/
wait_event_timeout(clt_path->state_wq,
!atomic_read(&clt_path->connected_cnt),
msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
for (cid = 0; cid < clt_path->s.con_num; cid++) {
if (!clt_path->s.con[cid])
break;
con = to_clt_con(clt_path->s.con[cid]);
mutex_lock(&con->con_mutex);
destroy_con_cq_qp(con);
mutex_unlock(&con->con_mutex);
destroy_cm(con);
destroy_con(con);
}
}
static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_sess *clt = clt_path->clt;
struct rtrs_clt_path *next;
bool wait_for_grace = false;
int cpu;
mutex_lock(&clt->paths_mutex);
list_del_rcu(&clt_path->s.entry);
/* Make sure everybody observes path removal. */
synchronize_rcu();
/*
 * At this point nobody sees @sess in the list, but still we have
 * a dangling pointer @pcpu_path which _can_ point to @sess. Since
 * nobody can observe @sess in the list, we guarantee that the IO path
 * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal
 * to @sess, but can never again become @sess.
*/
/*
 * Decrement the paths number only after the grace period, because
 * a caller of do_each_path() must first observe the list without
 * the path and only then the decremented paths number.
*
* Otherwise there can be the following situation:
* o Two paths exist and IO is coming.
* o One path is removed:
* CPU#0 CPU#1
* do_each_path(): rtrs_clt_remove_path_from_arr():
* path = get_next_path()
* ^^^ list_del_rcu(path)
* [!CONNECTED path] clt->paths_num--
* ^^^^^^^^^
* load clt->paths_num from 2 to 1
* ^^^^^^^^^
* sees 1
*
* path is observed as !CONNECTED, but do_each_path() loop
* ends, because expression i < clt->paths_num is false.
*/
clt->paths_num--;
/*
* Get @next connection from current @sess which is going to be
* removed. If @sess is the last element, then @next is NULL.
*/
rcu_read_lock();
next = rtrs_clt_get_next_path_or_null(&clt->paths_list, clt_path);
rcu_read_unlock();
/*
* @pcpu paths can still point to the path which is going to be
* removed, so change the pointer manually.
*/
for_each_possible_cpu(cpu) {
struct rtrs_clt_path __rcu **ppcpu_path;
ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
if (rcu_dereference_protected(*ppcpu_path,
lockdep_is_held(&clt->paths_mutex)) != clt_path)
/*
* synchronize_rcu() was called just after deleting
 * entry from the list, thus the IO code path cannot
 * change the pointer back to the one which is going
 * to be removed; we are safe here.
*/
continue;
/*
* We race with IO code path, which also changes pointer,
* thus we have to be careful not to overwrite it.
*/
if (try_cmpxchg((struct rtrs_clt_path **)ppcpu_path, &clt_path,
next))
/*
 * @ppcpu_path was successfully replaced with @next,
 * which means that someone could also have picked up
 * @sess and be dereferencing it right now, so waiting
 * for a grace period is required.
*/
wait_for_grace = true;
}
if (wait_for_grace)
synchronize_rcu();
mutex_unlock(&clt->paths_mutex);
}
static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_sess *clt = clt_path->clt;
mutex_lock(&clt->paths_mutex);
clt->paths_num++;
list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
mutex_unlock(&clt->paths_mutex);
}
static void rtrs_clt_close_work(struct work_struct *work)
{
struct rtrs_clt_path *clt_path;
clt_path = container_of(work, struct rtrs_clt_path, close_work);
cancel_work_sync(&clt_path->err_recovery_work);
cancel_delayed_work_sync(&clt_path->reconnect_dwork);
rtrs_clt_stop_and_destroy_conns(clt_path);
rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
}
static int init_conns(struct rtrs_clt_path *clt_path)
{
unsigned int cid;
int err, i;
/*
 * On every new set of session connections increase the reconnect
 * counter to avoid clashes with previous, not yet closed,
 * sessions on the server side.
*/
clt_path->s.recon_cnt++;
/* Establish all RDMA connections */
for (cid = 0; cid < clt_path->s.con_num; cid++) {
err = create_con(clt_path, cid);
if (err)
goto destroy;
err = create_cm(to_clt_con(clt_path->s.con[cid]));
if (err)
goto destroy;
}
err = alloc_path_reqs(clt_path);
if (err)
goto destroy;
rtrs_start_hb(&clt_path->s);
return 0;
destroy:
/* Make sure we do the cleanup in the order they are created */
for (i = 0; i <= cid; i++) {
struct rtrs_clt_con *con;
if (!clt_path->s.con[i])
break;
con = to_clt_con(clt_path->s.con[i]);
if (con->c.cm_id) {
stop_cm(con);
mutex_lock(&con->con_mutex);
destroy_con_cq_qp(con);
mutex_unlock(&con->con_mutex);
destroy_cm(con);
}
destroy_con(con);
}
/*
 * If we've never taken the async path and got an error, say,
 * doing rdma_resolve_addr(), switch to the CONNECTING_ERR state
 * manually to keep reconnecting.
*/
rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(clt_path->clt, "Path info request send failed: %s\n",
ib_wc_status_msg(wc->status));
rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
return;
}
rtrs_clt_update_wc_stats(con);
}
static int process_info_rsp(struct rtrs_clt_path *clt_path,
const struct rtrs_msg_info_rsp *msg)
{
unsigned int sg_cnt, total_len;
int i, sgi;
sg_cnt = le16_to_cpu(msg->sg_cnt);
if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) {
rtrs_err(clt_path->clt,
"Incorrect sg_cnt %d, is not multiple\n",
sg_cnt);
return -EINVAL;
}
/*
* Check if IB immediate data size is enough to hold the mem_id and
* the offset inside the memory chunk.
*/
if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) >
MAX_IMM_PAYL_BITS) {
rtrs_err(clt_path->clt,
"RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size);
return -EINVAL;
}
total_len = 0;
for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) {
const struct rtrs_sg_desc *desc = &msg->desc[sgi];
u32 len, rkey;
u64 addr;
addr = le64_to_cpu(desc->addr);
rkey = le32_to_cpu(desc->key);
len = le32_to_cpu(desc->len);
total_len += len;
if (!len || (len % clt_path->chunk_size)) {
rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n",
sgi,
len);
return -EINVAL;
}
for ( ; len && i < clt_path->queue_depth; i++) {
clt_path->rbufs[i].addr = addr;
clt_path->rbufs[i].rkey = rkey;
len -= clt_path->chunk_size;
addr += clt_path->chunk_size;
}
}
/* Sanity check */
if (sgi != sg_cnt || i != clt_path->queue_depth) {
rtrs_err(clt_path->clt,
"Incorrect sg vector, not fully mapped\n");
return -EINVAL;
}
if (total_len != clt_path->chunk_size * clt_path->queue_depth) {
rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len);
return -EINVAL;
}
return 0;
}
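/*
 * Example of the rbuf expansion done by process_info_rsp() above: with
 * queue_depth == 4, chunk_size == 64K and sg_cnt == 2, each descriptor
 * must describe a 128K region, which is split into two consecutive rbufs
 * of chunk_size each, and total_len must end up being exactly 4 * 64K.
 */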
static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_msg_info_rsp *msg;
enum rtrs_clt_state state;
struct rtrs_iu *iu;
size_t rx_sz;
int err;
state = RTRS_CLT_CONNECTING_ERR;
WARN_ON(con->c.cid);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(clt_path->clt, "Path info response recv failed: %s\n",
ib_wc_status_msg(wc->status));
goto out;
}
WARN_ON(wc->opcode != IB_WC_RECV);
if (wc->byte_len < sizeof(*msg)) {
rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
wc->byte_len);
goto out;
}
ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) {
rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n",
le16_to_cpu(msg->type));
goto out;
}
rx_sz = sizeof(*msg);
rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
if (wc->byte_len < rx_sz) {
rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
wc->byte_len);
goto out;
}
err = process_info_rsp(clt_path, msg);
if (err)
goto out;
err = post_recv_path(clt_path);
if (err)
goto out;
state = RTRS_CLT_CONNECTED;
out:
rtrs_clt_update_wc_stats(con);
rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
rtrs_clt_change_state_get_old(clt_path, state, NULL);
}
static int rtrs_send_path_info(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
struct rtrs_msg_info_req *msg;
struct rtrs_iu *tx_iu, *rx_iu;
size_t rx_sz;
int err;
rx_sz = sizeof(struct rtrs_msg_info_rsp);
rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth;
tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
clt_path->s.dev->ib_dev, DMA_TO_DEVICE,
rtrs_clt_info_req_done);
rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev,
DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
if (!tx_iu || !rx_iu) {
err = -ENOMEM;
goto out;
}
/* Prepare for getting info response */
err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
if (err) {
rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err);
goto out;
}
rx_iu = NULL;
msg = tx_iu->buf;
msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname));
ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
tx_iu->dma_addr,
tx_iu->size, DMA_TO_DEVICE);
/* Send info request */
err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
if (err) {
rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err);
goto out;
}
tx_iu = NULL;
/* Wait for state change */
wait_event_interruptible_timeout(clt_path->state_wq,
clt_path->state != RTRS_CLT_CONNECTING,
msecs_to_jiffies(
RTRS_CONNECT_TIMEOUT_MS));
if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) {
if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR)
err = -ECONNRESET;
else
err = -ETIMEDOUT;
}
out:
if (tx_iu)
rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1);
if (rx_iu)
rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1);
if (err)
/* If we've never taken async path because of malloc problems */
rtrs_clt_change_state_get_old(clt_path,
RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
/**
* init_path() - establishes all path connections and does handshake
* @clt_path: client path.
* In case of error full close or reconnect procedure should be taken,
* because reconnect or close async works can be started.
*/
static int init_path(struct rtrs_clt_path *clt_path)
{
int err;
char str[NAME_MAX];
struct rtrs_addr path = {
.src = &clt_path->s.src_addr,
.dst = &clt_path->s.dst_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
mutex_lock(&clt_path->init_mutex);
err = init_conns(clt_path);
if (err) {
rtrs_err(clt_path->clt,
"init_conns() failed: err=%d path=%s [%s:%u]\n", err,
str, clt_path->hca_name, clt_path->hca_port);
goto out;
}
err = rtrs_send_path_info(clt_path);
if (err) {
rtrs_err(clt_path->clt,
"rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n",
err, str, clt_path->hca_name, clt_path->hca_port);
goto out;
}
rtrs_clt_path_up(clt_path);
out:
mutex_unlock(&clt_path->init_mutex);
return err;
}
static void rtrs_clt_reconnect_work(struct work_struct *work)
{
struct rtrs_clt_path *clt_path;
struct rtrs_clt_sess *clt;
int err;
clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
reconnect_dwork);
clt = clt_path->clt;
trace_rtrs_clt_reconnect_work(clt_path);
if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
return;
if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) {
/* Close a path completely if max attempts is reached */
rtrs_clt_close_conns(clt_path, false);
return;
}
clt_path->reconnect_attempts++;
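/* Back off a bit before retrying so we do not hammer the server */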
msleep(RTRS_RECONNECT_BACKOFF);
if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
err = init_path(clt_path);
if (err)
goto reconnect_again;
}
return;
reconnect_again:
if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
clt_path->stats->reconnects.fail_cnt++;
queue_work(rtrs_wq, &clt_path->err_recovery_work);
}
}
static void rtrs_clt_dev_release(struct device *dev)
{
struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
dev);
mutex_destroy(&clt->paths_ev_mutex);
mutex_destroy(&clt->paths_mutex);
kfree(clt);
}
static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
u16 port, size_t pdu_sz, void *priv,
void (*link_ev)(void *priv,
enum rtrs_clt_link_ev ev),
unsigned int reconnect_delay_sec,
unsigned int max_reconnect_attempts)
{
struct rtrs_clt_sess *clt;
int err;
if (!paths_num || paths_num > MAX_PATHS_NUM)
return ERR_PTR(-EINVAL);
if (strlen(sessname) >= sizeof(clt->sessname))
return ERR_PTR(-EINVAL);
clt = kzalloc(sizeof(*clt), GFP_KERNEL);
if (!clt)
return ERR_PTR(-ENOMEM);
clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path));
if (!clt->pcpu_path) {
kfree(clt);
return ERR_PTR(-ENOMEM);
}
clt->dev.class = &rtrs_clt_dev_class;
clt->dev.release = rtrs_clt_dev_release;
uuid_gen(&clt->paths_uuid);
INIT_LIST_HEAD_RCU(&clt->paths_list);
clt->paths_num = paths_num;
clt->paths_up = MAX_PATHS_NUM;
clt->port = port;
clt->pdu_sz = pdu_sz;
clt->max_segments = RTRS_MAX_SEGMENTS;
clt->reconnect_delay_sec = reconnect_delay_sec;
clt->max_reconnect_attempts = max_reconnect_attempts;
clt->priv = priv;
clt->link_ev = link_ev;
clt->mp_policy = MP_POLICY_MIN_INFLIGHT;
strscpy(clt->sessname, sessname, sizeof(clt->sessname));
init_waitqueue_head(&clt->permits_wait);
mutex_init(&clt->paths_ev_mutex);
mutex_init(&clt->paths_mutex);
device_initialize(&clt->dev);
err = dev_set_name(&clt->dev, "%s", sessname);
if (err)
goto err_put;
/*
* Suppress user space notification until
* sysfs files are created
*/
dev_set_uevent_suppress(&clt->dev, true);
err = device_add(&clt->dev);
if (err)
goto err_put;
clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj);
if (!clt->kobj_paths) {
err = -ENOMEM;
goto err_del;
}
err = rtrs_clt_create_sysfs_root_files(clt);
if (err) {
kobject_del(clt->kobj_paths);
kobject_put(clt->kobj_paths);
goto err_del;
}
dev_set_uevent_suppress(&clt->dev, false);
kobject_uevent(&clt->dev.kobj, KOBJ_ADD);
return clt;
err_del:
device_del(&clt->dev);
err_put:
free_percpu(clt->pcpu_path);
put_device(&clt->dev);
return ERR_PTR(err);
}
static void free_clt(struct rtrs_clt_sess *clt)
{
free_percpu(clt->pcpu_path);
/*
* release callback will free clt and destroy mutexes in last put
*/
device_unregister(&clt->dev);
}
/**
* rtrs_clt_open() - Open a path to an RTRS server
* @ops: holds the link event callback and the private pointer.
* @pathname: name of the path to an RTRS server
* @paths: Paths to be established defined by their src and dst addresses
* @paths_num: Number of elements in the @paths array
* @port: port to be used by the RTRS session
* @pdu_sz: Size of extra payload which can be accessed after permit allocation.
* @reconnect_delay_sec: time between reconnect tries
* @max_reconnect_attempts: Number of times to reconnect on error before giving
* up, 0 for disabled, -1 for forever
* @nr_poll_queues: number of polling mode connection using IB_POLL_DIRECT flag
*
* Starts session establishment with the rtrs_server. The function can block
* up to ~2000ms before it returns.
*
* Return a valid pointer on success otherwise PTR_ERR.
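*
* A minimal usage sketch (illustrative only; ctx, my_link_ev, paths and the
* other values below are hypothetical and not taken from this file):
*
*	struct rtrs_clt_ops ops = { .priv = ctx, .link_ev = my_link_ev };
*	sess = rtrs_clt_open(&ops, "session0", paths, 1, port, pdu_sz, 30, -1, 0);
*	if (IS_ERR(sess))
*		return PTR_ERR(sess);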
*/
struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
const char *pathname,
const struct rtrs_addr *paths,
size_t paths_num, u16 port,
size_t pdu_sz, u8 reconnect_delay_sec,
s16 max_reconnect_attempts, u32 nr_poll_queues)
{
struct rtrs_clt_path *clt_path, *tmp;
struct rtrs_clt_sess *clt;
int err, i;
if (strchr(pathname, '/') || strchr(pathname, '.')) {
pr_err("pathname cannot contain / and .\n");
err = -EINVAL;
goto out;
}
clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv,
ops->link_ev,
reconnect_delay_sec,
max_reconnect_attempts);
if (IS_ERR(clt)) {
err = PTR_ERR(clt);
goto out;
}
for (i = 0; i < paths_num; i++) {
struct rtrs_clt_path *clt_path;
clt_path = alloc_path(clt, &paths[i], nr_cpu_ids,
nr_poll_queues);
if (IS_ERR(clt_path)) {
err = PTR_ERR(clt_path);
goto close_all_path;
}
if (!i)
clt_path->for_new_clt = 1;
list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
err = init_path(clt_path);
if (err) {
list_del_rcu(&clt_path->s.entry);
rtrs_clt_close_conns(clt_path, true);
free_percpu(clt_path->stats->pcpu_stats);
kfree(clt_path->stats);
free_path(clt_path);
goto close_all_path;
}
err = rtrs_clt_create_path_files(clt_path);
if (err) {
list_del_rcu(&clt_path->s.entry);
rtrs_clt_close_conns(clt_path, true);
free_percpu(clt_path->stats->pcpu_stats);
kfree(clt_path->stats);
free_path(clt_path);
goto close_all_path;
}
}
err = alloc_permits(clt);
if (err)
goto close_all_path;
return clt;
close_all_path:
list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
rtrs_clt_destroy_path_files(clt_path, NULL);
rtrs_clt_close_conns(clt_path, true);
kobject_put(&clt_path->kobj);
}
rtrs_clt_destroy_sysfs_root(clt);
free_clt(clt);
out:
return ERR_PTR(err);
}
EXPORT_SYMBOL(rtrs_clt_open);
/**
* rtrs_clt_close() - Close a path
* @clt: Session handle. Session is freed upon return.
*/
void rtrs_clt_close(struct rtrs_clt_sess *clt)
{
struct rtrs_clt_path *clt_path, *tmp;
/* Firstly forbid sysfs access */
rtrs_clt_destroy_sysfs_root(clt);
/* Now it is safe to iterate over all paths without locks */
list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
rtrs_clt_close_conns(clt_path, true);
rtrs_clt_destroy_path_files(clt_path, NULL);
kobject_put(&clt_path->kobj);
}
free_permits(clt);
free_clt(clt);
}
EXPORT_SYMBOL(rtrs_clt_close);
int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
{
enum rtrs_clt_state old_state;
int err = -EBUSY;
bool changed;
changed = rtrs_clt_change_state_get_old(clt_path,
RTRS_CLT_RECONNECTING,
&old_state);
if (changed) {
clt_path->reconnect_attempts = 0;
rtrs_clt_stop_and_destroy_conns(clt_path);
queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
}
if (changed || old_state == RTRS_CLT_RECONNECTING) {
/*
* flush_delayed_work() queues pending work for immediate
* execution, so do the flush if we have queued something
* right now or work is pending.
*/
flush_delayed_work(&clt_path->reconnect_dwork);
err = (READ_ONCE(clt_path->state) ==
RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
}
return err;
}
int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path,
const struct attribute *sysfs_self)
{
enum rtrs_clt_state old_state;
bool changed;
/*
* Keep stopping the path until its state is changed to DEAD or
* is observed as DEAD:
* 1. State was changed to DEAD - we were fast and nobody
* invoked rtrs_clt_reconnect(), which could otherwise start
* reconnecting again.
* 2. State was observed as DEAD - someone else is removing
* the path in parallel.
*/
do {
rtrs_clt_close_conns(clt_path, true);
changed = rtrs_clt_change_state_get_old(clt_path,
RTRS_CLT_DEAD,
&old_state);
} while (!changed && old_state != RTRS_CLT_DEAD);
if (changed) {
rtrs_clt_remove_path_from_arr(clt_path);
rtrs_clt_destroy_path_files(clt_path, sysfs_self);
kobject_put(&clt_path->kobj);
}
return 0;
}
void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value)
{
clt->max_reconnect_attempts = (unsigned int)value;
}
int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt)
{
return (int)clt->max_reconnect_attempts;
}
/**
* rtrs_clt_request() - Request data transfer to/from server via RDMA.
*
* @dir: READ/WRITE
* @ops: callback function to be called as confirmation, and the private pointer.
* @clt: Session
* @permit: Preallocated permit
* @vec: Message that is sent to server together with the request.
* Sum of len of all @vec elements limited to <= IO_MSG_SIZE.
* Since the msg is copied internally it can be allocated on stack.
* @nr: Number of elements in @vec.
* @data_len: length of data sent to/from server
* @sg: Pages to be sent/received to/from server.
* @sg_cnt: Number of elements in the @sg
*
* Return:
* 0: Success
* <0: Error
*
* On dir=READ rtrs client will request a data transfer from Server to client.
* The data that the server will respond with will be stored in @sg when
* the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event.
* On dir=WRITE rtrs client will rdma write data in sg to server side.
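*
* A minimal WRITE sketch (illustrative only; req_done, my_req, msg, sg and
* the other names below are hypothetical):
*
*	struct rtrs_clt_req_ops ops = { .priv = my_req, .conf_fn = req_done };
*	struct kvec vec = { .iov_base = msg, .iov_len = msg_len };
*	err = rtrs_clt_request(WRITE, &ops, clt, permit, &vec, 1,
*			       data_len, sg, sg_cnt);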
*/
int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
struct rtrs_clt_sess *clt, struct rtrs_permit *permit,
const struct kvec *vec, size_t nr, size_t data_len,
struct scatterlist *sg, unsigned int sg_cnt)
{
struct rtrs_clt_io_req *req;
struct rtrs_clt_path *clt_path;
enum dma_data_direction dma_dir;
int err = -ECONNABORTED, i;
size_t usr_len, hdr_len;
struct path_it it;
/* Get kvec length */
for (i = 0, usr_len = 0; i < nr; i++)
usr_len += vec[i].iov_len;
if (dir == READ) {
hdr_len = sizeof(struct rtrs_msg_rdma_read) +
sg_cnt * sizeof(struct rtrs_sg_desc);
dma_dir = DMA_FROM_DEVICE;
} else {
hdr_len = sizeof(struct rtrs_msg_rdma_write);
dma_dir = DMA_TO_DEVICE;
}
rcu_read_lock();
for (path_it_init(&it, clt);
(clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
if (usr_len + hdr_len > clt_path->max_hdr_size) {
rtrs_wrn_rl(clt_path->clt,
"%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
dir == READ ? "Read" : "Write",
usr_len, hdr_len, clt_path->max_hdr_size);
err = -EMSGSIZE;
break;
}
req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv,
vec, usr_len, sg, sg_cnt, data_len,
dma_dir);
if (dir == READ)
err = rtrs_clt_read_req(req);
else
err = rtrs_clt_write_req(req);
if (err) {
req->in_use = false;
continue;
}
/* Success path */
break;
}
path_it_deinit(&it);
rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(rtrs_clt_request);
int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index)
{
/* If no path, return -1 for block layer not to try again */
int cnt = -1;
struct rtrs_con *con;
struct rtrs_clt_path *clt_path;
struct path_it it;
rcu_read_lock();
for (path_it_init(&it, clt);
(clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
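/* con[0] is the user connection, I/O connections start at index 1 */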
con = clt_path->s.con[index + 1];
cnt = ib_process_cq_direct(con->cq, -1);
if (cnt)
break;
}
path_it_deinit(&it);
rcu_read_unlock();
return cnt;
}
EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
/**
* rtrs_clt_query() - queries RTRS session attributes
* @clt: session pointer
* @attr: query results for session attributes.
*
* Return:
* 0 on success
* -ECOMM no connection to the server
*/
int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr)
{
if (!rtrs_clt_is_connected(clt))
return -ECOMM;
attr->queue_depth = clt->queue_depth;
attr->max_segments = clt->max_segments;
/* Cap max_io_size to min of remote buffer size and the fr pages */
attr->max_io_size = min_t(int, clt->max_io_size,
clt->max_segments * SZ_4K);
return 0;
}
EXPORT_SYMBOL(rtrs_clt_query);
int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
struct rtrs_addr *addr)
{
struct rtrs_clt_path *clt_path;
int err;
clt_path = alloc_path(clt, addr, nr_cpu_ids, 0);
if (IS_ERR(clt_path))
return PTR_ERR(clt_path);
mutex_lock(&clt->paths_mutex);
if (clt->paths_num == 0) {
/*
* When all the paths are removed for a session,
* the addition of the first path is like a new session for
* the storage server
*/
clt_path->for_new_clt = 1;
}
mutex_unlock(&clt->paths_mutex);
/*
* It is totally safe to add a path in the CONNECTING state: incoming
* IO will never grab it. Also it is very important to add the
* path before init, since init fires the LINK_CONNECTED event.
*/
rtrs_clt_add_path_to_arr(clt_path);
err = init_path(clt_path);
if (err)
goto close_path;
err = rtrs_clt_create_path_files(clt_path);
if (err)
goto close_path;
return 0;
close_path:
rtrs_clt_remove_path_from_arr(clt_path);
rtrs_clt_close_conns(clt_path, true);
free_percpu(clt_path->stats->pcpu_stats);
kfree(clt_path->stats);
free_path(clt_path);
return err;
}
static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev)
{
if (!(dev->ib_dev->attrs.device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS)) {
pr_err("Memory registrations not supported.\n");
return -ENOTSUPP;
}
return 0;
}
static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = {
.init = rtrs_clt_ib_dev_init
};
static int __init rtrs_client_init(void)
{
int ret = 0;
rtrs_rdma_dev_pd_init(0, &dev_pd);
ret = class_register(&rtrs_clt_dev_class);
if (ret) {
pr_err("Failed to create rtrs-client dev class\n");
return ret;
}
rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0);
if (!rtrs_wq) {
class_unregister(&rtrs_clt_dev_class);
return -ENOMEM;
}
return 0;
}
static void __exit rtrs_client_exit(void)
{
destroy_workqueue(rtrs_wq);
class_unregister(&rtrs_clt_dev_class);
rtrs_rdma_dev_pd_deinit(&dev_pd);
}
module_init(rtrs_client_init);
module_exit(rtrs_client_exit);
| linux-master | drivers/infiniband/ulp/rtrs/rtrs-clt.c |
/*
* iSCSI Initiator over iSER Data-Path
*
* Copyright (C) 2004 Dmitry Yusupov
* Copyright (C) 2004 Alex Aizman
* Copyright (C) 2005 Mike Christie
* Copyright (c) 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
* maintained by [email protected]
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Credits:
* Christoph Hellwig
* FUJITA Tomonori
* Arne Redlich
* Zhenyu Wang
* Modified by:
* Erez Zilber
*/
#include <linux/types.h>
#include <linux/list.h>
#include <linux/hardirq.h>
#include <linux/kfifo.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/in.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include "iscsi_iser.h"
MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz");
static const struct scsi_host_template iscsi_iser_sht;
static struct iscsi_transport iscsi_iser_transport;
static struct scsi_transport_template *iscsi_iser_scsi_transport;
static struct workqueue_struct *release_wq;
static DEFINE_MUTEX(unbind_iser_conn_mutex);
struct iser_global ig;
int iser_debug_level = 0;
module_param_named(debug_level, iser_debug_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)");
static int iscsi_iser_set(const char *val, const struct kernel_param *kp);
static const struct kernel_param_ops iscsi_iser_size_ops = {
.set = iscsi_iser_set,
.get = param_get_uint,
};
static unsigned int iscsi_max_lun = 512;
module_param_cb(max_lun, &iscsi_iser_size_ops, &iscsi_max_lun, S_IRUGO);
MODULE_PARM_DESC(max_lun, "Max LUNs to allow per session, should be > 0 (default:512)");
unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
module_param_cb(max_sectors, &iscsi_iser_size_ops, &iser_max_sectors,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command, should be > 0 (default:1024)");
bool iser_always_reg = true;
module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
MODULE_PARM_DESC(always_register,
"Always register memory, even for continuous memory regions (default:true)");
bool iser_pi_enable = false;
module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
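/* Shared module-param setter for max_lun/max_sectors: reject zero or non-numeric values */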
static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
{
int ret;
unsigned int n = 0;
ret = kstrtouint(val, 10, &n);
if (ret != 0 || n == 0)
return -EINVAL;
return param_set_uint(val, kp);
}
/*
* iscsi_iser_recv() - Process a successful recv completion
* @conn: iscsi connection
* @hdr: iscsi header
* @rx_data: buffer containing receive data payload
* @rx_data_len: length of rx_data
*
* Notes: In case of data length errors or iscsi PDU completion failures
* this routine will signal the iscsi layer of a connection failure.
*/
void iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
char *rx_data, int rx_data_len)
{
int rc = 0;
int datalen;
/* verify PDU length */
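/* the received length may exceed dlength by the pad bytes appended to the data segment (padded to a 4-byte boundary) */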
datalen = ntoh24(hdr->dlength);
if (datalen > rx_data_len || (datalen + 4) < rx_data_len) {
iser_err("wrong datalen %d (hdr), %d (IB)\n",
datalen, rx_data_len);
rc = ISCSI_ERR_DATALEN;
goto error;
}
if (datalen != rx_data_len)
iser_dbg("aligned datalen (%d) hdr, %d (IB)\n",
datalen, rx_data_len);
rc = iscsi_complete_pdu(conn, hdr, rx_data, rx_data_len);
if (rc && rc != ISCSI_ERR_NO_SCSI_CMD)
goto error;
return;
error:
iscsi_conn_failure(conn, rc);
}
/**
* iscsi_iser_pdu_alloc() - allocate an iscsi-iser PDU
* @task: iscsi task
* @opcode: iscsi command opcode
*
* Notes: This routine can't fail, it just assigns the iscsi task
* hdr and max hdr size.
*/
static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
struct iscsi_iser_task *iser_task = task->dd_data;
task->hdr = (struct iscsi_hdr *)&iser_task->desc.iscsi_header;
task->hdr_max = sizeof(iser_task->desc.iscsi_header);
return 0;
}
/**
* iser_initialize_task_headers() - Initialize task headers
* @task: iscsi task
* @tx_desc: iser tx descriptor
*
* Notes:
* This routine may race with iser teardown flow for scsi
* error handling TMFs. So for TMF we should acquire the
* state mutex to avoid dereferencing the IB device which
* may have already been terminated.
*/
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc)
{
struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn.device;
struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr;
if (unlikely(iser_conn->state != ISER_CONN_UP))
return -ENODEV;
dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
if (ib_dma_mapping_error(device->ib_device, dma_addr))
return -ENOMEM;
tx_desc->inv_wr.next = NULL;
tx_desc->reg_wr.wr.next = NULL;
tx_desc->mapped = true;
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
iser_task->iser_conn = iser_conn;
return 0;
}
/**
* iscsi_iser_task_init() - Initialize iscsi-iser task
* @task: iscsi task
*
* Initialize the task for the scsi command or mgmt command.
*
* Return: Returns zero on success or -ENOMEM when failing
* to init task headers (dma mapping error).
*/
static int iscsi_iser_task_init(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
int ret;
ret = iser_initialize_task_headers(task, &iser_task->desc);
if (ret) {
iser_err("Failed to init task %p, err = %d\n",
iser_task, ret);
return ret;
}
/* mgmt task */
if (!task->sc)
return 0;
iser_task->command_sent = 0;
iser_task_rdma_init(iser_task);
iser_task->sc = task->sc;
return 0;
}
/**
* iscsi_iser_mtask_xmit() - xmit management (immediate) task
* @conn: iscsi connection
* @task: task management task
*
* Notes:
* The function can return -EAGAIN in which case caller must
* call it again later, or recover. '0' return code means successful
* xmit.
*
**/
static int iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
struct iscsi_task *task)
{
int error = 0;
iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt);
error = iser_send_control(conn, task);
/* since iser xmits control with zero copy, tasks can not be recycled
* right after sending them.
* The recycling scheme is based on whether a response is expected
* - if yes, the task is recycled at iscsi_complete_pdu
* - if no, the task is recycled at iser_snd_completion
*/
return error;
}
static int iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
struct iscsi_task *task)
{
struct iscsi_r2t_info *r2t = &task->unsol_r2t;
struct iscsi_data hdr;
int error = 0;
/* Send data-out PDUs while there's still unsolicited data to send */
while (iscsi_task_has_unsol_data(task)) {
iscsi_prep_data_out_pdu(task, r2t, &hdr);
iser_dbg("Sending data-out: itt 0x%x, data count %d\n",
hdr.itt, r2t->data_count);
/* the buffer description has been passed with the command */
/* Send the command */
error = iser_send_data_out(conn, task, &hdr);
if (error) {
r2t->datasn--;
goto iscsi_iser_task_xmit_unsol_data_exit;
}
r2t->sent += r2t->data_count;
iser_dbg("Need to send %d more as data-out PDUs\n",
r2t->data_length - r2t->sent);
}
iscsi_iser_task_xmit_unsol_data_exit:
return error;
}
/**
* iscsi_iser_task_xmit() - xmit iscsi-iser task
* @task: iscsi task
*
* Return: zero on success or escalates $error on failure.
*/
static int iscsi_iser_task_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
struct iscsi_iser_task *iser_task = task->dd_data;
int error = 0;
if (!task->sc)
return iscsi_iser_mtask_xmit(conn, task);
if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
BUG_ON(scsi_bufflen(task->sc) == 0);
iser_dbg("cmd [itt %x total %d imm %d unsol_data %d\n",
task->itt, scsi_bufflen(task->sc),
task->imm_count, task->unsol_r2t.data_length);
}
iser_dbg("ctask xmit [cid %d itt 0x%x]\n",
conn->id, task->itt);
/* Send the cmd PDU */
if (!iser_task->command_sent) {
error = iser_send_command(conn, task);
if (error)
goto iscsi_iser_task_xmit_exit;
iser_task->command_sent = 1;
}
/* Send unsolicited data-out PDU(s) if necessary */
if (iscsi_task_has_unsol_data(task))
error = iscsi_iser_task_xmit_unsol_data(conn, task);
iscsi_iser_task_xmit_exit:
return error;
}
/**
* iscsi_iser_cleanup_task() - cleanup an iscsi-iser task
* @task: iscsi task
*
* Notes: In case the RDMA device is already NULL (it might have
* been removed in a DEVICE_REMOVAL CM event) the routine bails out
* without doing dma unmapping.
*/
static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = &iser_task->desc;
struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn.device;
/* DEVICE_REMOVAL event might have already released the device */
if (!device)
return;
if (likely(tx_desc->mapped)) {
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
tx_desc->mapped = false;
}
/* mgmt tasks do not need special cleanup */
if (!task->sc)
return;
if (iser_task->status == ISER_TASK_STATUS_STARTED) {
iser_task->status = ISER_TASK_STATUS_COMPLETED;
iser_task_rdma_finalize(iser_task);
}
}
/**
* iscsi_iser_check_protection() - check protection information status of task.
* @task: iscsi task
* @sector: error sector if exists (output)
*
* Return: zero if no data-integrity errors have occurred
* 0x1: data-integrity error occurred in the guard-block
* 0x2: data-integrity error occurred in the reference tag
* 0x3: data-integrity error occurred in the application tag
*
* In addition the error sector is marked.
*/
static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
{
struct iscsi_iser_task *iser_task = task->dd_data;
enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ?
ISER_DIR_IN : ISER_DIR_OUT;
return iser_check_task_pi_status(iser_task, dir, sector);
}
/**
* iscsi_iser_conn_create() - create a new iscsi-iser connection
* @cls_session: iscsi class connection
* @conn_idx: connection index within the session (for MCS)
*
* Return: iscsi_cls_conn when iscsi_conn_setup succeeds or NULL
* otherwise.
*/
static struct iscsi_cls_conn *
iscsi_iser_conn_create(struct iscsi_cls_session *cls_session,
uint32_t conn_idx)
{
struct iscsi_conn *conn;
struct iscsi_cls_conn *cls_conn;
cls_conn = iscsi_conn_setup(cls_session, 0, conn_idx);
if (!cls_conn)
return NULL;
conn = cls_conn->dd_data;
/*
* due to issues with the login code re iser semantics
* this is not set in iscsi_conn_setup - FIXME
*/
conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN;
return cls_conn;
}
/**
* iscsi_iser_conn_bind() - bind iscsi and iser connection structures
* @cls_session: iscsi class session
* @cls_conn: iscsi class connection
* @transport_eph: transport end-point handle
* @is_leading: indicate if this is the session leading connection (MCS)
*
* Return: zero on success, $error if iscsi_conn_bind fails and
* -EINVAL in case end-point doesn't exists anymore or iser connection
* state is not UP (teardown already started).
*/
static int iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
struct iscsi_cls_conn *cls_conn,
uint64_t transport_eph, int is_leading)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *iser_conn;
struct iscsi_endpoint *ep;
int error;
error = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (error)
return error;
/* the transport ep handle comes from user space so it must be
* verified against the global ib connections list */
ep = iscsi_lookup_endpoint(transport_eph);
if (!ep) {
iser_err("can't bind eph %llx\n",
(unsigned long long)transport_eph);
return -EINVAL;
}
iser_conn = ep->dd_data;
mutex_lock(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_UP) {
error = -EINVAL;
iser_err("iser_conn %p state is %d, teardown started\n",
iser_conn, iser_conn->state);
goto out;
}
error = iser_alloc_rx_descriptors(iser_conn, conn->session);
if (error)
goto out;
/* binds the iSER connection retrieved from the previously
* connected ep_handle to the iSCSI layer connection. exchanges
* connection pointers */
iser_info("binding iscsi conn %p to iser_conn %p\n", conn, iser_conn);
conn->dd_data = iser_conn;
iser_conn->iscsi_conn = conn;
out:
iscsi_put_endpoint(ep);
mutex_unlock(&iser_conn->state_mutex);
return error;
}
/**
* iscsi_iser_conn_start() - start iscsi-iser connection
* @cls_conn: iscsi class connection
*
* Notes: Here iser initializes (or re-initializes) stop_completion, as
* from this point on iscsi must call conn_stop in session/connection
* teardown, so the iser transport must wait for it.
*/
static int iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *iscsi_conn;
struct iser_conn *iser_conn;
iscsi_conn = cls_conn->dd_data;
iser_conn = iscsi_conn->dd_data;
reinit_completion(&iser_conn->stop_completion);
return iscsi_conn_start(cls_conn);
}
/**
* iscsi_iser_conn_stop() - stop iscsi-iser connection
* @cls_conn: iscsi class connection
* @flag: indicate if recover or terminate (passed as is)
*
* Notes: Calling iscsi_conn_stop might theoretically race with
* DEVICE_REMOVAL event and dereference a previously freed RDMA device
* handle, so we call it under the iser state lock to protect against
* this kind of race.
*/
static void iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *iser_conn = conn->dd_data;
iser_info("stopping iscsi_conn: %p, iser_conn: %p\n", conn, iser_conn);
/*
* Userspace may have goofed up and not bound the connection or
* might have only partially setup the connection.
*/
if (iser_conn) {
mutex_lock(&iser_conn->state_mutex);
mutex_lock(&unbind_iser_conn_mutex);
iser_conn_terminate(iser_conn);
iscsi_conn_stop(cls_conn, flag);
/* unbind */
iser_conn->iscsi_conn = NULL;
conn->dd_data = NULL;
mutex_unlock(&unbind_iser_conn_mutex);
complete(&iser_conn->stop_completion);
mutex_unlock(&iser_conn->state_mutex);
} else {
iscsi_conn_stop(cls_conn, flag);
}
}
/**
* iscsi_iser_session_destroy() - destroy iscsi-iser session
* @cls_session: iscsi class session
*
* Removes and frees the iscsi host.
*/
static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
iscsi_session_teardown(cls_session);
iscsi_host_remove(shost, false);
iscsi_host_free(shost);
}
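/* Translate the IB device's T10-DIF protection capabilities into SCSI host DIF/DIX protection flags */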
static inline unsigned int iser_dif_prot_caps(int prot_caps)
{
int ret = 0;
if (prot_caps & IB_PROT_T10DIF_TYPE_1)
ret |= SHOST_DIF_TYPE1_PROTECTION |
SHOST_DIX_TYPE0_PROTECTION |
SHOST_DIX_TYPE1_PROTECTION;
if (prot_caps & IB_PROT_T10DIF_TYPE_2)
ret |= SHOST_DIF_TYPE2_PROTECTION |
SHOST_DIX_TYPE2_PROTECTION;
if (prot_caps & IB_PROT_T10DIF_TYPE_3)
ret |= SHOST_DIF_TYPE3_PROTECTION |
SHOST_DIX_TYPE3_PROTECTION;
return ret;
}
/**
* iscsi_iser_session_create() - create an iscsi-iser session
* @ep: iscsi end-point handle
* @cmds_max: maximum commands in this session
* @qdepth: session command queue depth
* @initial_cmdsn: initiator command sequence number
*
* Allocates and adds a scsi host, exposes DIF support if it
* exists, and sets up an iscsi session.
*/
static struct iscsi_cls_session *
iscsi_iser_session_create(struct iscsi_endpoint *ep,
uint16_t cmds_max, uint16_t qdepth,
uint32_t initial_cmdsn)
{
struct iscsi_cls_session *cls_session;
struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
struct ib_device *ib_dev;
u32 max_fr_sectors;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
return NULL;
shost->transportt = iscsi_iser_scsi_transport;
shost->cmd_per_lun = qdepth;
shost->max_lun = iscsi_max_lun;
shost->max_id = 0;
shost->max_channel = 0;
shost->max_cmd_len = 16;
/*
* older userspace tools (before 2.0-870) did not pass us
* the leading conn's ep so this will be NULL;
*/
if (ep) {
iser_conn = ep->dd_data;
shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
shost->can_queue = min_t(u16, cmds_max, iser_conn->max_cmds);
mutex_lock(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_UP) {
iser_err("iser conn %p already started teardown\n",
iser_conn);
mutex_unlock(&iser_conn->state_mutex);
goto free_host;
}
ib_conn = &iser_conn->ib_conn;
ib_dev = ib_conn->device->ib_device;
if (ib_conn->pi_support) {
u32 sig_caps = ib_dev->attrs.sig_prot_cap;
shost->sg_prot_tablesize = shost->sg_tablesize;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP |
SHOST_DIX_GUARD_CRC);
}
if (!(ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
shost->virt_boundary_mask = SZ_4K - 1;
if (iscsi_host_add(shost, ib_dev->dev.parent)) {
mutex_unlock(&iser_conn->state_mutex);
goto free_host;
}
mutex_unlock(&iser_conn->state_mutex);
} else {
shost->can_queue = min_t(u16, cmds_max, ISER_DEF_XMIT_CMDS_MAX);
if (iscsi_host_add(shost, NULL))
goto free_host;
}
max_fr_sectors = (shost->sg_tablesize * PAGE_SIZE) >> 9;
shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
iser_conn, shost->sg_tablesize,
shost->max_sectors);
if (shost->max_sectors < iser_max_sectors)
iser_warn("max_sectors was reduced from %u to %u\n",
iser_max_sectors, shost->max_sectors);
cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
shost->can_queue, 0,
sizeof(struct iscsi_iser_task),
initial_cmdsn, 0);
if (!cls_session)
goto remove_host;
return cls_session;
remove_host:
iscsi_host_remove(shost, false);
free_host:
iscsi_host_free(shost);
return NULL;
}
static int iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
enum iscsi_param param, char *buf, int buflen)
{
int value;
switch (param) {
case ISCSI_PARAM_MAX_RECV_DLENGTH:
/* TBD */
break;
case ISCSI_PARAM_HDRDGST_EN:
sscanf(buf, "%d", &value);
if (value) {
iser_err("DataDigest wasn't negotiated to None\n");
return -EPROTO;
}
break;
case ISCSI_PARAM_DATADGST_EN:
sscanf(buf, "%d", &value);
if (value) {
iser_err("DataDigest wasn't negotiated to None\n");
return -EPROTO;
}
break;
case ISCSI_PARAM_IFMARKER_EN:
sscanf(buf, "%d", &value);
if (value) {
iser_err("IFMarker wasn't negotiated to No\n");
return -EPROTO;
}
break;
case ISCSI_PARAM_OFMARKER_EN:
sscanf(buf, "%d", &value);
if (value) {
iser_err("OFMarker wasn't negotiated to No\n");
return -EPROTO;
}
break;
default:
return iscsi_set_param(cls_conn, param, buf, buflen);
}
return 0;
}
/**
* iscsi_iser_conn_get_stats() - get iscsi connection statistics
* @cls_conn: iscsi class connection
* @stats: iscsi stats to output
*
* Output connection statistics.
*/
static void iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn,
struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data;
stats->txdata_octets = conn->txdata_octets;
stats->rxdata_octets = conn->rxdata_octets;
stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
stats->dataout_pdus = conn->dataout_pdus_cnt;
stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
stats->datain_pdus = conn->datain_pdus_cnt; /* always 0 */
stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */
stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
stats->custom_length = 0;
}
static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
enum iscsi_param param, char *buf)
{
struct iser_conn *iser_conn = ep->dd_data;
switch (param) {
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_CONN_ADDRESS:
if (!iser_conn || !iser_conn->ib_conn.cma_id)
return -ENOTCONN;
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
&iser_conn->ib_conn.cma_id->route.addr.dst_addr,
param, buf);
default:
break;
}
return -ENOSYS;
}
/**
* iscsi_iser_ep_connect() - Initiate iSER connection establishment
* @shost: scsi_host
* @dst_addr: destination address
* @non_blocking: indicate if routine can block
*
* Allocate an iscsi endpoint, an iser_conn structure and bind them.
* After that start RDMA connection establishment via rdma_cm. We
* don't allocate iser_conn embedded in iscsi_endpoint since in teardown
* the endpoint will be destroyed at ep_disconnect while iser_conn will
* clean up its resources asynchronously.
*
* Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error)
* if fails.
*/
static struct iscsi_endpoint *iscsi_iser_ep_connect(struct Scsi_Host *shost,
struct sockaddr *dst_addr,
int non_blocking)
{
int err;
struct iser_conn *iser_conn;
struct iscsi_endpoint *ep;
ep = iscsi_create_endpoint(0);
if (!ep)
return ERR_PTR(-ENOMEM);
iser_conn = kzalloc(sizeof(*iser_conn), GFP_KERNEL);
if (!iser_conn) {
err = -ENOMEM;
goto failure;
}
ep->dd_data = iser_conn;
iser_conn->ep = ep;
iser_conn_init(iser_conn);
err = iser_connect(iser_conn, NULL, dst_addr, non_blocking);
if (err)
goto failure;
return ep;
failure:
iscsi_destroy_endpoint(ep);
return ERR_PTR(err);
}
/**
* iscsi_iser_ep_poll() - poll for iser connection establishment to complete
* @ep: iscsi endpoint (created at ep_connect)
* @timeout_ms: polling timeout allowed in ms.
*
* This routine boils down to waiting for up_completion signaling
* that cma_id got CONNECTED event.
*
* Return: 1 if succeeded in connection establishment, 0 if timeout expired
* (libiscsi retry will kick in) or -1 if interrupted by a signal
* or, more likely, the iser connection state transitioned to TERMINATING or
* DOWN during the wait period.
*/
static int iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct iser_conn *iser_conn = ep->dd_data;
int rc;
rc = wait_for_completion_interruptible_timeout(&iser_conn->up_completion,
msecs_to_jiffies(timeout_ms));
/* if conn establishment failed, return error code to iscsi */
if (rc == 0) {
mutex_lock(&iser_conn->state_mutex);
if (iser_conn->state == ISER_CONN_TERMINATING ||
iser_conn->state == ISER_CONN_DOWN)
rc = -1;
mutex_unlock(&iser_conn->state_mutex);
}
iser_info("iser conn %p rc = %d\n", iser_conn, rc);
if (rc > 0)
return 1; /* success, this is the equivalent of EPOLLOUT */
else if (!rc)
return 0; /* timeout */
else
return rc; /* signal */
}
/**
* iscsi_iser_ep_disconnect() - Initiate connection teardown process
* @ep: iscsi endpoint handle
*
* This routine is not blocked by iser and RDMA termination process
* completion, as we either queue deferred work for iser/RDMA destruction
* and cleanup, or actually call it immediately in case we didn't pass
* the iscsi conn bind/start stage, thus it is safe.
*/
static void iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
{
struct iser_conn *iser_conn = ep->dd_data;
iser_info("ep %p iser conn %p\n", ep, iser_conn);
mutex_lock(&iser_conn->state_mutex);
iser_conn_terminate(iser_conn);
/*
* if iser_conn and iscsi_conn are bound, we must wait for
* iscsi_conn_stop and flush errors completion before freeing
* the iser resources. Otherwise we are safe to free resources
* immediately.
*/
if (iser_conn->iscsi_conn) {
INIT_WORK(&iser_conn->release_work, iser_release_work);
queue_work(release_wq, &iser_conn->release_work);
mutex_unlock(&iser_conn->state_mutex);
} else {
iser_conn->state = ISER_CONN_DOWN;
mutex_unlock(&iser_conn->state_mutex);
iser_conn_release(iser_conn);
}
iscsi_destroy_endpoint(ep);
}
static umode_t iser_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
switch (param) {
case ISCSI_HOST_PARAM_NETDEV_NAME:
case ISCSI_HOST_PARAM_HWADDRESS:
case ISCSI_HOST_PARAM_INITIATOR_NAME:
return S_IRUGO;
default:
return 0;
}
case ISCSI_PARAM:
switch (param) {
case ISCSI_PARAM_MAX_RECV_DLENGTH:
case ISCSI_PARAM_MAX_XMIT_DLENGTH:
case ISCSI_PARAM_HDRDGST_EN:
case ISCSI_PARAM_DATADGST_EN:
case ISCSI_PARAM_CONN_ADDRESS:
case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_EXP_STATSN:
case ISCSI_PARAM_PERSISTENT_ADDRESS:
case ISCSI_PARAM_PERSISTENT_PORT:
case ISCSI_PARAM_PING_TMO:
case ISCSI_PARAM_RECV_TMO:
case ISCSI_PARAM_INITIAL_R2T_EN:
case ISCSI_PARAM_MAX_R2T:
case ISCSI_PARAM_IMM_DATA_EN:
case ISCSI_PARAM_FIRST_BURST:
case ISCSI_PARAM_MAX_BURST:
case ISCSI_PARAM_PDU_INORDER_EN:
case ISCSI_PARAM_DATASEQ_INORDER_EN:
case ISCSI_PARAM_TARGET_NAME:
case ISCSI_PARAM_TPGT:
case ISCSI_PARAM_USERNAME:
case ISCSI_PARAM_PASSWORD:
case ISCSI_PARAM_USERNAME_IN:
case ISCSI_PARAM_PASSWORD_IN:
case ISCSI_PARAM_FAST_ABORT:
case ISCSI_PARAM_ABORT_TMO:
case ISCSI_PARAM_LU_RESET_TMO:
case ISCSI_PARAM_TGT_RESET_TMO:
case ISCSI_PARAM_IFACE_NAME:
case ISCSI_PARAM_INITIATOR_NAME:
case ISCSI_PARAM_DISCOVERY_SESS:
return S_IRUGO;
default:
return 0;
}
}
return 0;
}
static const struct scsi_host_template iscsi_iser_sht = {
.module = THIS_MODULE,
.name = "iSCSI Initiator over iSER",
.queuecommand = iscsi_queuecommand,
.change_queue_depth = scsi_change_queue_depth,
.sg_tablesize = ISCSI_ISER_DEF_SG_TABLESIZE,
.cmd_per_lun = ISER_DEF_CMD_PER_LUN,
.eh_timed_out = iscsi_eh_cmd_timed_out,
.eh_abort_handler = iscsi_eh_abort,
.eh_device_reset_handler= iscsi_eh_device_reset,
.eh_target_reset_handler = iscsi_eh_recover_target,
.target_alloc = iscsi_target_alloc,
.proc_name = "iscsi_iser",
.this_id = -1,
.track_queue_depth = 1,
.cmd_size = sizeof(struct iscsi_cmd),
};
static struct iscsi_transport iscsi_iser_transport = {
.owner = THIS_MODULE,
.name = "iser",
.caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_TEXT_NEGO,
/* session management */
.create_session = iscsi_iser_session_create,
.destroy_session = iscsi_iser_session_destroy,
/* connection management */
.create_conn = iscsi_iser_conn_create,
.bind_conn = iscsi_iser_conn_bind,
.unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = iser_attr_is_visible,
.set_param = iscsi_iser_set_param,
.get_conn_param = iscsi_conn_get_param,
.get_ep_param = iscsi_iser_get_ep_param,
.get_session_param = iscsi_session_get_param,
.start_conn = iscsi_iser_conn_start,
.stop_conn = iscsi_iser_conn_stop,
/* iscsi host params */
.get_host_param = iscsi_host_get_param,
.set_host_param = iscsi_host_set_param,
/* IO */
.send_pdu = iscsi_conn_send_pdu,
.get_stats = iscsi_iser_conn_get_stats,
.init_task = iscsi_iser_task_init,
.xmit_task = iscsi_iser_task_xmit,
.cleanup_task = iscsi_iser_cleanup_task,
.alloc_pdu = iscsi_iser_pdu_alloc,
.check_protection = iscsi_iser_check_protection,
/* recovery */
.session_recovery_timedout = iscsi_session_recovery_timedout,
.ep_connect = iscsi_iser_ep_connect,
.ep_poll = iscsi_iser_ep_poll,
.ep_disconnect = iscsi_iser_ep_disconnect
};
static int __init iser_init(void)
{
int err;
iser_dbg("Starting iSER datamover...\n");
memset(&ig, 0, sizeof(struct iser_global));
ig.desc_cache = kmem_cache_create("iser_descriptors",
sizeof(struct iser_tx_desc),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (ig.desc_cache == NULL)
return -ENOMEM;
/* device init is called only after the first addr resolution */
mutex_init(&ig.device_list_mutex);
INIT_LIST_HEAD(&ig.device_list);
mutex_init(&ig.connlist_mutex);
INIT_LIST_HEAD(&ig.connlist);
release_wq = alloc_workqueue("release workqueue", 0, 0);
if (!release_wq) {
iser_err("failed to allocate release workqueue\n");
err = -ENOMEM;
goto err_alloc_wq;
}
iscsi_iser_scsi_transport = iscsi_register_transport(
&iscsi_iser_transport);
if (!iscsi_iser_scsi_transport) {
iser_err("iscsi_register_transport failed\n");
err = -EINVAL;
goto err_reg;
}
return 0;
err_reg:
destroy_workqueue(release_wq);
err_alloc_wq:
kmem_cache_destroy(ig.desc_cache);
return err;
}
static void __exit iser_exit(void)
{
struct iser_conn *iser_conn, *n;
int connlist_empty;
iser_dbg("Removing iSER datamover...\n");
destroy_workqueue(release_wq);
mutex_lock(&ig.connlist_mutex);
connlist_empty = list_empty(&ig.connlist);
mutex_unlock(&ig.connlist_mutex);
if (!connlist_empty) {
iser_err("Error cleanup stage completed but we still have iser "
"connections, destroying them anyway\n");
list_for_each_entry_safe(iser_conn, n, &ig.connlist,
conn_list) {
iser_conn_release(iser_conn);
}
}
iscsi_unregister_transport(&iscsi_iser_transport);
kmem_cache_destroy(ig.desc_cache);
}
module_init(iser_init);
module_exit(iser_exit);
| linux-master | drivers/infiniband/ulp/iser/iscsi_iser.c |
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/kfifo.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include "iscsi_iser.h"
/* Register user buffer memory and initialize passive rdma
* dto descriptor. Data size is stored in
* task->data[ISER_DIR_IN].data_len, Protection size
* os stored in task->prot[ISER_DIR_IN].data_len
*/
static int iser_prepare_read_cmd(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_mem_reg *mem_reg;
int err;
struct iser_ctrl *hdr = &iser_task->desc.iser_header;
err = iser_dma_map_task_data(iser_task,
ISER_DIR_IN,
DMA_FROM_DEVICE);
if (err)
return err;
err = iser_reg_mem_fastreg(iser_task, ISER_DIR_IN, false);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
goto out_err;
}
mem_reg = &iser_task->rdma_reg[ISER_DIR_IN];
hdr->flags |= ISER_RSV;
hdr->read_stag = cpu_to_be32(mem_reg->rkey);
hdr->read_va = cpu_to_be64(mem_reg->sge.addr);
iser_dbg("Cmd itt:%d READ tags RKEY:%#.4X VA:%#llX\n",
task->itt, mem_reg->rkey,
(unsigned long long)mem_reg->sge.addr);
return 0;
out_err:
iser_dma_unmap_task_data(iser_task, ISER_DIR_IN, DMA_FROM_DEVICE);
return err;
}
/* Register user buffer memory and initialize passive rdma
* dto descriptor. Data size is stored in
* task->data[ISER_DIR_OUT].data_len, Protection size
* is stored at task->prot[ISER_DIR_OUT].data_len
*/
static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
unsigned int unsol_sz, unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_mem_reg *mem_reg;
int err;
struct iser_ctrl *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
err = iser_dma_map_task_data(iser_task,
ISER_DIR_OUT,
DMA_TO_DEVICE);
if (err)
return err;
err = iser_reg_mem_fastreg(iser_task, ISER_DIR_OUT,
buf_out->data_len == imm_sz);
if (err) {
iser_err("Failed to register write cmd RDMA mem\n");
goto out_err;
}
mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
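/* If unsolicited data does not cover the whole transfer, expose the write stag/va so the target can fetch the solicited part */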
if (unsol_sz < edtl) {
hdr->flags |= ISER_WSV;
if (buf_out->data_len > imm_sz) {
hdr->write_stag = cpu_to_be32(mem_reg->rkey);
hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz);
}
iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X VA:%#llX + unsol:%d\n",
task->itt, mem_reg->rkey,
(unsigned long long)mem_reg->sge.addr, unsol_sz);
}
if (imm_sz > 0) {
iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
task->itt, imm_sz);
tx_dsg->addr = mem_reg->sge.addr;
tx_dsg->length = imm_sz;
tx_dsg->lkey = mem_reg->sge.lkey;
iser_task->desc.num_sge = 2;
}
return 0;
out_err:
iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT, DMA_TO_DEVICE);
return err;
}
/* creates a new tx descriptor and adds header regd buffer */
static void iser_create_send_desc(struct iser_conn *iser_conn,
struct iser_tx_desc *tx_desc, enum iser_desc_type type,
void (*done)(struct ib_cq *cq, struct ib_wc *wc))
{
struct iser_device *device = iser_conn->ib_conn.device;
tx_desc->type = type;
tx_desc->cqe.done = done;
ib_dma_sync_single_for_cpu(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
tx_desc->iser_header.flags = ISER_VER;
tx_desc->num_sge = 1;
}
static void iser_free_login_buf(struct iser_conn *iser_conn)
{
struct iser_device *device = iser_conn->ib_conn.device;
struct iser_login_desc *desc = &iser_conn->login_desc;
if (!desc->req)
return;
ib_dma_unmap_single(device->ib_device, desc->req_dma,
ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
ib_dma_unmap_single(device->ib_device, desc->rsp_dma,
ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
kfree(desc->req);
kfree(desc->rsp);
/* make sure we never redo any unmapping */
desc->req = NULL;
desc->rsp = NULL;
}
static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{
struct iser_device *device = iser_conn->ib_conn.device;
struct iser_login_desc *desc = &iser_conn->login_desc;
desc->req = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
if (!desc->req)
return -ENOMEM;
desc->req_dma = ib_dma_map_single(device->ib_device, desc->req,
ISCSI_DEF_MAX_RECV_SEG_LEN,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(device->ib_device,
desc->req_dma))
goto free_req;
desc->rsp = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
if (!desc->rsp)
goto unmap_req;
desc->rsp_dma = ib_dma_map_single(device->ib_device, desc->rsp,
ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
if (ib_dma_mapping_error(device->ib_device,
desc->rsp_dma))
goto free_rsp;
return 0;
free_rsp:
kfree(desc->rsp);
unmap_req:
ib_dma_unmap_single(device->ib_device, desc->req_dma,
ISCSI_DEF_MAX_RECV_SEG_LEN,
DMA_TO_DEVICE);
free_req:
kfree(desc->req);
return -ENOMEM;
}
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session)
{
int i, j;
u64 dma_addr;
struct iser_rx_desc *rx_desc;
struct ib_sge *rx_sg;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
iser_conn->qp_max_recv_dtos = session->cmds_max;
if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
iser_conn->pages_per_mr))
goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(iser_conn))
goto alloc_login_buf_fail;
iser_conn->num_rx_descs = session->cmds_max;
iser_conn->rx_descs = kmalloc_array(iser_conn->num_rx_descs,
sizeof(struct iser_rx_desc),
GFP_KERNEL);
if (!iser_conn->rx_descs)
goto rx_desc_alloc_fail;
rx_desc = iser_conn->rx_descs;
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) {
dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(device->ib_device, dma_addr))
goto rx_desc_dma_map_failed;
rx_desc->dma_addr = dma_addr;
rx_desc->cqe.done = iser_task_rsp;
rx_sg = &rx_desc->rx_sg;
rx_sg->addr = rx_desc->dma_addr;
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
rx_sg->lkey = device->pd->local_dma_lkey;
}
return 0;
rx_desc_dma_map_failed:
rx_desc = iser_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(iser_conn->rx_descs);
iser_conn->rx_descs = NULL;
rx_desc_alloc_fail:
iser_free_login_buf(iser_conn);
alloc_login_buf_fail:
iser_free_fastreg_pool(ib_conn);
create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM;
}
void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{
int i;
struct iser_rx_desc *rx_desc;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
iser_free_fastreg_pool(ib_conn);
rx_desc = iser_conn->rx_descs;
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(iser_conn->rx_descs);
/* make sure we never redo any unmapping */
iser_conn->rx_descs = NULL;
iser_free_login_buf(iser_conn);
}
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_session *session = conn->session;
int err = 0;
int i;
iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
/* check if this is the last login - going to full feature phase */
if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
goto out;
if (session->discovery_sess) {
iser_info("Discovery session, re-using login RX buffer\n");
goto out;
}
iser_info("Normal session, posting batch of RX %d buffers\n",
iser_conn->qp_max_recv_dtos - 1);
/*
* Initial post receive buffers.
* There is one already posted recv buffer (for the last login
* response). Therefore, the first recv buffer is skipped here.
*/
for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
if (err)
goto out;
}
out:
return err;
}
/**
* iser_send_command - send command PDU
* @conn: link to matching iscsi connection
* @task: SCSI command task
*/
int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
unsigned long edtl;
int err;
struct iser_data_buf *data_buf, *prot_buf;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
edtl = ntohl(hdr->data_length);
/* build the tx desc regd header and add it to the tx desc dto */
iser_create_send_desc(iser_conn, tx_desc, ISCSI_TX_SCSI_COMMAND,
iser_cmd_comp);
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
data_buf = &iser_task->data[ISER_DIR_IN];
prot_buf = &iser_task->prot[ISER_DIR_IN];
} else {
data_buf = &iser_task->data[ISER_DIR_OUT];
prot_buf = &iser_task->prot[ISER_DIR_OUT];
}
if (scsi_sg_count(sc)) { /* using a scatter list */
data_buf->sg = scsi_sglist(sc);
data_buf->size = scsi_sg_count(sc);
}
data_buf->data_len = scsi_bufflen(sc);
if (scsi_prot_sg_count(sc)) {
prot_buf->sg = scsi_prot_sglist(sc);
prot_buf->size = scsi_prot_sg_count(sc);
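/*
* T10-PI carries an 8-byte tuple per logical block, so the protection
* buffer length is (number of sectors) * 8.
*/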
prot_buf->data_len = (data_buf->data_len >>
ilog2(sc->device->sector_size)) * 8;
}
if (hdr->flags & ISCSI_FLAG_CMD_READ) {
err = iser_prepare_read_cmd(task);
if (err)
goto send_command_error;
}
if (hdr->flags & ISCSI_FLAG_CMD_WRITE) {
err = iser_prepare_write_cmd(task,
task->imm_count,
task->imm_count +
task->unsol_r2t.data_length,
edtl);
if (err)
goto send_command_error;
}
iser_task->status = ISER_TASK_STATUS_STARTED;
err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
send_command_error:
iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
return err;
}
/**
* iser_send_data_out - send data out PDU
* @conn: link to matching iscsi connection
* @task: SCSI command task
* @hdr: pointer to the LLD's iSCSI message header
*/
int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
struct iscsi_data *hdr)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc;
struct iser_mem_reg *mem_reg;
unsigned long buf_offset;
unsigned long data_seg_len;
uint32_t itt;
int err;
struct ib_sge *tx_dsg;
itt = (__force uint32_t)hdr->itt;
data_seg_len = ntoh24(hdr->dlength);
buf_offset = ntohl(hdr->offset);
iser_dbg("%s itt %d dseg_len %d offset %d\n",
__func__, (int)itt, (int)data_seg_len, (int)buf_offset);
tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
if (!tx_desc)
return -ENOMEM;
tx_desc->type = ISCSI_TX_DATAOUT;
tx_desc->cqe.done = iser_dataout_comp;
tx_desc->iser_header.flags = ISER_VER;
memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
/* build the tx desc */
err = iser_initialize_task_headers(task, tx_desc);
if (err)
goto send_data_out_error;
mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT];
tx_dsg = &tx_desc->tx_sg[1];
tx_dsg->addr = mem_reg->sge.addr + buf_offset;
tx_dsg->length = data_seg_len;
tx_dsg->lkey = mem_reg->sge.lkey;
tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
iser_err("Offset:%ld & DSL:%ld in Data-Out inconsistent with total len:%ld, itt:%d\n",
buf_offset, data_seg_len,
iser_task->data[ISER_DIR_OUT].data_len, itt);
err = -EINVAL;
goto send_data_out_error;
}
iser_dbg("data-out itt: %d, offset: %ld, sz: %ld\n",
itt, buf_offset, data_seg_len);
err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
send_data_out_error:
kmem_cache_free(ig.desc_cache, tx_desc);
iser_err("conn %p failed err %d\n", conn, err);
return err;
}
int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *mdesc = &iser_task->desc;
unsigned long data_seg_len;
int err = 0;
struct iser_device *device;
/* build the tx desc regd header and add it to the tx desc dto */
iser_create_send_desc(iser_conn, mdesc, ISCSI_TX_CONTROL,
iser_ctrl_comp);
device = iser_conn->ib_conn.device;
data_seg_len = ntoh24(task->hdr->dlength);
if (data_seg_len > 0) {
struct iser_login_desc *desc = &iser_conn->login_desc;
struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
if (task != conn->login_task) {
iser_err("data present on non login task!!!\n");
goto send_control_error;
}
ib_dma_sync_single_for_cpu(device->ib_device, desc->req_dma,
task->data_count, DMA_TO_DEVICE);
memcpy(desc->req, task->data, task->data_count);
ib_dma_sync_single_for_device(device->ib_device, desc->req_dma,
task->data_count, DMA_TO_DEVICE);
tx_dsg->addr = desc->req_dma;
tx_dsg->length = task->data_count;
tx_dsg->lkey = device->pd->local_dma_lkey;
mdesc->num_sge = 2;
}
if (task == conn->login_task) {
iser_dbg("op %x dsl %lx, posting login rx buffer\n",
task->hdr->opcode, data_seg_len);
err = iser_post_recvl(iser_conn);
if (err)
goto send_control_error;
err = iser_post_rx_bufs(conn, task->hdr);
if (err)
goto send_control_error;
}
err = iser_post_send(&iser_conn->ib_conn, mdesc);
if (!err)
return 0;
send_control_error:
iser_err("conn %p failed err %d\n",conn, err);
return err;
}
void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_conn *ib_conn = wc->qp->qp_context;
struct iser_conn *iser_conn = to_iser_conn(ib_conn);
struct iser_login_desc *desc = iser_login(wc->wr_cqe);
struct iscsi_hdr *hdr;
char *data;
int length;
bool full_feature_phase;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "login_rsp");
return;
}
ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
desc->rsp_dma, ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
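/*
* The login buffer holds the iSER header followed by the iSCSI header;
* the login payload starts after both, at ISER_HEADERS_LEN.
*/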
hdr = desc->rsp + sizeof(struct iser_ctrl);
data = desc->rsp + ISER_HEADERS_LEN;
length = wc->byte_len - ISER_HEADERS_LEN;
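/*
* The target moves the connection to full feature phase when the login
* response carries the FFP next-stage bits together with the transit
* (final) flag.
*/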
full_feature_phase = ((hdr->flags & ISCSI_FULL_FEATURE_PHASE) ==
ISCSI_FULL_FEATURE_PHASE) &&
(hdr->flags & ISCSI_FLAG_CMD_FINAL);
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, length);
iscsi_iser_recv(iser_conn->iscsi_conn, hdr, data, length);
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
desc->rsp_dma, ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
if (!full_feature_phase ||
iser_conn->iscsi_conn->session->discovery_sess)
return;
/* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
iser_post_recvm(iser_conn, iser_conn->rx_descs);
}
static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
(desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
iser_err("Bogus remote invalidation for rkey %#x\n", rkey);
return -EINVAL;
}
desc->rsc.mr_valid = 0;
return 0;
}
static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
struct iscsi_hdr *hdr)
{
if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
struct iscsi_task *task;
u32 rkey = wc->ex.invalidate_rkey;
iser_dbg("conn %p: remote invalidation for rkey %#x\n",
iser_conn, rkey);
if (unlikely(!iser_conn->snd_w_inv)) {
iser_err("conn %p: unexpected remote invalidation, terminating connection\n",
iser_conn);
return -EPROTO;
}
task = iscsi_itt_to_ctask(iser_conn->iscsi_conn, hdr->itt);
if (likely(task)) {
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_fr_desc *desc;
if (iser_task->dir[ISER_DIR_IN]) {
desc = iser_task->rdma_reg[ISER_DIR_IN].desc;
if (unlikely(iser_inv_desc(desc, rkey)))
return -EINVAL;
}
if (iser_task->dir[ISER_DIR_OUT]) {
desc = iser_task->rdma_reg[ISER_DIR_OUT].desc;
if (unlikely(iser_inv_desc(desc, rkey)))
return -EINVAL;
}
} else {
iser_err("failed to get task for itt=%d\n", hdr->itt);
return -EINVAL;
}
}
return 0;
}
void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
{
struct ib_conn *ib_conn = wc->qp->qp_context;
struct iser_conn *iser_conn = to_iser_conn(ib_conn);
struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
struct iscsi_hdr *hdr;
int length, err;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "task_rsp");
return;
}
ib_dma_sync_single_for_cpu(ib_conn->device->ib_device,
desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
DMA_FROM_DEVICE);
hdr = &desc->iscsi_header;
length = wc->byte_len - ISER_HEADERS_LEN;
iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
hdr->itt, length);
if (iser_check_remote_inv(iser_conn, wc, hdr)) {
iscsi_conn_failure(iser_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
return;
}
iscsi_iser_recv(iser_conn->iscsi_conn, hdr, desc->data, length);
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
DMA_FROM_DEVICE);
err = iser_post_recvm(iser_conn, desc);
if (err)
iser_err("posting rx buffer err %d\n", err);
}
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
{
if (unlikely(wc->status != IB_WC_SUCCESS))
iser_err_comp(wc, "command");
}
void iser_ctrl_comp(struct ib_cq *cq, struct ib_wc *wc)
{
struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
struct iscsi_task *task;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "control");
return;
}
/* this arithmetic is valid because libiscsi allocates dd_data right after struct iscsi_task */
task = (void *)desc - sizeof(struct iscsi_task);
if (task->hdr->itt == RESERVED_ITT)
iscsi_put_task(task);
}
void iser_dataout_comp(struct ib_cq *cq, struct ib_wc *wc)
{
struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);
struct ib_conn *ib_conn = wc->qp->qp_context;
struct iser_device *device = ib_conn->device;
if (unlikely(wc->status != IB_WC_SUCCESS))
iser_err_comp(wc, "dataout");
ib_dma_unmap_single(device->ib_device, desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, desc);
}
void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
{
iser_task->status = ISER_TASK_STATUS_INIT;
iser_task->dir[ISER_DIR_IN] = 0;
iser_task->dir[ISER_DIR_OUT] = 0;
iser_task->data[ISER_DIR_IN].data_len = 0;
iser_task->data[ISER_DIR_OUT].data_len = 0;
iser_task->prot[ISER_DIR_IN].data_len = 0;
iser_task->prot[ISER_DIR_OUT].data_len = 0;
iser_task->prot[ISER_DIR_IN].dma_nents = 0;
iser_task->prot[ISER_DIR_OUT].dma_nents = 0;
memset(&iser_task->rdma_reg[ISER_DIR_IN], 0,
sizeof(struct iser_mem_reg));
memset(&iser_task->rdma_reg[ISER_DIR_OUT], 0,
sizeof(struct iser_mem_reg));
}
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
if (iser_task->dir[ISER_DIR_IN]) {
iser_unreg_mem_fastreg(iser_task, ISER_DIR_IN);
iser_dma_unmap_task_data(iser_task, ISER_DIR_IN,
DMA_FROM_DEVICE);
}
if (iser_task->dir[ISER_DIR_OUT]) {
iser_unreg_mem_fastreg(iser_task, ISER_DIR_OUT);
iser_dma_unmap_task_data(iser_task, ISER_DIR_OUT,
DMA_TO_DEVICE);
}
}
| linux-master | drivers/infiniband/ulp/iser/iser_initiator.c |
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
* Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "iscsi_iser.h"
static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
iser_err("qp event %s (%d)\n",
ib_event_msg(cause->event), cause->event);
}
static void iser_event_handler(struct ib_event_handler *handler,
struct ib_event *event)
{
iser_err("async event %s (%d) on device %s port %d\n",
ib_event_msg(event->event), event->event,
dev_name(&event->device->dev), event->element.port_num);
}
/*
* iser_create_device_ib_res - allocates the Protection Domain (PD) and
* registers the async event handler for the IB device associated with
* the adapter.
*
* Return: 0 on success, -1 on failure
*/
static int iser_create_device_ib_res(struct iser_device *device)
{
struct ib_device *ib_dev = device->ib_device;
if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)) {
iser_err("IB device does not support memory registrations\n");
return -1;
}
device->pd = ib_alloc_pd(ib_dev,
iser_always_reg ? 0 : IB_PD_UNSAFE_GLOBAL_RKEY);
if (IS_ERR(device->pd))
goto pd_err;
INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
iser_event_handler);
ib_register_event_handler(&device->event_handler);
return 0;
pd_err:
iser_err("failed to allocate an IB resource\n");
return -1;
}
/*
* iser_free_device_ib_res - unregisters the event handler and deallocates
* the PD created for the IB device associated with the adapter.
*/
static void iser_free_device_ib_res(struct iser_device *device)
{
ib_unregister_event_handler(&device->event_handler);
ib_dealloc_pd(device->pd);
device->pd = NULL;
}
static struct iser_fr_desc *
iser_create_fastreg_desc(struct iser_device *device,
struct ib_pd *pd,
bool pi_enable,
unsigned int size)
{
struct iser_fr_desc *desc;
struct ib_device *ib_dev = device->ib_device;
enum ib_mr_type mr_type;
int ret;
desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return ERR_PTR(-ENOMEM);
if (ib_dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
mr_type = IB_MR_TYPE_SG_GAPS;
else
mr_type = IB_MR_TYPE_MEM_REG;
desc->rsc.mr = ib_alloc_mr(pd, mr_type, size);
if (IS_ERR(desc->rsc.mr)) {
ret = PTR_ERR(desc->rsc.mr);
iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
goto err_alloc_mr;
}
if (pi_enable) {
desc->rsc.sig_mr = ib_alloc_mr_integrity(pd, size, size);
if (IS_ERR(desc->rsc.sig_mr)) {
ret = PTR_ERR(desc->rsc.sig_mr);
iser_err("Failed to allocate sig_mr err=%d\n", ret);
goto err_alloc_mr_integrity;
}
}
desc->rsc.mr_valid = 0;
return desc;
err_alloc_mr_integrity:
ib_dereg_mr(desc->rsc.mr);
err_alloc_mr:
kfree(desc);
return ERR_PTR(ret);
}
static void iser_destroy_fastreg_desc(struct iser_fr_desc *desc)
{
struct iser_reg_resources *res = &desc->rsc;
ib_dereg_mr(res->mr);
if (res->sig_mr) {
ib_dereg_mr(res->sig_mr);
res->sig_mr = NULL;
}
kfree(desc);
}
/**
* iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
* for fast registration work requests.
* @ib_conn: connection RDMA resources
* @cmds_max: max number of SCSI commands for this connection
* @size: max number of pages per map request
*
* Return: 0 on success, or errno code on failure
*/
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
unsigned cmds_max,
unsigned int size)
{
struct iser_device *device = ib_conn->device;
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_fr_desc *desc;
int i, ret;
INIT_LIST_HEAD(&fr_pool->list);
INIT_LIST_HEAD(&fr_pool->all_list);
spin_lock_init(&fr_pool->lock);
fr_pool->size = 0;
for (i = 0; i < cmds_max; i++) {
desc = iser_create_fastreg_desc(device, device->pd,
ib_conn->pi_support, size);
if (IS_ERR(desc)) {
ret = PTR_ERR(desc);
goto err;
}
list_add_tail(&desc->list, &fr_pool->list);
list_add_tail(&desc->all_list, &fr_pool->all_list);
fr_pool->size++;
}
return 0;
err:
iser_free_fastreg_pool(ib_conn);
return ret;
}
/**
* iser_free_fastreg_pool - releases the pool of fast_reg descriptors
* @ib_conn: connection RDMA resources
*/
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_fr_desc *desc, *tmp;
int i = 0;
if (list_empty(&fr_pool->all_list))
return;
iser_info("freeing conn %p fr pool\n", ib_conn);
list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
list_del(&desc->all_list);
iser_destroy_fastreg_desc(desc);
++i;
}
if (i < fr_pool->size)
iser_warn("pool still has %d regions registered\n",
fr_pool->size - i);
}
/*
* iser_create_ib_conn_res - creates the connection CQ and Queue-Pair (QP)
*
* Return: 0 on success, negative errno on failure
*/
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
struct iser_conn *iser_conn = to_iser_conn(ib_conn);
struct iser_device *device;
struct ib_device *ib_dev;
struct ib_qp_init_attr init_attr;
int ret = -ENOMEM;
unsigned int max_send_wr, cq_size;
BUG_ON(ib_conn->device == NULL);
device = ib_conn->device;
ib_dev = device->ib_device;
/* +1 for drain */
if (ib_conn->pi_support)
max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
else
max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
max_send_wr = min_t(unsigned int, max_send_wr,
(unsigned int)ib_dev->attrs.max_qp_wr);
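/* a single CQ is shared by the send and receive queues, so size it for both */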
cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;
ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
if (IS_ERR(ib_conn->cq)) {
ret = PTR_ERR(ib_conn->cq);
goto cq_err;
}
ib_conn->cq_size = cq_size;
memset(&init_attr, 0, sizeof(init_attr));
init_attr.event_handler = iser_qp_event_callback;
init_attr.qp_context = (void *)ib_conn;
init_attr.send_cq = ib_conn->cq;
init_attr.recv_cq = ib_conn->cq;
/* +1 for drain */
init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS + 1;
init_attr.cap.max_send_sge = 2;
init_attr.cap.max_recv_sge = 1;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr.qp_type = IB_QPT_RC;
init_attr.cap.max_send_wr = max_send_wr;
if (ib_conn->pi_support)
init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
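/* max_send_wr includes the drain WR; exclude it from the command budget */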
iser_conn->max_cmds = ISER_GET_MAX_XMIT_CMDS(max_send_wr - 1);
ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
if (ret)
goto out_err;
ib_conn->qp = ib_conn->cma_id->qp;
iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn,
ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr);
return ret;
out_err:
ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
cq_err:
iser_err("unable to alloc mem or create resource, err %d\n", ret);
return ret;
}
/*
* Based on the resolved device node GUID, see if there is an already
* allocated iser device for this IB device. If there is none, create one.
*/
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
mutex_lock(&ig.device_list_mutex);
list_for_each_entry(device, &ig.device_list, ig_list)
/* find if there's a match using the node GUID */
if (device->ib_device->node_guid == cma_id->device->node_guid)
goto inc_refcnt;
device = kzalloc(sizeof *device, GFP_KERNEL);
if (!device)
goto out;
/* assign the resolved IB device to this iser device */
device->ib_device = cma_id->device;
/* init the device and link it into ig device list */
if (iser_create_device_ib_res(device)) {
kfree(device);
device = NULL;
goto out;
}
list_add(&device->ig_list, &ig.device_list);
inc_refcnt:
device->refcount++;
out:
mutex_unlock(&ig.device_list_mutex);
return device;
}
/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
mutex_lock(&ig.device_list_mutex);
device->refcount--;
iser_info("device %p refcount %d\n", device, device->refcount);
if (!device->refcount) {
iser_free_device_ib_res(device);
list_del(&device->ig_list);
kfree(device);
}
mutex_unlock(&ig.device_list_mutex);
}
void iser_release_work(struct work_struct *work)
{
struct iser_conn *iser_conn;
iser_conn = container_of(work, struct iser_conn, release_work);
/* Wait for conn_stop to complete */
wait_for_completion(&iser_conn->stop_completion);
/* Wait for IB resources cleanup to complete */
wait_for_completion(&iser_conn->ib_completion);
mutex_lock(&iser_conn->state_mutex);
iser_conn->state = ISER_CONN_DOWN;
mutex_unlock(&iser_conn->state_mutex);
iser_conn_release(iser_conn);
}
/**
* iser_free_ib_conn_res - release IB related resources
* @iser_conn: iser connection struct
* @destroy: indicator if we need to try to release the
* iser device and memory regions pool (only iscsi
* shutdown and DEVICE_REMOVAL will use this).
*
* This routine is called with the iser state mutex held
* so the cm_id removal is out of here. It is safe to
* be invoked multiple times.
*/
static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
iser_info("freeing conn %p cma_id %p qp %p\n",
iser_conn, ib_conn->cma_id, ib_conn->qp);
if (ib_conn->qp) {
rdma_destroy_qp(ib_conn->cma_id);
ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
ib_conn->qp = NULL;
}
if (destroy) {
if (iser_conn->rx_descs)
iser_free_rx_descriptors(iser_conn);
if (device) {
iser_device_try_release(device);
ib_conn->device = NULL;
}
}
}
/**
* iser_conn_release - Frees all conn objects and deallocs conn descriptor
* @iser_conn: iSER connection context
*/
void iser_conn_release(struct iser_conn *iser_conn)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
mutex_lock(&ig.connlist_mutex);
list_del(&iser_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
mutex_lock(&iser_conn->state_mutex);
/* In case we end up here without ep_disconnect being invoked. */
if (iser_conn->state != ISER_CONN_DOWN) {
iser_warn("iser conn %p state %d, expected state down.\n",
iser_conn, iser_conn->state);
iscsi_destroy_endpoint(iser_conn->ep);
iser_conn->state = ISER_CONN_DOWN;
}
/*
* In case we never got to bind stage, we still need to
* release IB resources (which is safe to call more than once).
*/
iser_free_ib_conn_res(iser_conn, true);
mutex_unlock(&iser_conn->state_mutex);
if (ib_conn->cma_id) {
rdma_destroy_id(ib_conn->cma_id);
ib_conn->cma_id = NULL;
}
kfree(iser_conn);
}
/**
* iser_conn_terminate - triggers start of the disconnect procedures and
* waits for them to be done
* @iser_conn: iSER connection context
*
* Called with state mutex held
*/
int iser_conn_terminate(struct iser_conn *iser_conn)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
int err = 0;
lockdep_assert_held(&iser_conn->state_mutex);
/* terminate the iser conn only if the conn state is UP */
if (iser_conn->state != ISER_CONN_UP)
return 0;
iser_conn->state = ISER_CONN_TERMINATING;
iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);
/* suspend queuing of new iscsi commands */
if (iser_conn->iscsi_conn)
iscsi_suspend_queue(iser_conn->iscsi_conn);
/*
* In case we didn't already clean up the cma_id (peer initiated
* a disconnection), we need to cause the CMA to change the QP
* state to ERROR.
*/
if (ib_conn->cma_id) {
err = rdma_disconnect(ib_conn->cma_id);
if (err)
iser_err("Failed to disconnect, conn: 0x%p err %d\n",
iser_conn, err);
/* block until all flush errors are consumed */
ib_drain_qp(ib_conn->qp);
}
return 1;
}
/*
* Called with state mutex held
*/
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *iser_conn = cma_id->context;
lockdep_assert_held(&iser_conn->state_mutex);
iser_conn->state = ISER_CONN_TERMINATING;
}
static void iser_calc_scsi_params(struct iser_conn *iser_conn,
unsigned int max_sectors)
{
struct iser_device *device = iser_conn->ib_conn.device;
struct ib_device_attr *attr = &device->ib_device->attrs;
unsigned short sg_tablesize, sup_sg_tablesize;
unsigned short reserved_mr_pages;
u32 max_num_sg;
/*
* FRs without SG_GAPS can only map up to a (device) page per entry,
* but if the first entry is misaligned we'll end up using two entries
* (head and tail) for a single page's worth of data, so one additional
* entry is required.
*/
if (attr->kernel_cap_flags & IBK_SG_GAPS_REG)
reserved_mr_pages = 0;
else
reserved_mr_pages = 1;
if (iser_conn->ib_conn.pi_support)
max_num_sg = attr->max_pi_fast_reg_page_list_len;
else
max_num_sg = attr->max_fast_reg_page_list_len;
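/*
* Worst case every 4K page of a max_sectors transfer needs its own SG
* entry; clamp to what a fast-reg MR can map, minus any reserved
* alignment entry.
*/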
sg_tablesize = DIV_ROUND_UP(max_sectors * SECTOR_SIZE, SZ_4K);
sup_sg_tablesize = min_t(uint, ISCSI_ISER_MAX_SG_TABLESIZE,
max_num_sg - reserved_mr_pages);
iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
iser_conn->pages_per_mr =
iser_conn->scsi_sg_tablesize + reserved_mr_pages;
}
/*
* Called with state mutex held
*/
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_conn *iser_conn = cma_id->context;
struct iser_device *device;
struct ib_conn *ib_conn;
int ret;
lockdep_assert_held(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
ib_conn = &iser_conn->ib_conn;
device = iser_device_find_by_ib_device(cma_id);
if (!device) {
iser_err("device lookup/creation failed\n");
iser_connect_error(cma_id);
return;
}
ib_conn->device = device;
/* connection T10-PI support */
if (iser_pi_enable) {
if (!(device->ib_device->attrs.kernel_cap_flags &
IBK_INTEGRITY_HANDOVER)) {
iser_warn("T10-PI requested but not supported on %s, "
"continue without T10-PI\n",
dev_name(&ib_conn->device->ib_device->dev));
ib_conn->pi_support = false;
} else {
ib_conn->pi_support = true;
}
}
iser_calc_scsi_params(iser_conn, iser_max_sectors);
ret = rdma_resolve_route(cma_id, 1000);
if (ret) {
iser_err("resolve route failed: %d\n", ret);
iser_connect_error(cma_id);
return;
}
}
/*
* Called with state mutex held
*/
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
int ret;
struct iser_cm_hdr req_hdr;
struct iser_conn *iser_conn = cma_id->context;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct ib_device *ib_dev = ib_conn->device->ib_device;
lockdep_assert_held(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
ret = iser_create_ib_conn_res(ib_conn);
if (ret)
goto failure;
memset(&conn_param, 0, sizeof conn_param);
conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
conn_param.initiator_depth = 1;
conn_param.retry_count = 7;
conn_param.rnr_retry_count = 6;
memset(&req_hdr, 0, sizeof(req_hdr));
req_hdr.flags = ISER_ZBVA_NOT_SUP;
if (!iser_always_reg)
req_hdr.flags |= ISER_SEND_W_INV_NOT_SUP;
conn_param.private_data = (void *)&req_hdr;
conn_param.private_data_len = sizeof(struct iser_cm_hdr);
ret = rdma_connect_locked(cma_id, &conn_param);
if (ret) {
iser_err("failure connecting: %d\n", ret);
goto failure;
}
return;
failure:
iser_connect_error(cma_id);
}
/*
* Called with state mutex held
*/
static void iser_connected_handler(struct rdma_cm_id *cma_id,
const void *private_data)
{
struct iser_conn *iser_conn = cma_id->context;
struct ib_qp_attr attr;
struct ib_qp_init_attr init_attr;
lockdep_assert_held(&iser_conn->state_mutex);
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
(void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);
if (private_data) {
u8 flags = *(u8 *)private_data;
iser_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP);
}
iser_info("conn %p: negotiated %s invalidation\n",
iser_conn, iser_conn->snd_w_inv ? "remote" : "local");
iser_conn->state = ISER_CONN_UP;
complete(&iser_conn->up_completion);
}
/*
* Called with state mutex held
*/
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
bool destroy)
{
struct iser_conn *iser_conn = cma_id->context;
lockdep_assert_held(&iser_conn->state_mutex);
/*
* We are not guaranteed to have visited the disconnected handler
* by now, so terminate here to be safe that we handle the CM DREP
* and flush errors.
*/
if (iser_conn_terminate(iser_conn)) {
if (iser_conn->iscsi_conn)
iscsi_conn_failure(iser_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
else
iser_err("iscsi_iser connection isn't bound\n");
}
iser_free_ib_conn_res(iser_conn, destroy);
complete(&iser_conn->ib_completion);
}
static int iser_cma_handler(struct rdma_cm_id *cma_id,
struct rdma_cm_event *event)
{
struct iser_conn *iser_conn;
int ret = 0;
iser_conn = cma_id->context;
iser_info("%s (%d): status %d conn %p id %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id->context, cma_id);
mutex_lock(&iser_conn->state_mutex);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
iser_addr_handler(cma_id);
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
iser_route_handler(cma_id);
break;
case RDMA_CM_EVENT_ESTABLISHED:
iser_connected_handler(cma_id, event->param.conn.private_data);
break;
case RDMA_CM_EVENT_REJECTED:
iser_info("Connection rejected: %s\n",
rdma_reject_msg(cma_id, event->status));
fallthrough;
case RDMA_CM_EVENT_ADDR_ERROR:
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
iser_connect_error(cma_id);
break;
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
iser_cleanup_handler(cma_id, false);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
/*
* we *must* destroy the device as we cannot rely
* on iscsid to be around to initiate error handling.
* Also, if we are not in state DOWN, implicitly destroy
* the cma_id.
*/
iser_cleanup_handler(cma_id, true);
if (iser_conn->state != ISER_CONN_DOWN) {
iser_conn->ib_conn.cma_id = NULL;
ret = 1;
}
break;
default:
iser_err("Unexpected RDMA CM event: %s (%d)\n",
rdma_event_msg(event->event), event->event);
break;
}
mutex_unlock(&iser_conn->state_mutex);
return ret;
}
void iser_conn_init(struct iser_conn *iser_conn)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
iser_conn->state = ISER_CONN_INIT;
init_completion(&iser_conn->stop_completion);
init_completion(&iser_conn->ib_completion);
init_completion(&iser_conn->up_completion);
INIT_LIST_HEAD(&iser_conn->conn_list);
mutex_init(&iser_conn->state_mutex);
ib_conn->reg_cqe.done = iser_reg_comp;
}
/*
* starts the process of connecting to the target
* sleeps until the connection is established or rejected
*/
int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr,
struct sockaddr *dst_addr, int non_blocking)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
int err = 0;
mutex_lock(&iser_conn->state_mutex);
sprintf(iser_conn->name, "%pISp", dst_addr);
iser_info("connecting to: %s\n", iser_conn->name);
/* the device is known only --after-- address resolution */
ib_conn->device = NULL;
iser_conn->state = ISER_CONN_PENDING;
ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
iser_conn, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ib_conn->cma_id)) {
err = PTR_ERR(ib_conn->cma_id);
iser_err("rdma_create_id failed: %d\n", err);
goto id_failure;
}
err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
if (err) {
iser_err("rdma_resolve_addr failed: %d\n", err);
goto addr_failure;
}
if (!non_blocking) {
wait_for_completion_interruptible(&iser_conn->up_completion);
if (iser_conn->state != ISER_CONN_UP) {
err = -EIO;
goto connect_failure;
}
}
mutex_unlock(&iser_conn->state_mutex);
mutex_lock(&ig.connlist_mutex);
list_add(&iser_conn->conn_list, &ig.connlist);
mutex_unlock(&ig.connlist_mutex);
return 0;
id_failure:
ib_conn->cma_id = NULL;
addr_failure:
iser_conn->state = ISER_CONN_DOWN;
connect_failure:
mutex_unlock(&iser_conn->state_mutex);
iser_conn_release(iser_conn);
return err;
}
int iser_post_recvl(struct iser_conn *iser_conn)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_login_desc *desc = &iser_conn->login_desc;
struct ib_recv_wr wr;
int ret;
desc->sge.addr = desc->rsp_dma;
desc->sge.length = ISER_RX_LOGIN_SIZE;
desc->sge.lkey = ib_conn->device->pd->local_dma_lkey;
desc->cqe.done = iser_login_rsp;
wr.wr_cqe = &desc->cqe;
wr.sg_list = &desc->sge;
wr.num_sge = 1;
wr.next = NULL;
ret = ib_post_recv(ib_conn->qp, &wr, NULL);
if (unlikely(ret))
iser_err("ib_post_recv login failed ret=%d\n", ret);
return ret;
}
int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct ib_recv_wr wr;
int ret;
rx_desc->cqe.done = iser_task_rsp;
wr.wr_cqe = &rx_desc->cqe;
wr.sg_list = &rx_desc->rx_sg;
wr.num_sge = 1;
wr.next = NULL;
ret = ib_post_recv(ib_conn->qp, &wr, NULL);
if (unlikely(ret))
iser_err("ib_post_recv failed ret=%d\n", ret);
return ret;
}
/**
* iser_post_send - Initiate a Send DTO operation
* @ib_conn: connection RDMA resources
* @tx_desc: iSER TX descriptor
*
* Return: 0 on success, -1 on failure
*/
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
struct ib_send_wr *wr = &tx_desc->send_wr;
struct ib_send_wr *first_wr;
int ret;
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN,
DMA_TO_DEVICE);
wr->next = NULL;
wr->wr_cqe = &tx_desc->cqe;
wr->sg_list = tx_desc->tx_sg;
wr->num_sge = tx_desc->num_sge;
wr->opcode = IB_WR_SEND;
wr->send_flags = IB_SEND_SIGNALED;
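/*
* If a local invalidate and/or fast-reg WR is pending for this task,
* chain it ahead of the SEND and post from the head of the chain.
*/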
if (tx_desc->inv_wr.next)
first_wr = &tx_desc->inv_wr;
else if (tx_desc->reg_wr.wr.next)
first_wr = &tx_desc->reg_wr.wr;
else
first_wr = wr;
ret = ib_post_send(ib_conn->qp, first_wr, NULL);
if (unlikely(ret))
iser_err("ib_post_send failed, ret:%d opcode:%d\n",
ret, wr->opcode);
return ret;
}
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector)
{
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
struct iser_fr_desc *desc = reg->desc;
unsigned long sector_size = iser_task->sc->device->sector_size;
struct ib_mr_status mr_status;
int ret;
if (desc && desc->sig_protected) {
desc->sig_protected = false;
ret = ib_check_mr_status(desc->rsc.sig_mr,
IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
iser_err("ib_check_mr_status failed, ret %d\n", ret);
/* Not a lot we can do, return ambiguous guard error */
*sector = 0;
return 0x1;
}
if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
sector_t sector_off = mr_status.sig_err.sig_err_offset;
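/*
* The reported error offset counts interleaved data + PI bytes,
* so each block spans sector_size + 8 bytes.
*/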
sector_div(sector_off, sector_size + 8);
*sector = scsi_get_sector(iser_task->sc) + sector_off;
iser_err("PI error found type %d at sector %llx "
"expected %x vs actual %x\n",
mr_status.sig_err.err_type,
(unsigned long long)*sector,
mr_status.sig_err.expected,
mr_status.sig_err.actual);
switch (mr_status.sig_err.err_type) {
case IB_SIG_BAD_GUARD:
return 0x1;
case IB_SIG_BAD_REFTAG:
return 0x3;
case IB_SIG_BAD_APPTAG:
return 0x2;
}
}
}
return 0;
}
void iser_err_comp(struct ib_wc *wc, const char *type)
{
if (wc->status != IB_WC_WR_FLUSH_ERR) {
struct iser_conn *iser_conn = to_iser_conn(wc->qp->qp_context);
iser_err("%s failure: %s (%d) vend_err %#x\n", type,
ib_wc_status_msg(wc->status), wc->status,
wc->vendor_err);
if (iser_conn->iscsi_conn)
iscsi_conn_failure(iser_conn->iscsi_conn,
ISCSI_ERR_CONN_FAILED);
} else {
iser_dbg("%s failure: %s (%d)\n", type,
ib_wc_status_msg(wc->status), wc->status);
}
}
| linux-master | drivers/infiniband/ulp/iser/iser_verbs.c |
/*
* Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
* Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include "iscsi_iser.h"
void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
{
iser_err_comp(wc, "memreg");
}
static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_fr_desc *desc;
unsigned long flags;
spin_lock_irqsave(&fr_pool->lock, flags);
desc = list_first_entry(&fr_pool->list,
struct iser_fr_desc, list);
list_del(&desc->list);
spin_unlock_irqrestore(&fr_pool->lock, flags);
return desc;
}
static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
struct iser_fr_desc *desc)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
unsigned long flags;
spin_lock_irqsave(&fr_pool->lock, flags);
list_add(&desc->list, &fr_pool->list);
spin_unlock_irqrestore(&fr_pool->lock, flags);
}
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir)
{
struct iser_data_buf *data = &iser_task->data[iser_dir];
struct ib_device *dev;
iser_task->dir[iser_dir] = 1;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->sg, data->size, dma_dir);
if (unlikely(data->dma_nents == 0)) {
iser_err("dma_map_sg failed!!!\n");
return -EINVAL;
}
if (scsi_prot_sg_count(iser_task->sc)) {
struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
pdata->dma_nents = ib_dma_map_sg(dev, pdata->sg, pdata->size, dma_dir);
if (unlikely(pdata->dma_nents == 0)) {
iser_err("protection dma_map_sg failed!!!\n");
goto out_unmap;
}
}
return 0;
out_unmap:
ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
return -EINVAL;
}
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
enum iser_data_dir iser_dir,
enum dma_data_direction dma_dir)
{
struct iser_data_buf *data = &iser_task->data[iser_dir];
struct ib_device *dev;
dev = iser_task->iser_conn->ib_conn.device->ib_device;
ib_dma_unmap_sg(dev, data->sg, data->size, dma_dir);
if (scsi_prot_sg_count(iser_task->sc)) {
struct iser_data_buf *pdata = &iser_task->prot[iser_dir];
ib_dma_unmap_sg(dev, pdata->sg, pdata->size, dma_dir);
}
}
static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
struct iser_mem_reg *reg)
{
struct scatterlist *sg = mem->sg;
reg->sge.lkey = device->pd->local_dma_lkey;
/*
* FIXME: rework the registration code path to differentiate
* rkey/lkey use cases
*/
if (device->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
reg->rkey = device->pd->unsafe_global_rkey;
else
reg->rkey = 0;
reg->sge.addr = sg_dma_address(&sg[0]);
reg->sge.length = sg_dma_len(&sg[0]);
iser_dbg("Single DMA entry: lkey=0x%x, rkey=0x%x, addr=0x%llx,"
" length=0x%x\n", reg->sge.lkey, reg->rkey,
reg->sge.addr, reg->sge.length);
return 0;
}
void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir)
{
struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
struct iser_fr_desc *desc;
struct ib_mr_status mr_status;
desc = reg->desc;
if (!desc)
return;
/*
* The signature MR cannot be invalidated and reused without checking.
* libiscsi calls the check_protection transport handler only if
* SCSI-Response is received. And the signature MR is not checked if
* the task is completed for some other reason like a timeout or error
* handling. That's why we must check the signature MR here before
* putting it to the free pool.
*/
if (unlikely(desc->sig_protected)) {
desc->sig_protected = false;
ib_check_mr_status(desc->rsc.sig_mr, IB_MR_CHECK_SIG_STATUS,
&mr_status);
}
iser_reg_desc_put_fr(&iser_task->iser_conn->ib_conn, reg->desc);
reg->desc = NULL;
}
static void iser_set_dif_domain(struct scsi_cmnd *sc,
struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = scsi_prot_interval(sc);
domain->sig.dif.ref_tag = t10_pi_ref_tag(scsi_cmd_to_rq(sc));
/*
* At the moment we hard code those, but in the future
* we will take them from sc.
*/
domain->sig.dif.apptag_check_mask = 0xffff;
domain->sig.dif.app_escape = true;
domain->sig.dif.ref_escape = true;
if (sc->prot_flags & SCSI_PROT_REF_INCREMENT)
domain->sig.dif.ref_remap = true;
}
static int iser_set_sig_attrs(struct scsi_cmnd *sc,
struct ib_sig_attrs *sig_attrs)
{
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_WRITE_INSERT:
case SCSI_PROT_READ_STRIP:
sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
iser_set_dif_domain(sc, &sig_attrs->wire);
sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
break;
case SCSI_PROT_READ_INSERT:
case SCSI_PROT_WRITE_STRIP:
sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
iser_set_dif_domain(sc, &sig_attrs->mem);
sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
case SCSI_PROT_READ_PASS:
case SCSI_PROT_WRITE_PASS:
iser_set_dif_domain(sc, &sig_attrs->wire);
sig_attrs->wire.sig.dif.bg_type = IB_T10DIF_CRC;
iser_set_dif_domain(sc, &sig_attrs->mem);
sig_attrs->mem.sig.dif.bg_type = sc->prot_flags & SCSI_PROT_IP_CHECKSUM ?
IB_T10DIF_CSUM : IB_T10DIF_CRC;
break;
default:
iser_err("Unsupported PI operation %d\n",
scsi_get_prot_op(sc));
return -EINVAL;
}
return 0;
}
static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
*mask = 0;
if (sc->prot_flags & SCSI_PROT_REF_CHECK)
*mask |= IB_SIG_CHECK_REFTAG;
if (sc->prot_flags & SCSI_PROT_GUARD_CHECK)
*mask |= IB_SIG_CHECK_GUARD;
}
static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr,
struct ib_cqe *cqe, struct ib_send_wr *next_wr)
{
inv_wr->opcode = IB_WR_LOCAL_INV;
inv_wr->wr_cqe = cqe;
inv_wr->ex.invalidate_rkey = mr->rkey;
inv_wr->send_flags = 0;
inv_wr->num_sge = 0;
inv_wr->next = next_wr;
}
static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
struct iser_data_buf *sig_mem,
struct iser_reg_resources *rsc,
struct iser_mem_reg *sig_reg)
{
struct iser_tx_desc *tx_desc = &iser_task->desc;
struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
struct ib_mr *mr = rsc->sig_mr;
struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
struct ib_reg_wr *wr = &tx_desc->reg_wr;
int ret;
memset(sig_attrs, 0, sizeof(*sig_attrs));
ret = iser_set_sig_attrs(iser_task->sc, sig_attrs);
if (ret)
goto err;
iser_set_prot_checks(iser_task->sc, &sig_attrs->check_mask);
if (rsc->mr_valid)
iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
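/*
* Advance the key portion of the rkey so stale references to the
* previous registration are not honored.
*/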
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
ret = ib_map_mr_sg_pi(mr, mem->sg, mem->dma_nents, NULL,
sig_mem->sg, sig_mem->dma_nents, NULL, SZ_4K);
if (unlikely(ret)) {
iser_err("failed to map PI sg (%d)\n",
mem->dma_nents + sig_mem->dma_nents);
goto err;
}
memset(wr, 0, sizeof(*wr));
wr->wr.next = &tx_desc->send_wr;
wr->wr.opcode = IB_WR_REG_MR_INTEGRITY;
wr->wr.wr_cqe = cqe;
wr->wr.num_sge = 0;
wr->wr.send_flags = 0;
wr->mr = mr;
wr->key = mr->rkey;
wr->access = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE;
rsc->mr_valid = 1;
sig_reg->sge.lkey = mr->lkey;
sig_reg->rkey = mr->rkey;
sig_reg->sge.addr = mr->iova;
sig_reg->sge.length = mr->length;
iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=%u\n",
sig_reg->sge.lkey, sig_reg->rkey, sig_reg->sge.addr,
sig_reg->sge.length);
err:
return ret;
}
static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct iser_data_buf *mem,
struct iser_reg_resources *rsc,
struct iser_mem_reg *reg)
{
struct iser_tx_desc *tx_desc = &iser_task->desc;
struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
struct ib_mr *mr = rsc->mr;
struct ib_reg_wr *wr = &tx_desc->reg_wr;
int n;
if (rsc->mr_valid)
iser_inv_rkey(&tx_desc->inv_wr, mr, cqe, &wr->wr);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SZ_4K);
if (unlikely(n != mem->dma_nents)) {
iser_err("failed to map sg (%d/%d)\n",
n, mem->dma_nents);
return n < 0 ? n : -EINVAL;
}
wr->wr.next = &tx_desc->send_wr;
wr->wr.opcode = IB_WR_REG_MR;
wr->wr.wr_cqe = cqe;
wr->wr.send_flags = 0;
wr->wr.num_sge = 0;
wr->mr = mr;
wr->key = mr->rkey;
wr->access = IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ;
rsc->mr_valid = 1;
reg->sge.lkey = mr->lkey;
reg->rkey = mr->rkey;
reg->sge.addr = mr->iova;
reg->sge.length = mr->length;
iser_dbg("lkey=0x%x rkey=0x%x addr=0x%llx length=0x%x\n",
reg->sge.lkey, reg->rkey, reg->sge.addr, reg->sge.length);
return 0;
}
int iser_reg_mem_fastreg(struct iscsi_iser_task *task,
enum iser_data_dir dir,
bool all_imm)
{
struct ib_conn *ib_conn = &task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
struct iser_data_buf *mem = &task->data[dir];
struct iser_mem_reg *reg = &task->rdma_reg[dir];
struct iser_fr_desc *desc;
bool use_dma_key;
int err;
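/*
* A single DMA entry without protection can use the PD's local DMA lkey
* directly, unless registration is forced for non-immediate data.
*/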
use_dma_key = mem->dma_nents == 1 && (all_imm || !iser_always_reg) &&
scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL;
if (use_dma_key)
return iser_reg_dma(device, mem, reg);
desc = iser_reg_desc_get_fr(ib_conn);
if (scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL) {
err = iser_fast_reg_mr(task, mem, &desc->rsc, reg);
if (unlikely(err))
goto err_reg;
} else {
err = iser_reg_sig_mr(task, mem, &task->prot[dir],
&desc->rsc, reg);
if (unlikely(err))
goto err_reg;
desc->sig_protected = true;
}
reg->desc = desc;
return 0;
err_reg:
iser_reg_desc_put_fr(ib_conn, desc);
return err;
}
| linux-master | drivers/infiniband/ulp/iser/iser_memory.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
* This file contains iSCSI extensions for RDMA (iSER) Verbs
*
* (c) Copyright 2013 Datera, Inc.
*
* Nicholas A. Bellinger <[email protected]>
*
****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cm.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>
#include "ib_isert.h"
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
static int isert_sg_tablesize_set(const char *val,
const struct kernel_param *kp);
static const struct kernel_param_ops sg_tablesize_ops = {
.set = isert_sg_tablesize_set,
.get = param_get_int,
};
static int isert_sg_tablesize = ISCSI_ISER_MIN_SG_TABLESIZE;
module_param_cb(sg_tablesize, &sg_tablesize_ops, &isert_sg_tablesize, 0644);
MODULE_PARM_DESC(sg_tablesize,
"Number of gather/scatter entries in a single scsi command, should >= 128 (default: 128, max: 4096)");
static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_login_wq;
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
static int
isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd);
static int
isert_login_post_recv(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static void isert_release_work(struct work_struct *work);
static void isert_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_send_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc);
static int isert_sg_tablesize_set(const char *val, const struct kernel_param *kp)
{
int n = 0, ret;
ret = kstrtoint(val, 10, &n);
if (ret != 0 || n < ISCSI_ISER_MIN_SG_TABLESIZE ||
n > ISCSI_ISER_MAX_SG_TABLESIZE)
return -EINVAL;
return param_set_int(val, kp);
}
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
return (conn->pi_support &&
cmd->prot_op != TARGET_PROT_NORMAL);
}
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
struct isert_conn *isert_conn = context;
isert_err("%s (%d): conn %p\n",
ib_event_msg(e->event), e->event, isert_conn);
switch (e->event) {
case IB_EVENT_COMM_EST:
rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
break;
case IB_EVENT_QP_LAST_WQE_REACHED:
isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
break;
default:
break;
}
}
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
struct rdma_cm_id *cma_id)
{
u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2;
struct isert_device *device = isert_conn->device;
struct ib_device *ib_dev = device->ib_device;
struct ib_qp_init_attr attr;
int ret, factor;
isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);
if (IS_ERR(isert_conn->cq)) {
isert_err("Unable to allocate cq\n");
ret = PTR_ERR(isert_conn->cq);
return ERR_PTR(ret);
}
isert_conn->cq_size = cq_size;
memset(&attr, 0, sizeof(struct ib_qp_init_attr));
attr.event_handler = isert_qp_event_callback;
attr.qp_context = isert_conn;
attr.send_cq = isert_conn->cq;
attr.recv_cq = isert_conn->cq;
attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS + 1;
attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
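/*
* rdma_rw_mr_factor() returns how many MRs a single command's payload
* may need; budget RDMA R/W contexts for the full command window.
*/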
factor = rdma_rw_mr_factor(device->ib_device, cma_id->port_num,
isert_sg_tablesize);
attr.cap.max_rdma_ctxs = ISCSI_DEF_XMIT_CMDS_MAX * factor;
attr.cap.max_send_sge = device->ib_device->attrs.max_send_sge;
attr.cap.max_recv_sge = 1;
attr.sq_sig_type = IB_SIGNAL_REQ_WR;
attr.qp_type = IB_QPT_RC;
if (device->pi_capable)
attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
ret = rdma_create_qp(cma_id, device->pd, &attr);
if (ret) {
isert_err("rdma_create_qp failed for cma_id %d\n", ret);
ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
return ERR_PTR(ret);
}
return cma_id->qp;
}
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
struct isert_device *device = isert_conn->device;
struct ib_device *ib_dev = device->ib_device;
struct iser_rx_desc *rx_desc;
struct ib_sge *rx_sg;
u64 dma_addr;
int i, j;
isert_conn->rx_descs = kcalloc(ISERT_QP_MAX_RECV_DTOS,
sizeof(struct iser_rx_desc),
GFP_KERNEL);
if (!isert_conn->rx_descs)
return -ENOMEM;
rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,
ISER_RX_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(ib_dev, dma_addr))
goto dma_map_fail;
rx_desc->dma_addr = dma_addr;
rx_sg = &rx_desc->rx_sg;
rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc);
rx_sg->length = ISER_RX_PAYLOAD_SIZE;
rx_sg->lkey = device->pd->local_dma_lkey;
rx_desc->rx_cqe.done = isert_recv_done;
}
return 0;
dma_map_fail:
rx_desc = isert_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
}
kfree(isert_conn->rx_descs);
isert_conn->rx_descs = NULL;
isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
return -ENOMEM;
}
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->device->ib_device;
struct iser_rx_desc *rx_desc;
int i;
if (!isert_conn->rx_descs)
return;
rx_desc = isert_conn->rx_descs;
for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
}
kfree(isert_conn->rx_descs);
isert_conn->rx_descs = NULL;
}
static int
isert_create_device_ib_res(struct isert_device *device)
{
struct ib_device *ib_dev = device->ib_device;
int ret;
isert_dbg("devattr->max_send_sge: %d devattr->max_recv_sge %d\n",
ib_dev->attrs.max_send_sge, ib_dev->attrs.max_recv_sge);
isert_dbg("devattr->max_sge_rd: %d\n", ib_dev->attrs.max_sge_rd);
device->pd = ib_alloc_pd(ib_dev, 0);
if (IS_ERR(device->pd)) {
ret = PTR_ERR(device->pd);
isert_err("failed to allocate pd, device %p, ret=%d\n",
device, ret);
return ret;
}
/* Check signature cap */
if (ib_dev->attrs.kernel_cap_flags & IBK_INTEGRITY_HANDOVER)
device->pi_capable = true;
else
device->pi_capable = false;
return 0;
}
static void
isert_free_device_ib_res(struct isert_device *device)
{
isert_info("device %p\n", device);
ib_dealloc_pd(device->pd);
}
static void
isert_device_put(struct isert_device *device)
{
mutex_lock(&device_list_mutex);
device->refcount--;
isert_info("device %p refcount %d\n", device, device->refcount);
if (!device->refcount) {
isert_free_device_ib_res(device);
list_del(&device->dev_node);
kfree(device);
}
mutex_unlock(&device_list_mutex);
}
static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
struct isert_device *device;
int ret;
mutex_lock(&device_list_mutex);
list_for_each_entry(device, &device_list, dev_node) {
if (device->ib_device->node_guid == cma_id->device->node_guid) {
device->refcount++;
isert_info("Found iser device %p refcount %d\n",
device, device->refcount);
mutex_unlock(&device_list_mutex);
return device;
}
}
device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
if (!device) {
mutex_unlock(&device_list_mutex);
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&device->dev_node);
device->ib_device = cma_id->device;
ret = isert_create_device_ib_res(device);
if (ret) {
kfree(device);
mutex_unlock(&device_list_mutex);
return ERR_PTR(ret);
}
device->refcount++;
list_add_tail(&device->dev_node, &device_list);
isert_info("Created a new iser device %p refcount %d\n",
device, device->refcount);
mutex_unlock(&device_list_mutex);
return device;
}
static void
isert_init_conn(struct isert_conn *isert_conn)
{
isert_conn->state = ISER_CONN_INIT;
INIT_LIST_HEAD(&isert_conn->node);
init_completion(&isert_conn->login_comp);
init_completion(&isert_conn->login_req_comp);
init_waitqueue_head(&isert_conn->rem_wait);
kref_init(&isert_conn->kref);
mutex_init(&isert_conn->mutex);
INIT_WORK(&isert_conn->release_work, isert_release_work);
}
static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->device->ib_device;
ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
kfree(isert_conn->login_rsp_buf);
ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
kfree(isert_conn->login_desc);
}
static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
struct ib_device *ib_dev)
{
int ret;
isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc),
GFP_KERNEL);
if (!isert_conn->login_desc)
return -ENOMEM;
isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev,
isert_conn->login_desc->buf,
ISER_RX_SIZE, DMA_FROM_DEVICE);
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr);
if (ret) {
isert_err("login_desc dma mapping error: %d\n", ret);
isert_conn->login_desc->dma_addr = 0;
goto out_free_login_desc;
}
isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
if (!isert_conn->login_rsp_buf) {
ret = -ENOMEM;
goto out_unmap_login_desc;
}
isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
isert_conn->login_rsp_buf,
ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
if (ret) {
isert_err("login_rsp_dma mapping error: %d\n", ret);
isert_conn->login_rsp_dma = 0;
goto out_free_login_rsp_buf;
}
return 0;
out_free_login_rsp_buf:
kfree(isert_conn->login_rsp_buf);
out_unmap_login_desc:
ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
out_free_login_desc:
kfree(isert_conn->login_desc);
return ret;
}
static void
isert_set_nego_params(struct isert_conn *isert_conn,
struct rdma_conn_param *param)
{
struct ib_device_attr *attr = &isert_conn->device->ib_device->attrs;
/* Set max inflight RDMA READ requests */
isert_conn->initiator_depth = min_t(u8, param->initiator_depth,
attr->max_qp_init_rd_atom);
isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
if (param->private_data) {
u8 flags = *(u8 *)param->private_data;
/*
* use remote invalidation if both the initiator
* and the HCA support it
*/
isert_conn->snd_w_inv = !(flags & ISER_SEND_W_INV_NOT_SUP) &&
(attr->device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
if (isert_conn->snd_w_inv)
isert_info("Using remote invalidation\n");
}
}
static void
isert_destroy_qp(struct isert_conn *isert_conn)
{
ib_destroy_qp(isert_conn->qp);
ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);
}
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
struct isert_np *isert_np = cma_id->context;
struct iscsi_np *np = isert_np->np;
struct isert_conn *isert_conn;
struct isert_device *device;
int ret = 0;
spin_lock_bh(&np->np_thread_lock);
if (!np->enabled) {
spin_unlock_bh(&np->np_thread_lock);
isert_dbg("iscsi_np is not enabled, reject connect request\n");
return rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
}
spin_unlock_bh(&np->np_thread_lock);
isert_dbg("cma_id: %p, portal: %p\n",
cma_id, cma_id->context);
isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
if (!isert_conn)
return -ENOMEM;
isert_init_conn(isert_conn);
isert_conn->cm_id = cma_id;
device = isert_device_get(cma_id);
if (IS_ERR(device)) {
ret = PTR_ERR(device);
goto out;
}
isert_conn->device = device;
ret = isert_alloc_login_buf(isert_conn, cma_id->device);
if (ret)
goto out_conn_dev;
isert_set_nego_params(isert_conn, &event->param.conn);
isert_conn->qp = isert_create_qp(isert_conn, cma_id);
if (IS_ERR(isert_conn->qp)) {
ret = PTR_ERR(isert_conn->qp);
goto out_rsp_dma_map;
}
ret = isert_login_post_recv(isert_conn);
if (ret)
goto out_destroy_qp;
ret = isert_rdma_accept(isert_conn);
if (ret)
goto out_destroy_qp;
mutex_lock(&isert_np->mutex);
list_add_tail(&isert_conn->node, &isert_np->accepted);
mutex_unlock(&isert_np->mutex);
return 0;
out_destroy_qp:
isert_destroy_qp(isert_conn);
out_rsp_dma_map:
isert_free_login_buf(isert_conn);
out_conn_dev:
isert_device_put(device);
out:
kfree(isert_conn);
rdma_reject(cma_id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
return ret;
}
static void
isert_connect_release(struct isert_conn *isert_conn)
{
struct isert_device *device = isert_conn->device;
isert_dbg("conn %p\n", isert_conn);
BUG_ON(!device);
isert_free_rx_descriptors(isert_conn);
if (isert_conn->cm_id &&
!isert_conn->dev_removed)
rdma_destroy_id(isert_conn->cm_id);
if (isert_conn->qp)
isert_destroy_qp(isert_conn);
if (isert_conn->login_desc)
isert_free_login_buf(isert_conn);
isert_device_put(device);
if (isert_conn->dev_removed)
wake_up_interruptible(&isert_conn->rem_wait);
else
kfree(isert_conn);
}
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
struct isert_np *isert_np = cma_id->context;
isert_info("conn %p\n", isert_conn);
mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_UP;
kref_get(&isert_conn->kref);
mutex_unlock(&isert_conn->mutex);
mutex_lock(&isert_np->mutex);
list_move_tail(&isert_conn->node, &isert_np->pending);
mutex_unlock(&isert_np->mutex);
isert_info("np %p: Allow accept_np to continue\n", isert_np);
up(&isert_np->sem);
}
static void
isert_release_kref(struct kref *kref)
{
struct isert_conn *isert_conn = container_of(kref,
struct isert_conn, kref);
isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
current->pid);
isert_connect_release(isert_conn);
}
static void
isert_put_conn(struct isert_conn *isert_conn)
{
kref_put(&isert_conn->kref, isert_release_kref);
}
static void
isert_handle_unbound_conn(struct isert_conn *isert_conn)
{
struct isert_np *isert_np = isert_conn->cm_id->context;
mutex_lock(&isert_np->mutex);
if (!list_empty(&isert_conn->node)) {
/*
		 * This means iscsi doesn't know about this connection,
		 * so schedule a cleanup ourselves
*/
list_del_init(&isert_conn->node);
isert_put_conn(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);
}
mutex_unlock(&isert_np->mutex);
}
/**
* isert_conn_terminate() - Initiate connection termination
* @isert_conn: isert connection struct
*
* Notes:
* In case the connection state is BOUND, move state
 * to TERMINATING and start the teardown sequence (rdma_disconnect).
* In case the connection state is UP, complete flush as well.
*
* This routine must be called with mutex held. Thus it is
* safe to call multiple times.
*/
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
int err;
if (isert_conn->state >= ISER_CONN_TERMINATING)
return;
isert_info("Terminating conn %p state %d\n",
isert_conn, isert_conn->state);
isert_conn->state = ISER_CONN_TERMINATING;
err = rdma_disconnect(isert_conn->cm_id);
if (err)
isert_warn("Failed rdma_disconnect isert_conn %p\n",
isert_conn);
}
static int
isert_np_cma_handler(struct isert_np *isert_np,
enum rdma_cm_event_type event)
{
isert_dbg("%s (%d): isert np %p\n",
rdma_event_msg(event), event, isert_np);
switch (event) {
case RDMA_CM_EVENT_DEVICE_REMOVAL:
isert_np->cm_id = NULL;
break;
case RDMA_CM_EVENT_ADDR_CHANGE:
isert_np->cm_id = isert_setup_id(isert_np);
if (IS_ERR(isert_np->cm_id)) {
isert_err("isert np %p setup id failed: %ld\n",
isert_np, PTR_ERR(isert_np->cm_id));
isert_np->cm_id = NULL;
}
break;
default:
isert_err("isert np %p Unexpected event %d\n",
isert_np, event);
}
return -1;
}
static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
enum rdma_cm_event_type event)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
mutex_lock(&isert_conn->mutex);
switch (isert_conn->state) {
case ISER_CONN_TERMINATING:
break;
case ISER_CONN_UP:
isert_conn_terminate(isert_conn);
ib_drain_qp(isert_conn->qp);
isert_handle_unbound_conn(isert_conn);
break;
case ISER_CONN_BOUND:
case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
break;
default:
isert_warn("conn %p terminating in state %d\n",
isert_conn, isert_conn->state);
}
mutex_unlock(&isert_conn->mutex);
return 0;
}
static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
struct isert_np *isert_np = cma_id->context;
ib_drain_qp(isert_conn->qp);
mutex_lock(&isert_np->mutex);
list_del_init(&isert_conn->node);
mutex_unlock(&isert_np->mutex);
isert_conn->cm_id = NULL;
isert_put_conn(isert_conn);
return -1;
}
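/*
 * Main RDMA CM event callback. Events on the listening cm_id are routed
 * to isert_np_cma_handler(); per-connection events are dispatched to the
 * corresponding connect/established/disconnect/error handlers.
 */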
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
struct isert_np *isert_np = cma_id->context;
struct isert_conn *isert_conn;
int ret = 0;
isert_info("%s (%d): status %d id %p np %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id, cma_id->context);
if (isert_np->cm_id == cma_id)
return isert_np_cma_handler(cma_id->context, event->event);
switch (event->event) {
case RDMA_CM_EVENT_CONNECT_REQUEST:
ret = isert_connect_request(cma_id, event);
if (ret)
isert_err("failed handle connect request %d\n", ret);
break;
case RDMA_CM_EVENT_ESTABLISHED:
isert_connected_handler(cma_id);
break;
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
ret = isert_disconnected_handler(cma_id, event->event);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
isert_conn = cma_id->qp->qp_context;
isert_conn->dev_removed = true;
isert_disconnected_handler(cma_id, event->event);
wait_event_interruptible(isert_conn->rem_wait,
isert_conn->state == ISER_CONN_DOWN);
kfree(isert_conn);
/*
* return non-zero from the callback to destroy
* the rdma cm id
*/
return 1;
case RDMA_CM_EVENT_REJECTED:
isert_info("Connection rejected: %s\n",
rdma_reject_msg(cma_id, event->status));
fallthrough;
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_CONNECT_ERROR:
ret = isert_connect_error(cma_id);
break;
default:
isert_err("Unhandled RDMA CMA event: %d\n", event->event);
break;
}
return ret;
}
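/*
 * Post 'count' receive work requests as a single chained list, one WR
 * per rx descriptor.
 */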
static int
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
struct ib_recv_wr *rx_wr;
int i, ret;
struct iser_rx_desc *rx_desc;
for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
rx_desc = &isert_conn->rx_descs[i];
rx_wr->wr_cqe = &rx_desc->rx_cqe;
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
rx_desc->in_use = false;
}
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr, NULL);
if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
return ret;
}
static int
isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
{
struct ib_recv_wr rx_wr;
int ret;
if (!rx_desc->in_use) {
/*
* if the descriptor is not in-use we already reposted it
* for recv, so just silently return
*/
return 0;
}
rx_desc->in_use = false;
rx_wr.wr_cqe = &rx_desc->rx_cqe;
rx_wr.sg_list = &rx_desc->rx_sg;
rx_wr.num_sge = 1;
rx_wr.next = NULL;
ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
if (ret)
isert_err("ib_post_recv() failed with ret: %d\n", ret);
return ret;
}
static int
isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
struct ib_device *ib_dev = isert_conn->cm_id->device;
struct ib_send_wr send_wr;
int ret;
ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
tx_desc->tx_cqe.done = isert_login_send_done;
send_wr.next = NULL;
send_wr.wr_cqe = &tx_desc->tx_cqe;
send_wr.sg_list = tx_desc->tx_sg;
send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
ret = ib_post_send(isert_conn->qp, &send_wr, NULL);
if (ret)
isert_err("ib_post_send() failed, ret: %d\n", ret);
return ret;
}
static void
__isert_create_send_desc(struct isert_device *device,
struct iser_tx_desc *tx_desc)
{
memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
tx_desc->iser_header.flags = ISCSI_CTRL;
tx_desc->num_sge = 1;
if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
}
}
static void
isert_create_send_desc(struct isert_conn *isert_conn,
struct isert_cmd *isert_cmd,
struct iser_tx_desc *tx_desc)
{
struct isert_device *device = isert_conn->device;
struct ib_device *ib_dev = device->ib_device;
ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
__isert_create_send_desc(device, tx_desc);
}
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
struct iser_tx_desc *tx_desc)
{
struct isert_device *device = isert_conn->device;
struct ib_device *ib_dev = device->ib_device;
u64 dma_addr;
dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
if (ib_dma_mapping_error(ib_dev, dma_addr)) {
isert_err("ib_dma_mapping_error() failed\n");
return -ENOMEM;
}
tx_desc->dma_addr = dma_addr;
tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
tx_desc->tx_sg[0].lkey);
return 0;
}
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
struct ib_send_wr *send_wr)
{
struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
tx_desc->tx_cqe.done = isert_send_done;
send_wr->wr_cqe = &tx_desc->tx_cqe;
if (isert_conn->snd_w_inv && isert_cmd->inv_rkey) {
send_wr->opcode = IB_WR_SEND_WITH_INV;
send_wr->ex.invalidate_rkey = isert_cmd->inv_rkey;
} else {
send_wr->opcode = IB_WR_SEND;
}
send_wr->sg_list = &tx_desc->tx_sg[0];
send_wr->num_sge = isert_cmd->tx_desc.num_sge;
send_wr->send_flags = IB_SEND_SIGNALED;
}
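/* Post a single receive WR for the login descriptor (login phase only). */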
static int
isert_login_post_recv(struct isert_conn *isert_conn)
{
struct ib_recv_wr rx_wr;
struct ib_sge sge;
int ret;
memset(&sge, 0, sizeof(struct ib_sge));
sge.addr = isert_conn->login_desc->dma_addr +
isert_get_hdr_offset(isert_conn->login_desc);
sge.length = ISER_RX_PAYLOAD_SIZE;
sge.lkey = isert_conn->device->pd->local_dma_lkey;
isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
sge.addr, sge.length, sge.lkey);
isert_conn->login_desc->rx_cqe.done = isert_login_recv_done;
memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;
rx_wr.sg_list = &sge;
rx_wr.num_sge = 1;
ret = ib_post_recv(isert_conn->qp, &rx_wr, NULL);
if (ret)
isert_err("ib_post_recv() failed: %d\n", ret);
return ret;
}
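/*
 * Send a login response PDU. Once login completes, allocate and post the
 * data-path rx descriptors and move the connection to
 * ISER_CONN_FULL_FEATURE; otherwise re-post the login receive for the
 * next login request.
 */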
static int
isert_put_login_tx(struct iscsit_conn *conn, struct iscsi_login *login,
u32 length)
{
struct isert_conn *isert_conn = conn->context;
struct isert_device *device = isert_conn->device;
struct ib_device *ib_dev = device->ib_device;
struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
int ret;
__isert_create_send_desc(device, tx_desc);
memcpy(&tx_desc->iscsi_header, &login->rsp[0],
sizeof(struct iscsi_hdr));
isert_init_tx_hdrs(isert_conn, tx_desc);
if (length > 0) {
struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];
ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
length, DMA_TO_DEVICE);
memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);
ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
length, DMA_TO_DEVICE);
tx_dsg->addr = isert_conn->login_rsp_dma;
tx_dsg->length = length;
tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
tx_desc->num_sge = 2;
}
if (!login->login_failed) {
if (login->login_complete) {
ret = isert_alloc_rx_descriptors(isert_conn);
if (ret)
return ret;
ret = isert_post_recvm(isert_conn,
ISERT_QP_MAX_RECV_DTOS);
if (ret)
return ret;
/* Now we are in FULL_FEATURE phase */
mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_FULL_FEATURE;
mutex_unlock(&isert_conn->mutex);
goto post_send;
}
ret = isert_login_post_recv(isert_conn);
if (ret)
return ret;
}
post_send:
ret = isert_login_post_send(isert_conn, tx_desc);
if (ret)
return ret;
return 0;
}
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
struct iser_rx_desc *rx_desc = isert_conn->login_desc;
int rx_buflen = isert_conn->login_req_len;
struct iscsit_conn *conn = isert_conn->conn;
struct iscsi_login *login = conn->conn_login;
int size;
isert_info("conn %p\n", isert_conn);
WARN_ON_ONCE(!login);
if (login->first_request) {
struct iscsi_login_req *login_req =
(struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc);
/*
* Setup the initial iscsi_login values from the leading
* login request PDU.
*/
login->leading_connection = (!login_req->tsih) ? 1 : 0;
login->current_stage = ISCSI_LOGIN_CURRENT_STAGE(
login_req->flags);
login->version_min = login_req->min_version;
login->version_max = login_req->max_version;
memcpy(login->isid, login_req->isid, 6);
login->cmd_sn = be32_to_cpu(login_req->cmdsn);
login->init_task_tag = login_req->itt;
login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
login->cid = be16_to_cpu(login_req->cid);
login->tsih = be16_to_cpu(login_req->tsih);
}
memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN);
size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
isert_dbg("Using login payload size: %d, rx_buflen: %d "
"MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
MAX_KEY_VALUE_PAIRS);
memcpy(login->req_buf, isert_get_data(rx_desc), size);
if (login->first_request) {
complete(&isert_conn->login_comp);
return;
}
queue_delayed_work(isert_login_wq, &conn->login_work, 0);
}
static struct iscsit_cmd
*isert_allocate_cmd(struct iscsit_conn *conn, struct iser_rx_desc *rx_desc)
{
struct isert_conn *isert_conn = conn->context;
struct isert_cmd *isert_cmd;
struct iscsit_cmd *cmd;
cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
if (!cmd) {
isert_err("Unable to allocate iscsit_cmd + isert_cmd\n");
return NULL;
}
isert_cmd = iscsit_priv_cmd(cmd);
isert_cmd->conn = isert_conn;
isert_cmd->iscsit_cmd = cmd;
isert_cmd->rx_desc = rx_desc;
return cmd;
}
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
struct isert_cmd *isert_cmd, struct iscsit_cmd *cmd,
struct iser_rx_desc *rx_desc, unsigned char *buf)
{
struct iscsit_conn *conn = isert_conn->conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
int imm_data, imm_data_len, unsol_data, sg_nents, rc;
bool dump_payload = false;
unsigned int data_len;
rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
if (rc < 0)
return rc;
imm_data = cmd->immediate_data;
imm_data_len = cmd->first_burst_len;
unsol_data = cmd->unsolicited_data;
data_len = cmd->se_cmd.data_length;
if (imm_data && imm_data_len == data_len)
cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
if (rc < 0) {
return 0;
} else if (rc > 0) {
dump_payload = true;
goto sequence_cmd;
}
if (!imm_data)
return 0;
if (imm_data_len != data_len) {
sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
isert_get_data(rx_desc), imm_data_len);
isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
sg_nents, imm_data_len);
} else {
sg_init_table(&isert_cmd->sg, 1);
cmd->se_cmd.t_data_sg = &isert_cmd->sg;
cmd->se_cmd.t_data_nents = 1;
sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc),
imm_data_len);
isert_dbg("Transfer Immediate imm_data_len: %d\n",
imm_data_len);
}
cmd->write_data_done += imm_data_len;
if (cmd->write_data_done == cmd->se_cmd.data_length) {
spin_lock_bh(&cmd->istate_lock);
cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);
}
sequence_cmd:
rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
if (!rc && !dump_payload && unsol_data)
iscsit_set_unsolicited_dataout(cmd);
else if (dump_payload && imm_data)
target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
struct iser_rx_desc *rx_desc, unsigned char *buf)
{
struct scatterlist *sg_start;
struct iscsit_conn *conn = isert_conn->conn;
struct iscsit_cmd *cmd = NULL;
struct iscsi_data *hdr = (struct iscsi_data *)buf;
u32 unsol_data_len = ntoh24(hdr->dlength);
int rc, sg_nents, sg_off, page_off;
rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
if (rc < 0)
return rc;
else if (!cmd)
return 0;
/*
* FIXME: Unexpected unsolicited_data out
*/
if (!cmd->unsolicited_data) {
isert_err("Received unexpected solicited data payload\n");
dump_stack();
return -1;
}
isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
"write_data_done: %u, data_length: %u\n",
unsol_data_len, cmd->write_data_done,
cmd->se_cmd.data_length);
sg_off = cmd->write_data_done / PAGE_SIZE;
sg_start = &cmd->se_cmd.t_data_sg[sg_off];
sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
page_off = cmd->write_data_done % PAGE_SIZE;
/*
* FIXME: Non page-aligned unsolicited_data out
*/
if (page_off) {
isert_err("unexpected non-page aligned data payload\n");
dump_stack();
return -1;
}
isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
"sg_nents: %u from %p %u\n", sg_start, sg_off,
sg_nents, isert_get_data(rx_desc), unsol_data_len);
sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc),
unsol_data_len);
rc = iscsit_check_dataout_payload(cmd, hdr, false);
if (rc < 0)
return rc;
/*
	 * Multiple data-outs on the same command can arrive,
	 * so post the buffer beforehand.
*/
return isert_post_recv(isert_conn, rx_desc);
}
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
unsigned char *buf)
{
struct iscsit_conn *conn = isert_conn->conn;
struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
int rc;
rc = iscsit_setup_nop_out(conn, cmd, hdr);
if (rc < 0)
return rc;
/*
* FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
*/
return iscsit_process_nop_out(conn, cmd, hdr);
}
static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
struct iscsit_cmd *cmd, struct iser_rx_desc *rx_desc,
struct iscsi_text *hdr)
{
struct iscsit_conn *conn = isert_conn->conn;
u32 payload_length = ntoh24(hdr->dlength);
int rc;
unsigned char *text_in = NULL;
rc = iscsit_setup_text_cmd(conn, cmd, hdr);
if (rc < 0)
return rc;
if (payload_length) {
text_in = kzalloc(payload_length, GFP_KERNEL);
if (!text_in)
return -ENOMEM;
}
cmd->text_in_ptr = text_in;
memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length);
return iscsit_process_text_cmd(conn, cmd, hdr);
}
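/*
 * Dispatch a received iSCSI PDU by opcode. The remote stags and virtual
 * addresses parsed from the iSER header are stored in the isert_cmd for
 * the later RDMA transfer.
 */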
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
uint32_t read_stag, uint64_t read_va,
uint32_t write_stag, uint64_t write_va)
{
struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
struct iscsit_conn *conn = isert_conn->conn;
struct iscsit_cmd *cmd;
struct isert_cmd *isert_cmd;
int ret = -EINVAL;
u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);
if (conn->sess->sess_ops->SessionType &&
(!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
" ignoring\n", opcode);
return 0;
}
switch (opcode) {
case ISCSI_OP_SCSI_CMD:
cmd = isert_allocate_cmd(conn, rx_desc);
if (!cmd)
break;
isert_cmd = iscsit_priv_cmd(cmd);
isert_cmd->read_stag = read_stag;
isert_cmd->read_va = read_va;
isert_cmd->write_stag = write_stag;
isert_cmd->write_va = write_va;
isert_cmd->inv_rkey = read_stag ? read_stag : write_stag;
ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
rx_desc, (unsigned char *)hdr);
break;
case ISCSI_OP_NOOP_OUT:
cmd = isert_allocate_cmd(conn, rx_desc);
if (!cmd)
break;
isert_cmd = iscsit_priv_cmd(cmd);
ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
rx_desc, (unsigned char *)hdr);
break;
case ISCSI_OP_SCSI_DATA_OUT:
ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
(unsigned char *)hdr);
break;
case ISCSI_OP_SCSI_TMFUNC:
cmd = isert_allocate_cmd(conn, rx_desc);
if (!cmd)
break;
ret = iscsit_handle_task_mgt_cmd(conn, cmd,
(unsigned char *)hdr);
break;
case ISCSI_OP_LOGOUT:
cmd = isert_allocate_cmd(conn, rx_desc);
if (!cmd)
break;
ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
break;
case ISCSI_OP_TEXT:
if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
else
cmd = isert_allocate_cmd(conn, rx_desc);
if (!cmd)
break;
isert_cmd = iscsit_priv_cmd(cmd);
ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
rx_desc, (struct iscsi_text *)hdr);
break;
default:
isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
dump_stack();
break;
}
return ret;
}
static void
isert_print_wc(struct ib_wc *wc, const char *type)
{
if (wc->status != IB_WC_WR_FLUSH_ERR)
isert_err("%s failure: %s (%d) vend_err %x\n", type,
ib_wc_status_msg(wc->status), wc->status,
wc->vendor_err);
else
isert_dbg("%s failure: %s (%d)\n", type,
ib_wc_status_msg(wc->status), wc->status);
}
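/*
 * Receive completion handler: sync the rx descriptor for the CPU, parse
 * the iSER header for read/write stags and virtual addresses, and hand
 * the iSCSI PDU to isert_rx_opcode().
 */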
static void
isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
uint64_t read_va = 0, write_va = 0;
uint32_t read_stag = 0, write_stag = 0;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
isert_print_wc(wc, "recv");
if (wc->status != IB_WC_WR_FLUSH_ERR)
iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
return;
}
rx_desc->in_use = true;
ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
(int)(wc->byte_len - ISER_HEADERS_LEN));
switch (iser_ctrl->flags & 0xF0) {
case ISCSI_CTRL:
if (iser_ctrl->flags & ISER_RSV) {
read_stag = be32_to_cpu(iser_ctrl->read_stag);
read_va = be64_to_cpu(iser_ctrl->read_va);
isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
read_stag, (unsigned long long)read_va);
}
if (iser_ctrl->flags & ISER_WSV) {
write_stag = be32_to_cpu(iser_ctrl->write_stag);
write_va = be64_to_cpu(iser_ctrl->write_va);
isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
write_stag, (unsigned long long)write_va);
}
isert_dbg("ISER ISCSI_CTRL PDU\n");
break;
case ISER_HELLO:
isert_err("iSER Hello message\n");
break;
default:
isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_ctrl->flags);
break;
}
isert_rx_opcode(isert_conn, rx_desc,
read_stag, read_va, write_stag, write_va);
ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
}
static void
isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->device->ib_device;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
isert_print_wc(wc, "login recv");
return;
}
ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
if (isert_conn->conn) {
struct iscsi_login *login = isert_conn->conn->conn_login;
if (login && !login->first_request)
isert_rx_login_req(isert_conn);
}
mutex_lock(&isert_conn->mutex);
complete(&isert_conn->login_req_comp);
mutex_unlock(&isert_conn->mutex);
ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
ISER_RX_SIZE, DMA_FROM_DEVICE);
}
static void
isert_rdma_rw_ctx_destroy(struct isert_cmd *cmd, struct isert_conn *conn)
{
struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
if (!cmd->rw.nr_ops)
return;
if (isert_prot_cmd(conn, se_cmd)) {
rdma_rw_ctx_destroy_signature(&cmd->rw, conn->qp,
conn->cm_id->port_num, se_cmd->t_data_sg,
se_cmd->t_data_nents, se_cmd->t_prot_sg,
se_cmd->t_prot_nents, dir);
} else {
rdma_rw_ctx_destroy(&cmd->rw, conn->qp, conn->cm_id->port_num,
se_cmd->t_data_sg, se_cmd->t_data_nents, dir);
}
cmd->rw.nr_ops = 0;
}
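/*
 * Release an isert_cmd according to its iSCSI opcode: unlink it from the
 * connection's command list and free the associated target-core or
 * iscsit command resources.
 */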
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
struct isert_conn *isert_conn = isert_cmd->conn;
struct iscsit_conn *conn = isert_conn->conn;
struct iscsi_text_rsp *hdr;
isert_dbg("Cmd %p\n", isert_cmd);
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
if (cmd->data_direction == DMA_TO_DEVICE) {
iscsit_stop_dataout_timer(cmd);
/*
* Check for special case during comp_err where
* WRITE_PENDING has been handed off from core,
* but requires an extra target_put_sess_cmd()
* before transport_generic_free_cmd() below.
*/
if (comp_err &&
cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
struct se_cmd *se_cmd = &cmd->se_cmd;
target_put_sess_cmd(se_cmd);
}
}
isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
case ISCSI_OP_SCSI_TMFUNC:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
case ISCSI_OP_REJECT:
case ISCSI_OP_NOOP_OUT:
case ISCSI_OP_TEXT:
hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
/* If the continue bit is on, keep the command alive */
if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
break;
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
/*
* Handle special case for REJECT when iscsi_add_reject*() has
* overwritten the original iscsi_opcode assignment, and the
* associated cmd->se_cmd needs to be released.
*/
if (cmd->se_cmd.se_tfo != NULL) {
isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
cmd->iscsi_opcode);
transport_generic_free_cmd(&cmd->se_cmd, 0);
break;
}
fallthrough;
default:
iscsit_release_cmd(cmd);
break;
}
}
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
if (tx_desc->dma_addr != 0) {
isert_dbg("unmap single for tx_desc->dma_addr\n");
ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
tx_desc->dma_addr = 0;
}
}
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
struct ib_device *ib_dev, bool comp_err)
{
if (isert_cmd->pdu_buf_dma != 0) {
isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
isert_cmd->pdu_buf_dma = 0;
}
isert_unmap_tx_desc(tx_desc, ib_dev);
isert_put_cmd(isert_cmd, comp_err);
}
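/*
 * Check the signature MR status after a protected transfer and translate
 * any detected T10-PI error into the matching TCM sense code.
 */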
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
struct ib_mr_status mr_status;
int ret;
ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
if (ret) {
isert_err("ib_check_mr_status failed, ret %d\n", ret);
goto fail_mr_status;
}
if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
u64 sec_offset_err;
u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;
switch (mr_status.sig_err.err_type) {
case IB_SIG_BAD_GUARD:
se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
break;
case IB_SIG_BAD_REFTAG:
se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
break;
case IB_SIG_BAD_APPTAG:
se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
break;
}
sec_offset_err = mr_status.sig_err.sig_err_offset;
do_div(sec_offset_err, block_size);
se_cmd->sense_info = sec_offset_err + se_cmd->t_task_lba;
isert_err("PI error found type %d at sector 0x%llx "
"expected 0x%x vs actual 0x%x\n",
mr_status.sig_err.err_type,
(unsigned long long)se_cmd->sense_info,
mr_status.sig_err.expected,
mr_status.sig_err.actual);
ret = 1;
}
fail_mr_status:
return ret;
}
static void
isert_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct isert_conn *isert_conn = wc->qp->qp_context;
struct isert_device *device = isert_conn->device;
struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
struct se_cmd *cmd = &isert_cmd->iscsit_cmd->se_cmd;
int ret = 0;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
isert_print_wc(wc, "rdma write");
if (wc->status != IB_WC_WR_FLUSH_ERR)
iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
isert_completion_put(desc, isert_cmd, device->ib_device, true);
return;
}
isert_dbg("Cmd %p\n", isert_cmd);
ret = isert_check_pi_status(cmd, isert_cmd->rw.reg->mr);
isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
if (ret) {
/*
		 * transport_generic_request_failure() expects two extra
		 * references to handle queue-full, so re-add one here as
		 * target-core will have already dropped one after the first
		 * isert_put_datain() callback.
*/
kref_get(&cmd->cmd_kref);
transport_generic_request_failure(cmd, cmd->pi_err);
} else {
/*
* XXX: isert_put_response() failure is not retried.
*/
ret = isert_put_response(isert_conn->conn, isert_cmd->iscsit_cmd);
if (ret)
pr_warn_ratelimited("isert_put_response() ret: %d\n", ret);
}
}
static void
isert_rdma_read_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct isert_conn *isert_conn = wc->qp->qp_context;
struct isert_device *device = isert_conn->device;
struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);
struct isert_cmd *isert_cmd = tx_desc_to_cmd(desc);
struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
struct se_cmd *se_cmd = &cmd->se_cmd;
int ret = 0;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
isert_print_wc(wc, "rdma read");
if (wc->status != IB_WC_WR_FLUSH_ERR)
iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
isert_completion_put(desc, isert_cmd, device->ib_device, true);
return;
}
isert_dbg("Cmd %p\n", isert_cmd);
iscsit_stop_dataout_timer(cmd);
if (isert_prot_cmd(isert_conn, se_cmd))
ret = isert_check_pi_status(se_cmd, isert_cmd->rw.reg->mr);
isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
cmd->write_data_done = 0;
isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);
/*
* transport_generic_request_failure() will drop the extra
* se_cmd->cmd_kref reference after T10-PI error, and handle
* any non-zero ->queue_status() callback error retries.
*/
if (ret)
transport_generic_request_failure(se_cmd, se_cmd->pi_err);
else
target_execute_cmd(se_cmd);
}
static void
isert_do_control_comp(struct work_struct *work)
{
struct isert_cmd *isert_cmd = container_of(work,
struct isert_cmd, comp_work);
struct isert_conn *isert_conn = isert_cmd->conn;
struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iscsit_cmd *cmd = isert_cmd->iscsit_cmd;
isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
switch (cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
iscsit_tmr_post_handler(cmd, cmd->conn);
fallthrough;
case ISTATE_SEND_REJECT:
case ISTATE_SEND_TEXTRSP:
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
ib_dev, false);
break;
case ISTATE_SEND_LOGOUTRSP:
iscsit_logout_post_handler(cmd, cmd->conn);
break;
default:
isert_err("Unknown i_state %d\n", cmd->i_state);
dump_stack();
break;
}
}
static void
isert_login_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
isert_print_wc(wc, "login send");
if (wc->status != IB_WC_WR_FLUSH_ERR)
iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
}
isert_unmap_tx_desc(tx_desc, ib_dev);
}
static void
isert_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct isert_conn *isert_conn = wc->qp->qp_context;
struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);
struct isert_cmd *isert_cmd = tx_desc_to_cmd(tx_desc);
if (unlikely(wc->status != IB_WC_SUCCESS)) {
isert_print_wc(wc, "send");
if (wc->status != IB_WC_WR_FLUSH_ERR)
iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
return;
}
isert_dbg("Cmd %p\n", isert_cmd);
switch (isert_cmd->iscsit_cmd->i_state) {
case ISTATE_SEND_TASKMGTRSP:
case ISTATE_SEND_LOGOUTRSP:
case ISTATE_SEND_REJECT:
case ISTATE_SEND_TEXTRSP:
isert_unmap_tx_desc(tx_desc, ib_dev);
INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
queue_work(isert_comp_wq, &isert_cmd->comp_work);
return;
default:
isert_cmd->iscsit_cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
break;
}
}
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
int ret;
ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
if (ret)
return ret;
ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr, NULL);
if (ret) {
isert_err("ib_post_send failed with %d\n", ret);
return ret;
}
return ret;
}
static int
isert_put_response(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
&isert_cmd->tx_desc.iscsi_header;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
iscsit_build_rsp_pdu(cmd, conn, true, hdr);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
/*
* Attach SENSE DATA payload to iSCSI Response PDU
*/
if (cmd->se_cmd.sense_buffer &&
((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
(cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
struct isert_device *device = isert_conn->device;
struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
u32 padding, pdu_len;
put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
cmd->sense_buffer);
cmd->se_cmd.scsi_sense_length += sizeof(__be16);
padding = -(cmd->se_cmd.scsi_sense_length) & 3;
hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
pdu_len = cmd->se_cmd.scsi_sense_length + padding;
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->sense_buffer, pdu_len,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
return -ENOMEM;
isert_cmd->pdu_buf_len = pdu_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = pdu_len;
tx_dsg->lkey = device->pd->local_dma_lkey;
isert_cmd->tx_desc.num_sge = 2;
}
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
isert_dbg("Posting SCSI Response\n");
return isert_post_response(isert_conn, isert_cmd);
}
static void
isert_aborted_task(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
if (cmd->data_direction == DMA_TO_DEVICE)
iscsit_stop_dataout_timer(cmd);
isert_rdma_rw_ctx_destroy(isert_cmd, isert_conn);
}
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsit_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
struct isert_device *device = isert_conn->device;
if (conn->tpg->tpg_attrib.t10_pi) {
if (device->pi_capable) {
isert_info("conn %p PI offload enabled\n", isert_conn);
isert_conn->pi_support = true;
return TARGET_PROT_ALL;
}
}
isert_info("conn %p PI offload disabled\n", isert_conn);
isert_conn->pi_support = false;
return TARGET_PROT_NORMAL;
}
static int
isert_put_nopin(struct iscsit_cmd *cmd, struct iscsit_conn *conn,
bool nopout_response)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
&isert_cmd->tx_desc.iscsi_header,
nopout_response);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_logout_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
isert_dbg("conn %p Posting Logout Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_tm_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_reject(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct isert_device *device = isert_conn->device;
struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
struct iscsi_reject *hdr =
(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
iscsit_build_reject(cmd, conn, hdr);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
hton24(hdr->dlength, ISCSI_HDR_LEN);
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
return -ENOMEM;
isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = ISCSI_HDR_LEN;
tx_dsg->lkey = device->pd->local_dma_lkey;
isert_cmd->tx_desc.num_sge = 2;
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
isert_dbg("conn %p Posting Reject\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_text_rsp(struct iscsit_cmd *cmd, struct iscsit_conn *conn)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
struct iscsi_text_rsp *hdr =
(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
u32 txt_rsp_len;
int rc;
isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
if (rc < 0)
return rc;
txt_rsp_len = rc;
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
if (txt_rsp_len) {
struct isert_device *device = isert_conn->device;
struct ib_device *ib_dev = device->ib_device;
struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
void *txt_rsp_buf = cmd->buf_ptr;
isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);
if (ib_dma_mapping_error(ib_dev, isert_cmd->pdu_buf_dma))
return -ENOMEM;
isert_cmd->pdu_buf_len = txt_rsp_len;
tx_dsg->addr = isert_cmd->pdu_buf_dma;
tx_dsg->length = txt_rsp_len;
tx_dsg->lkey = device->pd->local_dma_lkey;
isert_cmd->tx_desc.num_sge = 2;
}
isert_init_send_wr(isert_conn, isert_cmd, send_wr);
isert_dbg("conn %p Text Response\n", isert_conn);
return isert_post_response(isert_conn, isert_cmd);
}
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.bg_type = IB_T10DIF_CRC;
domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
domain->sig.dif.ref_tag = se_cmd->reftag_seed;
/*
	 * At the moment we hard code these values, but if in the
	 * future the target core wants to control them, we will take
	 * them from se_cmd.
*/
domain->sig.dif.apptag_check_mask = 0xffff;
domain->sig.dif.app_escape = true;
domain->sig.dif.ref_escape = true;
if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
domain->sig.dif.ref_remap = true;
}
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
memset(sig_attrs, 0, sizeof(*sig_attrs));
switch (se_cmd->prot_op) {
case TARGET_PROT_DIN_INSERT:
case TARGET_PROT_DOUT_STRIP:
sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
isert_set_dif_domain(se_cmd, &sig_attrs->wire);
break;
case TARGET_PROT_DOUT_INSERT:
case TARGET_PROT_DIN_STRIP:
sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
isert_set_dif_domain(se_cmd, &sig_attrs->mem);
break;
case TARGET_PROT_DIN_PASS:
case TARGET_PROT_DOUT_PASS:
isert_set_dif_domain(se_cmd, &sig_attrs->wire);
isert_set_dif_domain(se_cmd, &sig_attrs->mem);
break;
default:
isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
return -EINVAL;
}
if (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)
sig_attrs->check_mask |= IB_SIG_CHECK_GUARD;
if (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)
sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG;
if (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG)
sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG;
return 0;
}
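/*
 * Initialize the rdma_rw context on first use (with signature attributes
 * for T10-PI protected commands) and post it on the QP; later calls for
 * the same command only re-post the already initialized context.
 */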
static int
isert_rdma_rw_ctx_post(struct isert_cmd *cmd, struct isert_conn *conn,
struct ib_cqe *cqe, struct ib_send_wr *chain_wr)
{
struct se_cmd *se_cmd = &cmd->iscsit_cmd->se_cmd;
enum dma_data_direction dir = target_reverse_dma_direction(se_cmd);
u8 port_num = conn->cm_id->port_num;
u64 addr;
u32 rkey, offset;
int ret;
if (cmd->ctx_init_done)
goto rdma_ctx_post;
if (dir == DMA_FROM_DEVICE) {
addr = cmd->write_va;
rkey = cmd->write_stag;
offset = cmd->iscsit_cmd->write_data_done;
} else {
addr = cmd->read_va;
rkey = cmd->read_stag;
offset = 0;
}
if (isert_prot_cmd(conn, se_cmd)) {
struct ib_sig_attrs sig_attrs;
ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
if (ret)
return ret;
WARN_ON_ONCE(offset);
ret = rdma_rw_ctx_signature_init(&cmd->rw, conn->qp, port_num,
se_cmd->t_data_sg, se_cmd->t_data_nents,
se_cmd->t_prot_sg, se_cmd->t_prot_nents,
&sig_attrs, addr, rkey, dir);
} else {
ret = rdma_rw_ctx_init(&cmd->rw, conn->qp, port_num,
se_cmd->t_data_sg, se_cmd->t_data_nents,
offset, addr, rkey, dir);
}
if (ret < 0) {
isert_err("Cmd: %p failed to prepare RDMA res\n", cmd);
return ret;
}
cmd->ctx_init_done = true;
rdma_ctx_post:
ret = rdma_rw_ctx_post(&cmd->rw, conn->qp, port_num, cqe, chain_wr);
if (ret < 0)
isert_err("Cmd: %p failed to post RDMA res\n", cmd);
return ret;
}
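/*
 * Queue Data-IN to the initiator as RDMA WRITEs. For non-protected
 * commands the SCSI response send WR is built here and chained after the
 * RDMA WRITE; for T10-PI commands the response is sent from
 * isert_rdma_write_done() instead.
 */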
static int
isert_put_datain(struct iscsit_conn *conn, struct iscsit_cmd *cmd)
{
struct se_cmd *se_cmd = &cmd->se_cmd;
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
struct isert_conn *isert_conn = conn->context;
struct ib_cqe *cqe = NULL;
struct ib_send_wr *chain_wr = NULL;
int rc;
isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
isert_cmd, se_cmd->data_length);
if (isert_prot_cmd(isert_conn, se_cmd)) {
isert_cmd->tx_desc.tx_cqe.done = isert_rdma_write_done;
cqe = &isert_cmd->tx_desc.tx_cqe;
} else {
/*
		 * Build isert_cmd->tx_desc for the iSCSI response PDU and
		 * chain its send WR after the RDMA WRITE
*/
isert_create_send_desc(isert_conn, isert_cmd,
&isert_cmd->tx_desc);
iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
&isert_cmd->tx_desc.iscsi_header);
isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
isert_init_send_wr(isert_conn, isert_cmd,
&isert_cmd->tx_desc.send_wr);
rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
if (rc)
return rc;
chain_wr = &isert_cmd->tx_desc.send_wr;
}
rc = isert_rdma_rw_ctx_post(isert_cmd, isert_conn, cqe, chain_wr);
isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ rc: %d\n",
isert_cmd, rc);
return rc;
}
static int
isert_get_dataout(struct iscsit_conn *conn, struct iscsit_cmd *cmd, bool recovery)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
int ret;
isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
isert_cmd, cmd->se_cmd.data_length, cmd->write_data_done);
isert_cmd->tx_desc.tx_cqe.done = isert_rdma_read_done;
ret = isert_rdma_rw_ctx_post(isert_cmd, conn->context,
&isert_cmd->tx_desc.tx_cqe, NULL);
isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE rc: %d\n",
isert_cmd, ret);
return ret;
}
static int
isert_immediate_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
int ret = 0;
switch (state) {
case ISTATE_REMOVE:
spin_lock_bh(&conn->cmd_lock);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
isert_put_cmd(isert_cmd, true);
break;
case ISTATE_SEND_NOPIN_WANT_RESPONSE:
ret = isert_put_nopin(cmd, conn, false);
break;
default:
isert_err("Unknown immediate state: 0x%02x\n", state);
ret = -EINVAL;
break;
}
return ret;
}
static int
isert_response_queue(struct iscsit_conn *conn, struct iscsit_cmd *cmd, int state)
{
struct isert_conn *isert_conn = conn->context;
int ret;
switch (state) {
case ISTATE_SEND_LOGOUTRSP:
ret = isert_put_logout_rsp(cmd, conn);
if (!ret)
isert_conn->logout_posted = true;
break;
case ISTATE_SEND_NOPIN:
ret = isert_put_nopin(cmd, conn, true);
break;
case ISTATE_SEND_TASKMGTRSP:
ret = isert_put_tm_rsp(cmd, conn);
break;
case ISTATE_SEND_REJECT:
ret = isert_put_reject(cmd, conn);
break;
case ISTATE_SEND_TEXTRSP:
ret = isert_put_text_rsp(cmd, conn);
break;
case ISTATE_SEND_STATUS:
/*
		 * Special case for sending a non-GOOD SCSI status from the TX
		 * thread context during a pre-se_cmd-execution failure.
*/
ret = isert_put_response(conn, cmd);
break;
default:
isert_err("Unknown response state: 0x%02x\n", state);
ret = -EINVAL;
break;
}
return ret;
}
struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
struct iscsi_np *np = isert_np->np;
struct rdma_cm_id *id;
struct sockaddr *sa;
int ret;
sa = (struct sockaddr *)&np->np_sockaddr;
isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
id = rdma_create_id(&init_net, isert_cma_handler, isert_np,
RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(id)) {
isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
ret = PTR_ERR(id);
goto out;
}
isert_dbg("id %p context %p\n", id, id->context);
/*
* Allow both IPv4 and IPv6 sockets to bind a single port
* at the same time.
*/
ret = rdma_set_afonly(id, 1);
if (ret) {
isert_err("rdma_set_afonly() failed: %d\n", ret);
goto out_id;
}
ret = rdma_bind_addr(id, sa);
if (ret) {
isert_err("rdma_bind_addr() failed: %d\n", ret);
goto out_id;
}
ret = rdma_listen(id, 0);
if (ret) {
isert_err("rdma_listen() failed: %d\n", ret);
goto out_id;
}
return id;
out_id:
rdma_destroy_id(id);
out:
return ERR_PTR(ret);
}
static int
isert_setup_np(struct iscsi_np *np,
struct sockaddr_storage *ksockaddr)
{
struct isert_np *isert_np;
struct rdma_cm_id *isert_lid;
int ret;
isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
if (!isert_np)
return -ENOMEM;
sema_init(&isert_np->sem, 0);
mutex_init(&isert_np->mutex);
INIT_LIST_HEAD(&isert_np->accepted);
INIT_LIST_HEAD(&isert_np->pending);
isert_np->np = np;
/*
	 * Set up np->np_sockaddr from the sockaddr passed in from the
	 * iscsi_target_configfs.c code.
*/
memcpy(&np->np_sockaddr, ksockaddr,
sizeof(struct sockaddr_storage));
isert_lid = isert_setup_id(isert_np);
if (IS_ERR(isert_lid)) {
ret = PTR_ERR(isert_lid);
goto out;
}
isert_np->cm_id = isert_lid;
np->np_context = isert_np;
return 0;
out:
kfree(isert_np);
return ret;
}
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
struct rdma_cm_id *cm_id = isert_conn->cm_id;
struct rdma_conn_param cp;
int ret;
struct iser_cm_hdr rsp_hdr;
memset(&cp, 0, sizeof(struct rdma_conn_param));
cp.initiator_depth = isert_conn->initiator_depth;
cp.retry_count = 7;
cp.rnr_retry_count = 7;
memset(&rsp_hdr, 0, sizeof(rsp_hdr));
rsp_hdr.flags = ISERT_ZBVA_NOT_USED;
if (!isert_conn->snd_w_inv)
rsp_hdr.flags = rsp_hdr.flags | ISERT_SEND_W_INV_NOT_USED;
cp.private_data = (void *)&rsp_hdr;
cp.private_data_len = sizeof(rsp_hdr);
ret = rdma_accept(cm_id, &cp);
if (ret) {
isert_err("rdma_accept() failed with: %d\n", ret);
return ret;
}
return 0;
}
static int
isert_get_login_rx(struct iscsit_conn *conn, struct iscsi_login *login)
{
struct isert_conn *isert_conn = conn->context;
int ret;
isert_info("before login_req comp conn: %p\n", isert_conn);
ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
if (ret) {
isert_err("isert_conn %p interrupted before got login req\n",
isert_conn);
return ret;
}
reinit_completion(&isert_conn->login_req_comp);
/*
* For login requests after the first PDU, isert_rx_login_req() will
* kick queue_delayed_work(isert_login_wq, &conn->login_work) as
* the packet is received, which turns this callback from
* iscsi_target_do_login_rx() into a NOP.
*/
if (!login->first_request)
return 0;
isert_rx_login_req(isert_conn);
isert_info("before login_comp conn: %p\n", conn);
ret = wait_for_completion_interruptible(&isert_conn->login_comp);
if (ret)
return ret;
isert_info("processing login->req: %p\n", login->req);
return 0;
}
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsit_conn *conn,
struct isert_conn *isert_conn)
{
struct rdma_cm_id *cm_id = isert_conn->cm_id;
struct rdma_route *cm_route = &cm_id->route;
conn->login_family = np->np_sockaddr.ss_family;
conn->login_sockaddr = cm_route->addr.dst_addr;
conn->local_sockaddr = cm_route->addr.src_addr;
}
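/*
 * iscsit accept_np callback: sleep on the np semaphore until a pending
 * isert connection arrives (or the np thread is being reset), then bind
 * the connection to the new iscsit_conn.
 */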
static int
isert_accept_np(struct iscsi_np *np, struct iscsit_conn *conn)
{
struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn;
int ret;
accept_wait:
ret = down_interruptible(&isert_np->sem);
if (ret)
return -ENODEV;
spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
isert_dbg("np_thread_state %d\n",
np->np_thread_state);
/*
* No point in stalling here when np_thread
* is in state RESET/SHUTDOWN/EXIT - bail
*/
return -ENODEV;
}
spin_unlock_bh(&np->np_thread_lock);
mutex_lock(&isert_np->mutex);
if (list_empty(&isert_np->pending)) {
mutex_unlock(&isert_np->mutex);
goto accept_wait;
}
isert_conn = list_first_entry(&isert_np->pending,
struct isert_conn, node);
list_del_init(&isert_conn->node);
mutex_unlock(&isert_np->mutex);
conn->context = isert_conn;
isert_conn->conn = conn;
isert_conn->state = ISER_CONN_BOUND;
isert_set_conn_info(np, conn, isert_conn);
isert_dbg("Processing isert_conn: %p\n", isert_conn);
return 0;
}
static void
isert_free_np(struct iscsi_np *np)
{
struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn, *n;
LIST_HEAD(drop_conn_list);
if (isert_np->cm_id)
rdma_destroy_id(isert_np->cm_id);
/*
	 * FIXME: At this point we don't have a good way to ensure
	 * that there are no hanging connections that completed RDMA
	 * establishment but never started the iscsi login process.
	 * Work around this by cleaning up whatever piled up in the
	 * accepted and pending lists.
*/
mutex_lock(&isert_np->mutex);
if (!list_empty(&isert_np->pending)) {
isert_info("Still have isert pending connections\n");
list_for_each_entry_safe(isert_conn, n,
&isert_np->pending,
node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
list_move_tail(&isert_conn->node, &drop_conn_list);
}
}
if (!list_empty(&isert_np->accepted)) {
isert_info("Still have isert accepted connections\n");
list_for_each_entry_safe(isert_conn, n,
&isert_np->accepted,
node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
list_move_tail(&isert_conn->node, &drop_conn_list);
}
}
mutex_unlock(&isert_np->mutex);
list_for_each_entry_safe(isert_conn, n, &drop_conn_list, node) {
list_del_init(&isert_conn->node);
isert_connect_release(isert_conn);
}
np->np_context = NULL;
kfree(isert_np);
}
static void isert_release_work(struct work_struct *work)
{
struct isert_conn *isert_conn = container_of(work,
struct isert_conn,
release_work);
isert_info("Starting release conn %p\n", isert_conn);
mutex_lock(&isert_conn->mutex);
isert_conn->state = ISER_CONN_DOWN;
mutex_unlock(&isert_conn->mutex);
isert_info("Destroying conn %p\n", isert_conn);
isert_put_conn(isert_conn);
}
static void
isert_wait4logout(struct isert_conn *isert_conn)
{
struct iscsit_conn *conn = isert_conn->conn;
isert_info("conn %p\n", isert_conn);
if (isert_conn->logout_posted) {
isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
wait_for_completion_timeout(&conn->conn_logout_comp,
SECONDS_FOR_LOGOUT_COMP * HZ);
}
}
static void
isert_wait4cmds(struct iscsit_conn *conn)
{
isert_info("iscsit_conn %p\n", conn);
if (conn->sess) {
target_stop_cmd_counter(conn->cmd_cnt);
target_wait_for_cmds(conn->cmd_cnt);
}
}
/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 * unsolicited dataout
 * @conn: iscsi connection
 *
 * We might still have commands waiting for unsolicited dataout
 * messages. We must put the extra reference on those before
 * blocking on target_wait_for_cmds().
*/
static void
isert_put_unsol_pending_cmds(struct iscsit_conn *conn)
{
struct iscsit_cmd *cmd, *tmp;
static LIST_HEAD(drop_cmd_list);
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
(cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
(cmd->write_data_done < cmd->se_cmd.data_length))
list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
}
spin_unlock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
list_del_init(&cmd->i_conn_node);
if (cmd->i_state != ISTATE_REMOVE) {
struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
isert_info("conn %p dropping cmd %p\n", conn, cmd);
isert_put_cmd(isert_cmd, true);
}
}
}
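/*
 * iscsit wait_conn callback: terminate the connection, drain the QP and
 * wait for outstanding commands and the logout completion before
 * scheduling the final release work.
 */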
static void isert_wait_conn(struct iscsit_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
isert_info("Starting conn %p\n", isert_conn);
mutex_lock(&isert_conn->mutex);
isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->mutex);
ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsit_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
ib_drain_qp(isert_conn->qp);
isert_put_conn(isert_conn);
}
static void isert_get_rx_pdu(struct iscsit_conn *conn)
{
struct completion comp;
init_completion(&comp);
wait_for_completion_interruptible(&comp);
}
static struct iscsit_transport iser_target_transport = {
.name = "IB/iSER",
.transport_type = ISCSI_INFINIBAND,
.rdma_shutdown = true,
.priv_size = sizeof(struct isert_cmd),
.owner = THIS_MODULE,
.iscsit_setup_np = isert_setup_np,
.iscsit_accept_np = isert_accept_np,
.iscsit_free_np = isert_free_np,
.iscsit_wait_conn = isert_wait_conn,
.iscsit_free_conn = isert_free_conn,
.iscsit_get_login_rx = isert_get_login_rx,
.iscsit_put_login_tx = isert_put_login_tx,
.iscsit_immediate_queue = isert_immediate_queue,
.iscsit_response_queue = isert_response_queue,
.iscsit_get_dataout = isert_get_dataout,
.iscsit_queue_data_in = isert_put_datain,
.iscsit_queue_status = isert_put_response,
.iscsit_aborted_task = isert_aborted_task,
.iscsit_get_rx_pdu = isert_get_rx_pdu,
.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
static int __init isert_init(void)
{
isert_login_wq = alloc_workqueue("isert_login_wq", 0, 0);
if (!isert_login_wq) {
isert_err("Unable to allocate isert_login_wq\n");
return -ENOMEM;
}
isert_comp_wq = alloc_workqueue("isert_comp_wq",
WQ_UNBOUND | WQ_HIGHPRI, 0);
if (!isert_comp_wq) {
isert_err("Unable to allocate isert_comp_wq\n");
goto destroy_login_wq;
}
isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
WQ_UNBOUND_MAX_ACTIVE);
if (!isert_release_wq) {
isert_err("Unable to allocate isert_release_wq\n");
goto destroy_comp_wq;
}
iscsit_register_transport(&iser_target_transport);
isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
return 0;
destroy_comp_wq:
destroy_workqueue(isert_comp_wq);
destroy_login_wq:
destroy_workqueue(isert_login_wq);
return -ENOMEM;
}
static void __exit isert_exit(void)
{
flush_workqueue(isert_login_wq);
destroy_workqueue(isert_release_wq);
destroy_workqueue(isert_comp_wq);
iscsit_unregister_transport(&iser_target_transport);
isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
destroy_workqueue(isert_login_wq);
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_AUTHOR("[email protected]");
MODULE_LICENSE("GPL");
module_init(isert_init);
module_exit(isert_exit);
| linux-master | drivers/infiniband/ulp/isert/ib_isert.c |
/*
* Copyright (c) 2005 Cisco Systems. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <linux/inet.h>
#include <rdma/ib_cache.h>
#include <linux/atomic.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>
#include "ib_srp.h"
#define DRV_NAME "ib_srp"
#define PFX DRV_NAME ": "
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;
module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");
module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
"Default number of gather/scatter entries in the SRP command (default is 12, max 255)");
module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
"Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");
module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
"Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");
module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
"Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
"Use memory registration even for contiguous memory regions");
module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");
static const struct kernel_param_ops srp_tmo_ops;
static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");
static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
"Number of seconds between the observation of a transport"
" layer error and failing all I/O. \"off\" means that this"
" functionality is disabled.");
static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
"Maximum number of seconds that the SRP transport should"
" insulate transport layer errors. After this time has been"
" exceeded the SCSI host is removed. Should be"
" between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
" if fast_io_fail_tmo has not been set. \"off\" means that"
" this functionality is disabled.");
static bool srp_use_imm_data = true;
module_param_named(use_imm_data, srp_use_imm_data, bool, 0644);
MODULE_PARM_DESC(use_imm_data,
"Whether or not to request permission to use immediate data during SRP login.");
static unsigned int srp_max_imm_data = 8 * 1024;
module_param_named(max_imm_data, srp_max_imm_data, uint, 0644);
MODULE_PARM_DESC(max_imm_data, "Maximum immediate data size.");
static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
"Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
static int srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_rename_dev(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
const char *opname);
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
const struct ib_cm_event *event);
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;
static struct ib_client srp_client = {
.name = "srp",
.add = srp_add_one,
.remove = srp_remove_one,
.rename = srp_rename_dev
};
static struct ib_sa_client srp_sa_client;
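/*
 * sysfs "show" callback for the reconnect_delay, fast_io_fail_tmo and
 * dev_loss_tmo module parameters; negative values are displayed as "off".
 */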
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
int tmo = *(int *)kp->arg;
if (tmo >= 0)
return sysfs_emit(buffer, "%d\n", tmo);
else
return sysfs_emit(buffer, "off\n");
}
static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
int tmo, res;
res = srp_parse_tmo(&tmo, val);
if (res)
goto out;
if (kp->arg == &srp_reconnect_delay)
res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
srp_dev_loss_tmo);
else if (kp->arg == &srp_fast_io_fail_tmo)
res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
else
res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
tmo);
if (res)
goto out;
*(int *)kp->arg = tmo;
out:
return res;
}
static const struct kernel_param_ops srp_tmo_ops = {
.get = srp_tmo_get,
.set = srp_tmo_set,
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
return (struct srp_target_port *) host->hostdata;
}
static const char *srp_target_info(struct Scsi_Host *host)
{
return host_to_target(host)->target_name;
}
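/*
 * Returns true if the Topspin/Cisco workarounds are enabled and the target's
 * IOC GUID starts with a Topspin or Cisco OUI.
 */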
static int srp_target_is_topspin(struct srp_target_port *target)
{
static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
static const u8 cisco_oui[3] = { 0x00, 0x1b, 0x0d };
return topspin_workarounds &&
(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
!memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
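/*
 * Allocate an SRP information unit of @size bytes and DMA-map its buffer for
 * @direction. Returns NULL on failure.
 */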
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
gfp_t gfp_mask,
enum dma_data_direction direction)
{
struct srp_iu *iu;
iu = kmalloc(sizeof *iu, gfp_mask);
if (!iu)
goto out;
iu->buf = kzalloc(size, gfp_mask);
if (!iu->buf)
goto out_free_iu;
iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
direction);
if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
goto out_free_buf;
iu->size = size;
iu->direction = direction;
return iu;
out_free_buf:
kfree(iu->buf);
out_free_iu:
kfree(iu);
out:
return NULL;
}
static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
if (!iu)
return;
ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
iu->direction);
kfree(iu->buf);
kfree(iu);
}
static void srp_qp_event(struct ib_event *event, void *context)
{
pr_debug("QP event %s (%d)\n",
ib_event_msg(event->event), event->event);
}
static int srp_init_ib_qp(struct srp_target_port *target,
struct ib_qp *qp)
{
struct ib_qp_attr *attr;
int ret;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
if (!attr)
return -ENOMEM;
ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
target->srp_host->port,
be16_to_cpu(target->ib_cm.pkey),
&attr->pkey_index);
if (ret)
goto out;
attr->qp_state = IB_QPS_INIT;
attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE);
attr->port_num = target->srp_host->port;
ret = ib_modify_qp(qp, attr,
IB_QP_STATE |
IB_QP_PKEY_INDEX |
IB_QP_ACCESS_FLAGS |
IB_QP_PORT);
out:
kfree(attr);
return ret;
}
static int srp_new_ib_cm_id(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct ib_cm_id *new_cm_id;
new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
srp_ib_cm_handler, ch);
if (IS_ERR(new_cm_id))
return PTR_ERR(new_cm_id);
if (ch->ib_cm.cm_id)
ib_destroy_cm_id(ch->ib_cm.cm_id);
ch->ib_cm.cm_id = new_cm_id;
if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev,
target->srp_host->port))
ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA;
else
ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB;
ch->ib_cm.path.sgid = target->sgid;
ch->ib_cm.path.dgid = target->ib_cm.orig_dgid;
ch->ib_cm.path.pkey = target->ib_cm.pkey;
ch->ib_cm.path.service_id = target->ib_cm.service_id;
return 0;
}
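/*
 * Create a new RDMA CM ID and resolve the target address. On success the new
 * ID replaces ch->rdma_cm.cm_id and the previous ID, if any, is destroyed; on
 * failure the new ID is destroyed instead.
 */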
static int srp_new_rdma_cm_id(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct rdma_cm_id *new_cm_id;
int ret;
new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch,
RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(new_cm_id)) {
ret = PTR_ERR(new_cm_id);
new_cm_id = NULL;
goto out;
}
init_completion(&ch->done);
ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ?
&target->rdma_cm.src.sa : NULL,
&target->rdma_cm.dst.sa,
SRP_PATH_REC_TIMEOUT_MS);
if (ret) {
pr_err("No route available from %pISpsc to %pISpsc (%d)\n",
&target->rdma_cm.src, &target->rdma_cm.dst, ret);
goto out;
}
ret = wait_for_completion_interruptible(&ch->done);
if (ret < 0)
goto out;
ret = ch->status;
if (ret) {
pr_err("Resolving address %pISpsc failed (%d)\n",
&target->rdma_cm.dst, ret);
goto out;
}
swap(ch->rdma_cm.cm_id, new_cm_id);
out:
if (new_cm_id)
rdma_destroy_id(new_cm_id);
return ret;
}
static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) :
srp_new_ib_cm_id(ch);
}
/**
* srp_destroy_fr_pool() - free the resources owned by a pool
* @pool: Fast registration pool to be destroyed.
*/
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
int i;
struct srp_fr_desc *d;
if (!pool)
return;
for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
if (d->mr)
ib_dereg_mr(d->mr);
}
kfree(pool);
}
/**
* srp_create_fr_pool() - allocate and initialize a pool for fast registration
* @device: IB device to allocate fast registration descriptors for.
* @pd: Protection domain associated with the FR descriptors.
* @pool_size: Number of descriptors to allocate.
* @max_page_list_len: Maximum fast registration work request page list length.
*/
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
struct ib_pd *pd, int pool_size,
int max_page_list_len)
{
struct srp_fr_pool *pool;
struct srp_fr_desc *d;
struct ib_mr *mr;
int i, ret = -EINVAL;
enum ib_mr_type mr_type;
if (pool_size <= 0)
goto err;
ret = -ENOMEM;
pool = kzalloc(struct_size(pool, desc, pool_size), GFP_KERNEL);
if (!pool)
goto err;
pool->size = pool_size;
pool->max_page_list_len = max_page_list_len;
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->free_list);
if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
mr_type = IB_MR_TYPE_SG_GAPS;
else
mr_type = IB_MR_TYPE_MEM_REG;
for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
if (ret == -ENOMEM)
pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
dev_name(&device->dev));
goto destroy_pool;
}
d->mr = mr;
list_add_tail(&d->entry, &pool->free_list);
}
out:
return pool;
destroy_pool:
srp_destroy_fr_pool(pool);
err:
pool = ERR_PTR(ret);
goto out;
}
/**
* srp_fr_pool_get() - obtain a descriptor suitable for fast registration
* @pool: Pool to obtain descriptor from.
*/
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
struct srp_fr_desc *d = NULL;
unsigned long flags;
spin_lock_irqsave(&pool->lock, flags);
if (!list_empty(&pool->free_list)) {
d = list_first_entry(&pool->free_list, typeof(*d), entry);
list_del(&d->entry);
}
spin_unlock_irqrestore(&pool->lock, flags);
return d;
}
/**
* srp_fr_pool_put() - put an FR descriptor back in the free list
* @pool: Pool the descriptor was allocated from.
* @desc: Pointer to an array of fast registration descriptor pointers.
* @n: Number of descriptors to put back.
*
* Note: The caller must already have queued an invalidation request for
* desc->mr->rkey before calling this function.
*/
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
int n)
{
unsigned long flags;
int i;
spin_lock_irqsave(&pool->lock, flags);
for (i = 0; i < n; i++)
list_add(&desc[i]->entry, &pool->free_list);
spin_unlock_irqrestore(&pool->lock, flags);
}
static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
struct srp_device *dev = target->srp_host->srp_dev;
return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
dev->max_pages_per_mr);
}
/**
* srp_destroy_qp() - destroy an RDMA queue pair
* @ch: SRP RDMA channel.
*
 * Drain the QP before destroying it so that the receive completion
 * handler cannot access the queue pair while it is being destroyed.
*/
static void srp_destroy_qp(struct srp_rdma_ch *ch)
{
spin_lock_irq(&ch->lock);
ib_process_cq_direct(ch->send_cq, -1);
spin_unlock_irq(&ch->lock);
ib_drain_qp(ch->qp);
ib_destroy_qp(ch->qp);
}
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
const struct ib_device_attr *attr = &dev->dev->attrs;
struct ib_qp_init_attr *init_attr;
struct ib_cq *recv_cq, *send_cq;
struct ib_qp *qp;
struct srp_fr_pool *fr_pool = NULL;
const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
int ret;
init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
if (!init_attr)
return -ENOMEM;
/* queue_size + 1 for ib_drain_rq() */
recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
ch->comp_vector, IB_POLL_SOFTIRQ);
if (IS_ERR(recv_cq)) {
ret = PTR_ERR(recv_cq);
goto err;
}
send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
ch->comp_vector, IB_POLL_DIRECT);
if (IS_ERR(send_cq)) {
ret = PTR_ERR(send_cq);
goto err_recv_cq;
}
init_attr->event_handler = srp_qp_event;
init_attr->cap.max_send_wr = m * target->queue_size;
init_attr->cap.max_recv_wr = target->queue_size + 1;
init_attr->cap.max_recv_sge = 1;
init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge);
init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr->qp_type = IB_QPT_RC;
init_attr->send_cq = send_cq;
init_attr->recv_cq = recv_cq;
ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U);
if (target->using_rdma_cm) {
ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr);
qp = ch->rdma_cm.cm_id->qp;
} else {
qp = ib_create_qp(dev->pd, init_attr);
if (!IS_ERR(qp)) {
ret = srp_init_ib_qp(target, qp);
if (ret)
ib_destroy_qp(qp);
} else {
ret = PTR_ERR(qp);
}
}
if (ret) {
pr_err("QP creation failed for dev %s: %d\n",
dev_name(&dev->dev->dev), ret);
goto err_send_cq;
}
if (dev->use_fast_reg) {
fr_pool = srp_alloc_fr_pool(target);
if (IS_ERR(fr_pool)) {
ret = PTR_ERR(fr_pool);
shost_printk(KERN_WARNING, target->scsi_host, PFX
"FR pool allocation failed (%d)\n", ret);
goto err_qp;
}
}
if (ch->qp)
srp_destroy_qp(ch);
if (ch->recv_cq)
ib_free_cq(ch->recv_cq);
if (ch->send_cq)
ib_free_cq(ch->send_cq);
ch->qp = qp;
ch->recv_cq = recv_cq;
ch->send_cq = send_cq;
if (dev->use_fast_reg) {
if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
ch->fr_pool = fr_pool;
}
kfree(init_attr);
return 0;
err_qp:
if (target->using_rdma_cm)
rdma_destroy_qp(ch->rdma_cm.cm_id);
else
ib_destroy_qp(qp);
err_send_cq:
ib_free_cq(send_cq);
err_recv_cq:
ib_free_cq(recv_cq);
err:
kfree(init_attr);
return ret;
}
/*
* Note: this function may be called without srp_alloc_iu_bufs() having been
* invoked. Hence the ch->[rt]x_ring checks.
*/
static void srp_free_ch_ib(struct srp_target_port *target,
struct srp_rdma_ch *ch)
{
struct srp_device *dev = target->srp_host->srp_dev;
int i;
if (!ch->target)
return;
if (target->using_rdma_cm) {
if (ch->rdma_cm.cm_id) {
rdma_destroy_id(ch->rdma_cm.cm_id);
ch->rdma_cm.cm_id = NULL;
}
} else {
if (ch->ib_cm.cm_id) {
ib_destroy_cm_id(ch->ib_cm.cm_id);
ch->ib_cm.cm_id = NULL;
}
}
/* If srp_new_cm_id() succeeded but srp_create_ch_ib() did not, return. */
if (!ch->qp)
return;
if (dev->use_fast_reg) {
if (ch->fr_pool)
srp_destroy_fr_pool(ch->fr_pool);
}
srp_destroy_qp(ch);
ib_free_cq(ch->send_cq);
ib_free_cq(ch->recv_cq);
/*
 * Prevent the SCSI error handler from using this channel after it has
 * been freed: the error handler may keep trying to perform recovery
 * actions after scsi_remove_host() has returned.
*/
ch->target = NULL;
ch->qp = NULL;
ch->send_cq = ch->recv_cq = NULL;
if (ch->rx_ring) {
for (i = 0; i < target->queue_size; ++i)
srp_free_iu(target->srp_host, ch->rx_ring[i]);
kfree(ch->rx_ring);
ch->rx_ring = NULL;
}
if (ch->tx_ring) {
for (i = 0; i < target->queue_size; ++i)
srp_free_iu(target->srp_host, ch->tx_ring[i]);
kfree(ch->tx_ring);
ch->tx_ring = NULL;
}
}
static void srp_path_rec_completion(int status,
struct sa_path_rec *pathrec,
unsigned int num_paths, void *ch_ptr)
{
struct srp_rdma_ch *ch = ch_ptr;
struct srp_target_port *target = ch->target;
ch->status = status;
if (status)
shost_printk(KERN_ERR, target->scsi_host,
PFX "Got failed path rec status %d\n", status);
else
ch->ib_cm.path = *pathrec;
complete(&ch->done);
}
static int srp_ib_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
int ret;
ch->ib_cm.path.numb_path = 1;
init_completion(&ch->done);
ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client,
target->srp_host->srp_dev->dev,
target->srp_host->port,
&ch->ib_cm.path,
IB_SA_PATH_REC_SERVICE_ID |
IB_SA_PATH_REC_DGID |
IB_SA_PATH_REC_SGID |
IB_SA_PATH_REC_NUMB_PATH |
IB_SA_PATH_REC_PKEY,
SRP_PATH_REC_TIMEOUT_MS,
GFP_KERNEL,
srp_path_rec_completion,
ch, &ch->ib_cm.path_query);
if (ch->ib_cm.path_query_id < 0)
return ch->ib_cm.path_query_id;
ret = wait_for_completion_interruptible(&ch->done);
if (ret < 0)
return ret;
if (ch->status < 0)
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Path record query failed: sgid %pI6, dgid %pI6, pkey %#04x, service_id %#16llx\n",
ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw,
be16_to_cpu(target->ib_cm.pkey),
be64_to_cpu(target->ib_cm.service_id));
return ch->status;
}
static int srp_rdma_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
int ret;
init_completion(&ch->done);
ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS);
if (ret)
return ret;
wait_for_completion_interruptible(&ch->done);
if (ch->status != 0)
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Path resolution failed\n");
return ch->status;
}
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
return target->using_rdma_cm ? srp_rdma_lookup_path(ch) :
srp_ib_lookup_path(ch);
}
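/*
 * Return the subnet timeout of the local IB port. Falls back to 18 if
 * ib_query_port() fails and warns when the value is small enough that SRP
 * login may fail.
 */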
static u8 srp_get_subnet_timeout(struct srp_host *host)
{
struct ib_port_attr attr;
int ret;
u8 subnet_timeout = 18;
ret = ib_query_port(host->srp_dev->dev, host->port, &attr);
if (ret == 0)
subnet_timeout = attr.subnet_timeout;
if (unlikely(subnet_timeout < 15))
pr_warn("%s: subnet timeout %d may cause SRP login to fail.\n",
dev_name(&host->srp_dev->dev->dev), subnet_timeout);
return subnet_timeout;
}
static int srp_send_req(struct srp_rdma_ch *ch, uint32_t max_iu_len,
bool multich)
{
struct srp_target_port *target = ch->target;
struct {
struct rdma_conn_param rdma_param;
struct srp_login_req_rdma rdma_req;
struct ib_cm_req_param ib_param;
struct srp_login_req ib_req;
} *req = NULL;
char *ipi, *tpi;
int status;
req = kzalloc(sizeof *req, GFP_KERNEL);
if (!req)
return -ENOMEM;
req->ib_param.flow_control = 1;
req->ib_param.retry_count = target->tl_retry_count;
/*
* Pick some arbitrary defaults here; we could make these
* module parameters if anyone cared about setting them.
*/
req->ib_param.responder_resources = 4;
req->ib_param.rnr_retry_count = 7;
req->ib_param.max_cm_retries = 15;
req->ib_req.opcode = SRP_LOGIN_REQ;
req->ib_req.tag = 0;
req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len);
req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
SRP_BUF_FORMAT_INDIRECT);
req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI :
SRP_MULTICHAN_SINGLE);
if (srp_use_imm_data) {
req->ib_req.req_flags |= SRP_IMMED_REQUESTED;
req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET);
}
if (target->using_rdma_cm) {
req->rdma_param.flow_control = req->ib_param.flow_control;
req->rdma_param.responder_resources =
req->ib_param.responder_resources;
req->rdma_param.initiator_depth = req->ib_param.initiator_depth;
req->rdma_param.retry_count = req->ib_param.retry_count;
req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count;
req->rdma_param.private_data = &req->rdma_req;
req->rdma_param.private_data_len = sizeof(req->rdma_req);
req->rdma_req.opcode = req->ib_req.opcode;
req->rdma_req.tag = req->ib_req.tag;
req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len;
req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt;
req->rdma_req.req_flags = req->ib_req.req_flags;
req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset;
ipi = req->rdma_req.initiator_port_id;
tpi = req->rdma_req.target_port_id;
} else {
u8 subnet_timeout;
subnet_timeout = srp_get_subnet_timeout(target->srp_host);
req->ib_param.primary_path = &ch->ib_cm.path;
req->ib_param.alternate_path = NULL;
req->ib_param.service_id = target->ib_cm.service_id;
get_random_bytes(&req->ib_param.starting_psn, 4);
req->ib_param.starting_psn &= 0xffffff;
req->ib_param.qp_num = ch->qp->qp_num;
req->ib_param.qp_type = ch->qp->qp_type;
req->ib_param.local_cm_response_timeout = subnet_timeout + 2;
req->ib_param.remote_cm_response_timeout = subnet_timeout + 2;
req->ib_param.private_data = &req->ib_req;
req->ib_param.private_data_len = sizeof(req->ib_req);
ipi = req->ib_req.initiator_port_id;
tpi = req->ib_req.target_port_id;
}
/*
* In the published SRP specification (draft rev. 16a), the
* port identifier format is 8 bytes of ID extension followed
* by 8 bytes of GUID. Older drafts put the two halves in the
* opposite order, so that the GUID comes first.
*
* Targets conforming to these obsolete drafts can be
* recognized by the I/O Class they report.
*/
if (target->io_class == SRP_REV10_IB_IO_CLASS) {
memcpy(ipi, &target->sgid.global.interface_id, 8);
memcpy(ipi + 8, &target->initiator_ext, 8);
memcpy(tpi, &target->ioc_guid, 8);
memcpy(tpi + 8, &target->id_ext, 8);
} else {
memcpy(ipi, &target->initiator_ext, 8);
memcpy(ipi + 8, &target->sgid.global.interface_id, 8);
memcpy(tpi, &target->id_ext, 8);
memcpy(tpi + 8, &target->ioc_guid, 8);
}
/*
* Topspin/Cisco SRP targets will reject our login unless we
* zero out the first 8 bytes of our initiator port ID and set
* the second 8 bytes to the local node GUID.
*/
if (srp_target_is_topspin(target)) {
shost_printk(KERN_DEBUG, target->scsi_host,
PFX "Topspin/Cisco initiator port ID workaround "
"activated for target GUID %016llx\n",
be64_to_cpu(target->ioc_guid));
memset(ipi, 0, 8);
memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8);
}
if (target->using_rdma_cm)
status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param);
else
status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param);
kfree(req);
return status;
}
static bool srp_queue_remove_work(struct srp_target_port *target)
{
bool changed = false;
spin_lock_irq(&target->lock);
if (target->state != SRP_TARGET_REMOVED) {
target->state = SRP_TARGET_REMOVED;
changed = true;
}
spin_unlock_irq(&target->lock);
if (changed)
queue_work(srp_remove_wq, &target->remove_work);
return changed;
}
static void srp_disconnect_target(struct srp_target_port *target)
{
struct srp_rdma_ch *ch;
int i, ret;
/* XXX should send SRP_I_LOGOUT request */
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
ch->connected = false;
ret = 0;
if (target->using_rdma_cm) {
if (ch->rdma_cm.cm_id)
rdma_disconnect(ch->rdma_cm.cm_id);
} else {
if (ch->ib_cm.cm_id)
ret = ib_send_cm_dreq(ch->ib_cm.cm_id,
NULL, 0);
}
if (ret < 0) {
shost_printk(KERN_DEBUG, target->scsi_host,
PFX "Sending CM DREQ failed\n");
}
}
}
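/*
 * Free the per-command resources (FR descriptor list and indirect descriptor
 * buffer) allocated by srp_init_cmd_priv().
 */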
static int srp_exit_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
struct srp_target_port *target = host_to_target(shost);
struct srp_device *dev = target->srp_host->srp_dev;
struct ib_device *ibdev = dev->dev;
struct srp_request *req = scsi_cmd_priv(cmd);
kfree(req->fr_list);
if (req->indirect_dma_addr) {
ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
target->indirect_size,
DMA_TO_DEVICE);
}
kfree(req->indirect_desc);
return 0;
}
static int srp_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
struct srp_target_port *target = host_to_target(shost);
struct srp_device *srp_dev = target->srp_host->srp_dev;
struct ib_device *ibdev = srp_dev->dev;
struct srp_request *req = scsi_cmd_priv(cmd);
dma_addr_t dma_addr;
int ret = -ENOMEM;
if (srp_dev->use_fast_reg) {
req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *),
GFP_KERNEL);
if (!req->fr_list)
goto out;
}
req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
if (!req->indirect_desc)
goto out;
dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
target->indirect_size,
DMA_TO_DEVICE);
if (ib_dma_mapping_error(ibdev, dma_addr)) {
srp_exit_cmd_priv(shost, cmd);
goto out;
}
req->indirect_dma_addr = dma_addr;
ret = 0;
out:
return ret;
}
/**
* srp_del_scsi_host_attr() - Remove attributes defined in the host template.
* @shost: SCSI host whose attributes to remove from sysfs.
*
 * Note: Any attributes defined in the host template that did not exist
 * before this function was invoked are ignored.
*/
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
const struct attribute_group **g;
struct attribute **attr;
for (g = shost->hostt->shost_groups; *g; ++g) {
for (attr = (*g)->attrs; *attr; ++attr) {
struct device_attribute *dev_attr =
container_of(*attr, typeof(*dev_attr), attr);
device_remove_file(&shost->shost_dev, dev_attr);
}
}
}
static void srp_remove_target(struct srp_target_port *target)
{
struct srp_rdma_ch *ch;
int i;
WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
srp_del_scsi_host_attr(target->scsi_host);
srp_rport_get(target->rport);
srp_remove_host(target->scsi_host);
scsi_remove_host(target->scsi_host);
srp_stop_rport_timers(target->rport);
srp_disconnect_target(target);
kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
srp_free_ch_ib(target, ch);
}
cancel_work_sync(&target->tl_err_work);
srp_rport_put(target->rport);
kfree(target->ch);
target->ch = NULL;
spin_lock(&target->srp_host->target_lock);
list_del(&target->list);
spin_unlock(&target->srp_host->target_lock);
scsi_host_put(target->scsi_host);
}
static void srp_remove_work(struct work_struct *work)
{
struct srp_target_port *target =
container_of(work, struct srp_target_port, remove_work);
WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);
srp_remove_target(target);
}
static void srp_rport_delete(struct srp_rport *rport)
{
struct srp_target_port *target = rport->lld_data;
srp_queue_remove_work(target);
}
/**
* srp_connected_ch() - number of connected channels
* @target: SRP target port.
*/
static int srp_connected_ch(struct srp_target_port *target)
{
int i, c = 0;
for (i = 0; i < target->ch_count; i++)
c += target->ch[i].connected;
return c;
}
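/*
 * Establish a connection for one RDMA channel: resolve the path, send the
 * login request and handle port or DLID redirects until the login either
 * succeeds or fails permanently.
 */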
static int srp_connect_ch(struct srp_rdma_ch *ch, uint32_t max_iu_len,
bool multich)
{
struct srp_target_port *target = ch->target;
int ret;
WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);
ret = srp_lookup_path(ch);
if (ret)
goto out;
while (1) {
init_completion(&ch->done);
ret = srp_send_req(ch, max_iu_len, multich);
if (ret)
goto out;
ret = wait_for_completion_interruptible(&ch->done);
if (ret < 0)
goto out;
/*
* The CM event handling code will set status to
* SRP_PORT_REDIRECT if we get a port redirect REJ
* back, or SRP_DLID_REDIRECT if we get a lid/qp
* redirect REJ back.
*/
ret = ch->status;
switch (ret) {
case 0:
ch->connected = true;
goto out;
case SRP_PORT_REDIRECT:
ret = srp_lookup_path(ch);
if (ret)
goto out;
break;
case SRP_DLID_REDIRECT:
break;
case SRP_STALE_CONN:
shost_printk(KERN_ERR, target->scsi_host, PFX
"giving up on stale connection\n");
ret = -ECONNRESET;
goto out;
default:
goto out;
}
}
out:
return ret <= 0 ? ret : -ENODEV;
}
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
srp_handle_qp_err(cq, wc, "INV RKEY");
}
static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
u32 rkey)
{
struct ib_send_wr wr = {
.opcode = IB_WR_LOCAL_INV,
.next = NULL,
.num_sge = 0,
.send_flags = 0,
.ex.invalidate_rkey = rkey,
};
wr.wr_cqe = &req->reg_cqe;
req->reg_cqe.done = srp_inv_rkey_err_done;
return ib_post_send(ch->qp, &wr, NULL);
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
struct srp_rdma_ch *ch,
struct srp_request *req)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
struct ib_device *ibdev = dev->dev;
int i, res;
if (!scsi_sglist(scmnd) ||
(scmnd->sc_data_direction != DMA_TO_DEVICE &&
scmnd->sc_data_direction != DMA_FROM_DEVICE))
return;
if (dev->use_fast_reg) {
struct srp_fr_desc **pfr;
for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
if (res < 0) {
shost_printk(KERN_ERR, target->scsi_host, PFX
"Queueing INV WR for rkey %#x failed (%d)\n",
(*pfr)->mr->rkey, res);
queue_work(system_long_wq,
&target->tl_err_work);
}
}
if (req->nmdesc)
srp_fr_pool_put(ch->fr_pool, req->fr_list,
req->nmdesc);
}
ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
scmnd->sc_data_direction);
}
/**
* srp_claim_req - Take ownership of the scmnd associated with a request.
* @ch: SRP RDMA channel.
* @req: SRP request.
* @sdev: If not NULL, only take ownership for this SCSI device.
* @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
* ownership of @req->scmnd if it equals @scmnd.
*
* Return value:
* Either NULL or a pointer to the SCSI command the caller became owner of.
*/
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
struct srp_request *req,
struct scsi_device *sdev,
struct scsi_cmnd *scmnd)
{
unsigned long flags;
spin_lock_irqsave(&ch->lock, flags);
if (req->scmnd &&
(!sdev || req->scmnd->device == sdev) &&
(!scmnd || req->scmnd == scmnd)) {
scmnd = req->scmnd;
req->scmnd = NULL;
} else {
scmnd = NULL;
}
spin_unlock_irqrestore(&ch->lock, flags);
return scmnd;
}
/**
* srp_free_req() - Unmap data and adjust ch->req_lim.
* @ch: SRP RDMA channel.
* @req: Request to be freed.
* @scmnd: SCSI command associated with @req.
* @req_lim_delta: Amount to be added to @target->req_lim.
*/
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
unsigned long flags;
srp_unmap_data(scmnd, ch, req);
spin_lock_irqsave(&ch->lock, flags);
ch->req_lim += req_lim_delta;
spin_unlock_irqrestore(&ch->lock, flags);
}
static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
struct scsi_device *sdev, int result)
{
struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);
if (scmnd) {
srp_free_req(ch, req, scmnd, 0);
scmnd->result = result;
scsi_done(scmnd);
}
}
struct srp_terminate_context {
struct srp_target_port *srp_target;
int scsi_result;
};
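/*
 * Complete one outstanding SCSI command with context->scsi_result. Invoked
 * via scsi_host_busy_iter() for every busy command of the SCSI host.
 */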
static bool srp_terminate_cmd(struct scsi_cmnd *scmnd, void *context_ptr)
{
struct srp_terminate_context *context = context_ptr;
struct srp_target_port *target = context->srp_target;
u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
struct srp_request *req = scsi_cmd_priv(scmnd);
srp_finish_req(ch, req, NULL, context->scsi_result);
return true;
}
static void srp_terminate_io(struct srp_rport *rport)
{
struct srp_target_port *target = rport->lld_data;
struct srp_terminate_context context = { .srp_target = target,
.scsi_result = DID_TRANSPORT_FAILFAST << 16 };
scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context);
}
/* Calculate maximum initiator to target information unit length. */
static uint32_t srp_max_it_iu_len(int cmd_sg_cnt, bool use_imm_data,
uint32_t max_it_iu_size)
{
uint32_t max_iu_len = sizeof(struct srp_cmd) + SRP_MAX_ADD_CDB_LEN +
sizeof(struct srp_indirect_buf) +
cmd_sg_cnt * sizeof(struct srp_direct_buf);
if (use_imm_data)
max_iu_len = max(max_iu_len, SRP_IMM_DATA_OFFSET +
srp_max_imm_data);
if (max_it_iu_size)
max_iu_len = min(max_iu_len, max_it_iu_size);
pr_debug("max_iu_len = %d\n", max_iu_len);
return max_iu_len;
}
/*
* It is up to the caller to ensure that srp_rport_reconnect() calls are
* serialized and that no concurrent srp_queuecommand(), srp_abort(),
* srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to achieve this is not to call this function
 * directly but to call srp_reconnect_rport() instead, since that function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
*/
static int srp_rport_reconnect(struct srp_rport *rport)
{
struct srp_target_port *target = rport->lld_data;
struct srp_rdma_ch *ch;
uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
srp_use_imm_data,
target->max_it_iu_size);
int i, j, ret = 0;
bool multich = false;
srp_disconnect_target(target);
if (target->state == SRP_TARGET_SCANNING)
return -ENODEV;
/*
* Now get a new local CM ID so that we avoid confusing the target in
* case things are really fouled up. Doing so also ensures that all CM
* callbacks will have finished before a new QP is allocated.
*/
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
ret += srp_new_cm_id(ch);
}
{
struct srp_terminate_context context = {
.srp_target = target, .scsi_result = DID_RESET << 16};
scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd,
&context);
}
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
/*
* Whether or not creating a new CM ID succeeded, create a new
* QP. This guarantees that all completion callback function
* invocations have finished before request resetting starts.
*/
ret += srp_create_ch_ib(ch);
INIT_LIST_HEAD(&ch->free_tx);
for (j = 0; j < target->queue_size; ++j)
list_add(&ch->tx_ring[j]->list, &ch->free_tx);
}
target->qp_in_error = false;
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
if (ret)
break;
ret = srp_connect_ch(ch, max_iu_len, multich);
multich = true;
}
if (ret == 0)
shost_printk(KERN_INFO, target->scsi_host,
PFX "reconnect succeeded\n");
return ret;
}
static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
unsigned int dma_len, u32 rkey)
{
struct srp_direct_buf *desc = state->desc;
WARN_ON_ONCE(!dma_len);
desc->va = cpu_to_be64(dma_addr);
desc->key = cpu_to_be32(rkey);
desc->len = cpu_to_be32(dma_len);
state->total_len += dma_len;
state->desc++;
state->ndesc++;
}
static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
srp_handle_qp_err(cq, wc, "FAST REG");
}
/*
 * Map up to sg_nents elements of state->sg, where *sg_offset_p is the offset
 * at which to start in the first element. If sg_offset_p != NULL then
* *sg_offset_p is updated to the offset in state->sg[retval] of the first
* byte that has not yet been mapped.
*/
static int srp_map_finish_fr(struct srp_map_state *state,
struct srp_request *req,
struct srp_rdma_ch *ch, int sg_nents,
unsigned int *sg_offset_p)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
struct ib_reg_wr wr;
struct srp_fr_desc *desc;
u32 rkey;
int n, err;
if (state->fr.next >= state->fr.end) {
shost_printk(KERN_ERR, ch->target->scsi_host,
PFX "Out of MRs (mr_per_cmd = %d)\n",
ch->target->mr_per_cmd);
return -ENOMEM;
}
WARN_ON_ONCE(!dev->use_fast_reg);
if (sg_nents == 1 && target->global_rkey) {
unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
sg_dma_len(state->sg) - sg_offset,
target->global_rkey);
if (sg_offset_p)
*sg_offset_p = 0;
return 1;
}
desc = srp_fr_pool_get(ch->fr_pool);
if (!desc)
return -ENOMEM;
rkey = ib_inc_rkey(desc->mr->rkey);
ib_update_fast_reg_key(desc->mr, rkey);
n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
dev->mr_page_size);
if (unlikely(n < 0)) {
srp_fr_pool_put(ch->fr_pool, &desc, 1);
pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
sg_offset_p ? *sg_offset_p : -1, n);
return n;
}
WARN_ON_ONCE(desc->mr->length == 0);
req->reg_cqe.done = srp_reg_mr_err_done;
wr.wr.next = NULL;
wr.wr.opcode = IB_WR_REG_MR;
wr.wr.wr_cqe = &req->reg_cqe;
wr.wr.num_sge = 0;
wr.wr.send_flags = 0;
wr.mr = desc->mr;
wr.key = desc->mr->rkey;
wr.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE);
*state->fr.next++ = desc;
state->nmdesc++;
srp_map_desc(state, desc->mr->iova,
desc->mr->length, desc->mr->rkey);
err = ib_post_send(ch->qp, &wr.wr, NULL);
if (unlikely(err)) {
WARN_ON_ONCE(err == -ENOMEM);
return err;
}
return n;
}
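/*
 * Map a scatterlist using fast registration MRs: register as many elements
 * per MR as possible and advance state->sg past the elements consumed by each
 * srp_map_finish_fr() call.
 */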
static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct srp_request *req, struct scatterlist *scat,
int count)
{
unsigned int sg_offset = 0;
state->fr.next = req->fr_list;
state->fr.end = req->fr_list + ch->target->mr_per_cmd;
state->sg = scat;
if (count == 0)
return 0;
while (count) {
int i, n;
n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
if (unlikely(n < 0))
return n;
count -= n;
for (i = 0; i < n; i++)
state->sg = sg_next(state->sg);
}
return 0;
}
static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
struct srp_request *req, struct scatterlist *scat,
int count)
{
struct srp_target_port *target = ch->target;
struct scatterlist *sg;
int i;
for_each_sg(scat, sg, count, i) {
srp_map_desc(state, sg_dma_address(sg), sg_dma_len(sg),
target->global_rkey);
}
return 0;
}
/*
* Register the indirect data buffer descriptor with the HCA.
*
* Note: since the indirect data buffer descriptor has been allocated with
* kmalloc() it is guaranteed that this buffer is a physically contiguous
* memory buffer.
*/
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
void **next_mr, void **end_mr, u32 idb_len,
__be32 *idb_rkey)
{
struct srp_target_port *target = ch->target;
struct srp_device *dev = target->srp_host->srp_dev;
struct srp_map_state state;
struct srp_direct_buf idb_desc;
struct scatterlist idb_sg[1];
int ret;
memset(&state, 0, sizeof(state));
memset(&idb_desc, 0, sizeof(idb_desc));
state.gen.next = next_mr;
state.gen.end = end_mr;
state.desc = &idb_desc;
state.base_dma_addr = req->indirect_dma_addr;
state.dma_len = idb_len;
if (dev->use_fast_reg) {
state.sg = idb_sg;
sg_init_one(idb_sg, req->indirect_desc, idb_len);
idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
idb_sg->dma_length = idb_sg->length; /* hack^2 */
#endif
ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
if (ret < 0)
return ret;
WARN_ON_ONCE(ret < 1);
} else {
return -EINVAL;
}
*idb_rkey = idb_desc.key;
return 0;
}
static void srp_check_mapping(struct srp_map_state *state,
struct srp_rdma_ch *ch, struct srp_request *req,
struct scatterlist *scat, int count)
{
struct srp_device *dev = ch->target->srp_host->srp_dev;
struct srp_fr_desc **pfr;
u64 desc_len = 0, mr_len = 0;
int i;
for (i = 0; i < state->ndesc; i++)
desc_len += be32_to_cpu(req->indirect_desc[i].len);
if (dev->use_fast_reg)
for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
mr_len += (*pfr)->mr->length;
if (desc_len != scsi_bufflen(req->scmnd) ||
mr_len > scsi_bufflen(req->scmnd))
pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
scsi_bufflen(req->scmnd), desc_len, mr_len,
state->ndesc, state->nmdesc);
}
/**
* srp_map_data() - map SCSI data buffer onto an SRP request
* @scmnd: SCSI command to map
* @ch: SRP RDMA channel
* @req: SRP request
*
* Returns the length in bytes of the SRP_CMD IU or a negative value if
* mapping failed. The size of any immediate data is not included in the
* return value.
*/
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
struct srp_request *req)
{
struct srp_target_port *target = ch->target;
struct scatterlist *scat, *sg;
struct srp_cmd *cmd = req->cmd->buf;
int i, len, nents, count, ret;
struct srp_device *dev;
struct ib_device *ibdev;
struct srp_map_state state;
struct srp_indirect_buf *indirect_hdr;
u64 data_len;
u32 idb_len, table_len;
__be32 idb_rkey;
u8 fmt;
req->cmd->num_sge = 1;
if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
return sizeof(struct srp_cmd) + cmd->add_cdb_len;
if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
scmnd->sc_data_direction != DMA_TO_DEVICE) {
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled data direction %d\n",
scmnd->sc_data_direction);
return -EINVAL;
}
nents = scsi_sg_count(scmnd);
scat = scsi_sglist(scmnd);
data_len = scsi_bufflen(scmnd);
dev = target->srp_host->srp_dev;
ibdev = dev->dev;
count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
if (unlikely(count == 0))
return -EIO;
if (ch->use_imm_data &&
count <= ch->max_imm_sge &&
SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
scmnd->sc_data_direction == DMA_TO_DEVICE) {
struct srp_imm_buf *buf;
struct ib_sge *sge = &req->cmd->sge[1];
fmt = SRP_DATA_DESC_IMM;
len = SRP_IMM_DATA_OFFSET;
req->nmdesc = 0;
buf = (void *)cmd->add_data + cmd->add_cdb_len;
buf->len = cpu_to_be32(data_len);
WARN_ON_ONCE((void *)(buf + 1) > (void *)cmd + len);
for_each_sg(scat, sg, count, i) {
sge[i].addr = sg_dma_address(sg);
sge[i].length = sg_dma_len(sg);
sge[i].lkey = target->lkey;
}
req->cmd->num_sge += count;
goto map_complete;
}
fmt = SRP_DATA_DESC_DIRECT;
len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
sizeof(struct srp_direct_buf);
if (count == 1 && target->global_rkey) {
/*
* The midlayer only generated a single gather/scatter
* entry, or DMA mapping coalesced everything to a
* single entry. So a direct descriptor along with
* the DMA MR suffices.
*/
struct srp_direct_buf *buf;
buf = (void *)cmd->add_data + cmd->add_cdb_len;
buf->va = cpu_to_be64(sg_dma_address(scat));
buf->key = cpu_to_be32(target->global_rkey);
buf->len = cpu_to_be32(sg_dma_len(scat));
req->nmdesc = 0;
goto map_complete;
}
/*
* We have more than one scatter/gather entry, so build our indirect
* descriptor table, trying to merge as many entries as we can.
*/
indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len;
ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
target->indirect_size, DMA_TO_DEVICE);
memset(&state, 0, sizeof(state));
state.desc = req->indirect_desc;
if (dev->use_fast_reg)
ret = srp_map_sg_fr(&state, ch, req, scat, count);
else
ret = srp_map_sg_dma(&state, ch, req, scat, count);
req->nmdesc = state.nmdesc;
if (ret < 0)
goto unmap;
{
DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
"Memory mapping consistency check");
if (DYNAMIC_DEBUG_BRANCH(ddm))
srp_check_mapping(&state, ch, req, scat, count);
}
/* We've mapped the request, now pull as much of the indirect
* descriptor table as we can into the command buffer. If this
* target is not using an external indirect table, we are
* guaranteed to fit into the command, as the SCSI layer won't
* give us more S/G entries than we allow.
*/
if (state.ndesc == 1) {
/*
* Memory registration collapsed the sg-list into one entry,
* so use a direct descriptor.
*/
struct srp_direct_buf *buf;
buf = (void *)cmd->add_data + cmd->add_cdb_len;
*buf = req->indirect_desc[0];
goto map_complete;
}
if (unlikely(target->cmd_sg_cnt < state.ndesc &&
!target->allow_ext_sg)) {
shost_printk(KERN_ERR, target->scsi_host,
"Could not fit S/G list into SRP_CMD\n");
ret = -EIO;
goto unmap;
}
count = min(state.ndesc, target->cmd_sg_cnt);
table_len = state.ndesc * sizeof (struct srp_direct_buf);
idb_len = sizeof(struct srp_indirect_buf) + table_len;
fmt = SRP_DATA_DESC_INDIRECT;
len = sizeof(struct srp_cmd) + cmd->add_cdb_len +
sizeof(struct srp_indirect_buf);
len += count * sizeof (struct srp_direct_buf);
memcpy(indirect_hdr->desc_list, req->indirect_desc,
count * sizeof (struct srp_direct_buf));
if (!target->global_rkey) {
ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
idb_len, &idb_rkey);
if (ret < 0)
goto unmap;
req->nmdesc++;
} else {
idb_rkey = cpu_to_be32(target->global_rkey);
}
indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
indirect_hdr->table_desc.key = idb_rkey;
indirect_hdr->table_desc.len = cpu_to_be32(table_len);
indirect_hdr->len = cpu_to_be32(state.total_len);
if (scmnd->sc_data_direction == DMA_TO_DEVICE)
cmd->data_out_desc_cnt = count;
else
cmd->data_in_desc_cnt = count;
ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
DMA_TO_DEVICE);
map_complete:
if (scmnd->sc_data_direction == DMA_TO_DEVICE)
cmd->buf_fmt = fmt << 4;
else
cmd->buf_fmt = fmt;
return len;
unmap:
srp_unmap_data(scmnd, ch, req);
if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
ret = -E2BIG;
return ret;
}
/*
 * Return an IU, and possibly a credit, to the free pool.
*/
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
enum srp_iu_type iu_type)
{
unsigned long flags;
spin_lock_irqsave(&ch->lock, flags);
list_add(&iu->list, &ch->free_tx);
if (iu_type != SRP_IU_RSP)
++ch->req_lim;
spin_unlock_irqrestore(&ch->lock, flags);
}
/*
* Must be called with ch->lock held to protect req_lim and free_tx.
* If IU is not sent, it must be returned using srp_put_tx_iu().
*
* Note:
* An upper limit for the number of allocated information units for each
* request type is:
* - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
* more than Scsi_Host.can_queue requests.
* - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
* - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
* one unanswered SRP request to an initiator.
*/
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
enum srp_iu_type iu_type)
{
struct srp_target_port *target = ch->target;
s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
struct srp_iu *iu;
lockdep_assert_held(&ch->lock);
ib_process_cq_direct(ch->send_cq, -1);
if (list_empty(&ch->free_tx))
return NULL;
/* Initiator responses to target requests do not consume credits */
if (iu_type != SRP_IU_RSP) {
if (ch->req_lim <= rsv) {
++target->zero_req_lim;
return NULL;
}
--ch->req_lim;
}
iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
list_del(&iu->list);
return iu;
}
/*
* Note: if this function is called from inside ib_drain_sq() then it will
* be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
* with status IB_WC_SUCCESS then that's a bug.
*/
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
struct srp_rdma_ch *ch = cq->cq_context;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
srp_handle_qp_err(cq, wc, "SEND");
return;
}
lockdep_assert_held(&ch->lock);
list_add(&iu->list, &ch->free_tx);
}
/**
* srp_post_send() - send an SRP information unit
* @ch: RDMA channel over which to send the information unit.
* @iu: Information unit to send.
* @len: Length of the information unit excluding immediate data.
*/
static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
struct srp_target_port *target = ch->target;
struct ib_send_wr wr;
if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE))
return -EINVAL;
iu->sge[0].addr = iu->dma;
iu->sge[0].length = len;
iu->sge[0].lkey = target->lkey;
iu->cqe.done = srp_send_done;
wr.next = NULL;
wr.wr_cqe = &iu->cqe;
wr.sg_list = &iu->sge[0];
wr.num_sge = iu->num_sge;
wr.opcode = IB_WR_SEND;
wr.send_flags = IB_SEND_SIGNALED;
return ib_post_send(ch->qp, &wr, NULL);
}
static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
struct srp_target_port *target = ch->target;
struct ib_recv_wr wr;
struct ib_sge list;
list.addr = iu->dma;
list.length = iu->size;
list.lkey = target->lkey;
iu->cqe.done = srp_recv_done;
wr.next = NULL;
wr.wr_cqe = &iu->cqe;
wr.sg_list = &list;
wr.num_sge = 1;
return ib_post_recv(ch->qp, &wr, NULL);
}
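/*
 * Process an SRP_RSP information unit. Task management responses update
 * ch->tsk_mgmt_status and complete ch->tsk_mgmt_done; command responses copy
 * status, sense data and residual counts into the SCSI command and complete
 * it. In both cases the request limit delta reported by the target is added
 * to ch->req_lim.
 */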
static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
struct srp_target_port *target = ch->target;
struct srp_request *req;
struct scsi_cmnd *scmnd;
unsigned long flags;
if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
spin_lock_irqsave(&ch->lock, flags);
ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
if (rsp->tag == ch->tsk_mgmt_tag) {
ch->tsk_mgmt_status = -1;
if (be32_to_cpu(rsp->resp_data_len) >= 4)
ch->tsk_mgmt_status = rsp->data[3];
complete(&ch->tsk_mgmt_done);
} else {
shost_printk(KERN_ERR, target->scsi_host,
"Received tsk mgmt response too late for tag %#llx\n",
rsp->tag);
}
spin_unlock_irqrestore(&ch->lock, flags);
} else {
scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
if (scmnd) {
req = scsi_cmd_priv(scmnd);
scmnd = srp_claim_req(ch, req, NULL, scmnd);
}
if (!scmnd) {
shost_printk(KERN_ERR, target->scsi_host,
"Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
rsp->tag, ch - target->ch, ch->qp->qp_num);
spin_lock_irqsave(&ch->lock, flags);
ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
spin_unlock_irqrestore(&ch->lock, flags);
return;
}
scmnd->result = rsp->status;
if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
memcpy(scmnd->sense_buffer, rsp->data +
be32_to_cpu(rsp->resp_data_len),
min_t(int, be32_to_cpu(rsp->sense_data_len),
SCSI_SENSE_BUFFERSIZE));
}
if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
srp_free_req(ch, req, scmnd,
be32_to_cpu(rsp->req_lim_delta));
scsi_done(scmnd);
}
}
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
void *rsp, int len)
{
struct srp_target_port *target = ch->target;
struct ib_device *dev = target->srp_host->srp_dev->dev;
unsigned long flags;
struct srp_iu *iu;
int err;
spin_lock_irqsave(&ch->lock, flags);
ch->req_lim += req_delta;
iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
spin_unlock_irqrestore(&ch->lock, flags);
if (!iu) {
shost_printk(KERN_ERR, target->scsi_host, PFX
"no IU available to send response\n");
return 1;
}
iu->num_sge = 1;
ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
memcpy(iu->buf, rsp, len);
ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);
err = srp_post_send(ch, iu, len);
if (err) {
shost_printk(KERN_ERR, target->scsi_host, PFX
"unable to post response: %d\n", err);
srp_put_tx_iu(ch, iu, SRP_IU_RSP);
}
return err;
}
static void srp_process_cred_req(struct srp_rdma_ch *ch,
struct srp_cred_req *req)
{
struct srp_cred_rsp rsp = {
.opcode = SRP_CRED_RSP,
.tag = req->tag,
};
s32 delta = be32_to_cpu(req->req_lim_delta);
if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
shost_printk(KERN_ERR, ch->target->scsi_host, PFX
"problems processing SRP_CRED_REQ\n");
}
static void srp_process_aer_req(struct srp_rdma_ch *ch,
struct srp_aer_req *req)
{
struct srp_target_port *target = ch->target;
struct srp_aer_rsp rsp = {
.opcode = SRP_AER_RSP,
.tag = req->tag,
};
s32 delta = be32_to_cpu(req->req_lim_delta);
shost_printk(KERN_ERR, target->scsi_host, PFX
"ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));
if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
shost_printk(KERN_ERR, target->scsi_host, PFX
"problems processing SRP_AER_REQ\n");
}
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
struct srp_rdma_ch *ch = cq->cq_context;
struct srp_target_port *target = ch->target;
struct ib_device *dev = target->srp_host->srp_dev->dev;
int res;
u8 opcode;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
srp_handle_qp_err(cq, wc, "RECV");
return;
}
ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
DMA_FROM_DEVICE);
opcode = *(u8 *) iu->buf;
if (0) {
shost_printk(KERN_ERR, target->scsi_host,
PFX "recv completion, opcode 0x%02x\n", opcode);
print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
iu->buf, wc->byte_len, true);
}
switch (opcode) {
case SRP_RSP:
srp_process_rsp(ch, iu->buf);
break;
case SRP_CRED_REQ:
srp_process_cred_req(ch, iu->buf);
break;
case SRP_AER_REQ:
srp_process_aer_req(ch, iu->buf);
break;
case SRP_T_LOGOUT:
/* XXX Handle target logout */
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Got target logout request\n");
break;
default:
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled SRP opcode 0x%02x\n", opcode);
break;
}
ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
DMA_FROM_DEVICE);
res = srp_post_recv(ch, iu);
if (res != 0)
shost_printk(KERN_ERR, target->scsi_host,
PFX "Recv failed with error code %d\n", res);
}
/**
* srp_tl_err_work() - handle a transport layer error
* @work: Work structure embedded in an SRP target port.
*
* Note: This function may get invoked before the rport has been created,
* hence the target->rport test.
*/
static void srp_tl_err_work(struct work_struct *work)
{
struct srp_target_port *target;
target = container_of(work, struct srp_target_port, tl_err_work);
if (target->rport)
srp_start_tl_fail_timers(target->rport);
}
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
const char *opname)
{
struct srp_rdma_ch *ch = cq->cq_context;
struct srp_target_port *target = ch->target;
if (ch->connected && !target->qp_in_error) {
shost_printk(KERN_ERR, target->scsi_host,
PFX "failed %s status %s (%d) for CQE %p\n",
opname, ib_wc_status_msg(wc->status), wc->status,
wc->wr_cqe);
queue_work(system_long_wq, &target->tl_err_work);
}
target->qp_in_error = true;
}
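/*
 * Queue a SCSI command: pick the RDMA channel from the block layer tag,
 * obtain a TX IU, build the SRP_CMD, map the data buffer and post the send.
 * Returns SCSI_MLQUEUE_HOST_BUSY when no IU or credit is available.
 */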
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
struct request *rq = scsi_cmd_to_rq(scmnd);
struct srp_target_port *target = host_to_target(shost);
struct srp_rdma_ch *ch;
struct srp_request *req = scsi_cmd_priv(scmnd);
struct srp_iu *iu;
struct srp_cmd *cmd;
struct ib_device *dev;
unsigned long flags;
u32 tag;
int len, ret;
scmnd->result = srp_chkready(target->rport);
if (unlikely(scmnd->result))
goto err;
WARN_ON_ONCE(rq->tag < 0);
tag = blk_mq_unique_tag(rq);
ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
spin_lock_irqsave(&ch->lock, flags);
iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
spin_unlock_irqrestore(&ch->lock, flags);
if (!iu)
goto err;
dev = target->srp_host->srp_dev->dev;
ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len,
DMA_TO_DEVICE);
cmd = iu->buf;
memset(cmd, 0, sizeof *cmd);
cmd->opcode = SRP_CMD;
int_to_scsilun(scmnd->device->lun, &cmd->lun);
cmd->tag = tag;
memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);
if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) {
cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb),
4);
if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN))
goto err_iu;
}
req->scmnd = scmnd;
req->cmd = iu;
len = srp_map_data(scmnd, ch, req);
if (len < 0) {
shost_printk(KERN_ERR, target->scsi_host,
PFX "Failed to map data (%d)\n", len);
/*
* If we ran out of memory descriptors (-ENOMEM) because an
* application is queuing many requests with more than
* max_pages_per_mr sg-list elements, tell the SCSI mid-layer
* to reduce queue depth temporarily.
*/
scmnd->result = len == -ENOMEM ?
DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
goto err_iu;
}
ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len,
DMA_TO_DEVICE);
if (srp_post_send(ch, iu, len)) {
shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
scmnd->result = DID_ERROR << 16;
goto err_unmap;
}
return 0;
err_unmap:
srp_unmap_data(scmnd, ch, req);
err_iu:
srp_put_tx_iu(ch, iu, SRP_IU_CMD);
/*
 * Prevent the loops that iterate over the request ring from
 * encountering a dangling SCSI command pointer.
*/
req->scmnd = NULL;
err:
if (scmnd->result) {
scsi_done(scmnd);
ret = 0;
} else {
ret = SCSI_MLQUEUE_HOST_BUSY;
}
return ret;
}
/*
* Note: the resources allocated in this function are freed in
* srp_free_ch_ib().
*/
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
int i;
ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
GFP_KERNEL);
if (!ch->rx_ring)
goto err_no_ring;
ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
GFP_KERNEL);
if (!ch->tx_ring)
goto err_no_ring;
for (i = 0; i < target->queue_size; ++i) {
ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
ch->max_ti_iu_len,
GFP_KERNEL, DMA_FROM_DEVICE);
if (!ch->rx_ring[i])
goto err;
}
for (i = 0; i < target->queue_size; ++i) {
ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
ch->max_it_iu_len,
GFP_KERNEL, DMA_TO_DEVICE);
if (!ch->tx_ring[i])
goto err;
list_add(&ch->tx_ring[i]->list, &ch->free_tx);
}
return 0;
err:
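/*
* Both rings were allocated with kcalloc(), so slots that were never
* filled are still NULL; this cleanup loop therefore relies on
* srp_free_iu() accepting a NULL IU pointer.
*/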
for (i = 0; i < target->queue_size; ++i) {
srp_free_iu(target->srp_host, ch->rx_ring[i]);
srp_free_iu(target->srp_host, ch->tx_ring[i]);
}
err_no_ring:
kfree(ch->tx_ring);
ch->tx_ring = NULL;
kfree(ch->rx_ring);
ch->rx_ring = NULL;
return -ENOMEM;
}
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
uint64_t T_tr_ns, max_compl_time_ms;
uint32_t rq_tmo_jiffies;
/*
* According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
* table 91), both the QP timeout and the retry count have to be set
* for RC QPs during the RTR to RTS transition.
*/
WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
(IB_QP_TIMEOUT | IB_QP_RETRY_CNT));
/*
* Set target->rq_tmo_jiffies to one second more than the largest time
* it can take before an error completion is generated. See also
* C9-140..142 in the IBTA spec for more information about how to
* convert the QP Local ACK Timeout value to nanoseconds.
*/
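/*
* Worked example (illustrative values): with qp_attr->timeout = 18 and
* qp_attr->retry_cnt = 7, T_tr_ns = 4096 * 2^18 ns ~= 1.07 s, so
* max_compl_time_ms ~= 7 * 4 * 1074 ms ~= 30 s and rq_tmo_jiffies
* corresponds to roughly 31 seconds.
*/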
T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
do_div(max_compl_time_ms, NSEC_PER_MSEC);
rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);
return rq_tmo_jiffies;
}
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
const struct srp_login_rsp *lrsp,
struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct ib_qp_attr *qp_attr = NULL;
int attr_mask = 0;
int ret = 0;
int i;
if (lrsp->opcode == SRP_LOGIN_RSP) {
ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
ch->req_lim = be32_to_cpu(lrsp->req_lim_delta);
ch->use_imm_data = srp_use_imm_data &&
(lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP);
ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
ch->use_imm_data,
target->max_it_iu_size);
WARN_ON_ONCE(ch->max_it_iu_len >
be32_to_cpu(lrsp->max_it_iu_len));
if (ch->use_imm_data)
shost_printk(KERN_DEBUG, target->scsi_host,
PFX "using immediate data\n");
/*
* Reserve credits for task management so we don't
* bounce requests back to the SCSI mid-layer.
*/
target->scsi_host->can_queue
= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
target->scsi_host->can_queue);
target->scsi_host->cmd_per_lun
= min_t(int, target->scsi_host->can_queue,
target->scsi_host->cmd_per_lun);
} else {
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
ret = -ECONNRESET;
goto error;
}
if (!ch->rx_ring) {
ret = srp_alloc_iu_bufs(ch);
if (ret)
goto error;
}
for (i = 0; i < target->queue_size; i++) {
struct srp_iu *iu = ch->rx_ring[i];
ret = srp_post_recv(ch, iu);
if (ret)
goto error;
}
if (!target->using_rdma_cm) {
ret = -ENOMEM;
qp_attr = kmalloc(sizeof(*qp_attr), GFP_KERNEL);
if (!qp_attr)
goto error;
qp_attr->qp_state = IB_QPS_RTR;
ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
if (ret)
goto error_free;
ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
if (ret)
goto error_free;
qp_attr->qp_state = IB_QPS_RTS;
ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
if (ret)
goto error_free;
target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);
ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
if (ret)
goto error_free;
ret = ib_send_cm_rtu(cm_id, NULL, 0);
}
error_free:
kfree(qp_attr);
error:
ch->status = ret;
}
static void srp_ib_cm_rej_handler(struct ib_cm_id *cm_id,
const struct ib_cm_event *event,
struct srp_rdma_ch *ch)
{
struct srp_target_port *target = ch->target;
struct Scsi_Host *shost = target->scsi_host;
struct ib_class_port_info *cpi;
int opcode;
u16 dlid;
switch (event->param.rej_rcvd.reason) {
case IB_CM_REJ_PORT_CM_REDIRECT:
cpi = event->param.rej_rcvd.ari;
dlid = be16_to_cpu(cpi->redirect_lid);
sa_path_set_dlid(&ch->ib_cm.path, dlid);
ch->ib_cm.path.pkey = cpi->redirect_pkey;
cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16);
ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
break;
case IB_CM_REJ_PORT_REDIRECT:
if (srp_target_is_topspin(target)) {
union ib_gid *dgid = &ch->ib_cm.path.dgid;
/*
* Topspin/Cisco SRP gateways incorrectly send
* reject reason code 25 when they mean 24
* (port redirect).
*/
memcpy(dgid->raw, event->param.rej_rcvd.ari, 16);
shost_printk(KERN_DEBUG, shost,
PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
be64_to_cpu(dgid->global.subnet_prefix),
be64_to_cpu(dgid->global.interface_id));
ch->status = SRP_PORT_REDIRECT;
} else {
shost_printk(KERN_WARNING, shost,
" REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
ch->status = -ECONNRESET;
}
break;
case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
shost_printk(KERN_WARNING, shost,
" REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
ch->status = -ECONNRESET;
break;
case IB_CM_REJ_CONSUMER_DEFINED:
opcode = *(u8 *) event->private_data;
if (opcode == SRP_LOGIN_REJ) {
struct srp_login_rej *rej = event->private_data;
u32 reason = be32_to_cpu(rej->reason);
if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
shost_printk(KERN_WARNING, shost,
PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
else
shost_printk(KERN_WARNING, shost, PFX
"SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
target->sgid.raw,
target->ib_cm.orig_dgid.raw,
reason);
} else
shost_printk(KERN_WARNING, shost,
" REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
" opcode 0x%02x\n", opcode);
ch->status = -ECONNRESET;
break;
case IB_CM_REJ_STALE_CONN:
shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
ch->status = SRP_STALE_CONN;
break;
default:
shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
event->param.rej_rcvd.reason);
ch->status = -ECONNRESET;
}
}
static int srp_ib_cm_handler(struct ib_cm_id *cm_id,
const struct ib_cm_event *event)
{
struct srp_rdma_ch *ch = cm_id->context;
struct srp_target_port *target = ch->target;
int comp = 0;
switch (event->event) {
case IB_CM_REQ_ERROR:
shost_printk(KERN_DEBUG, target->scsi_host,
PFX "Sending CM REQ failed\n");
comp = 1;
ch->status = -ECONNRESET;
break;
case IB_CM_REP_RECEIVED:
comp = 1;
srp_cm_rep_handler(cm_id, event->private_data, ch);
break;
case IB_CM_REJ_RECEIVED:
shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
comp = 1;
srp_ib_cm_rej_handler(cm_id, event, ch);
break;
case IB_CM_DREQ_RECEIVED:
shost_printk(KERN_WARNING, target->scsi_host,
PFX "DREQ received - connection closed\n");
ch->connected = false;
if (ib_send_cm_drep(cm_id, NULL, 0))
shost_printk(KERN_ERR, target->scsi_host,
PFX "Sending CM DREP failed\n");
queue_work(system_long_wq, &target->tl_err_work);
break;
case IB_CM_TIMEWAIT_EXIT:
shost_printk(KERN_ERR, target->scsi_host,
PFX "connection closed\n");
comp = 1;
ch->status = 0;
break;
case IB_CM_MRA_RECEIVED:
case IB_CM_DREQ_ERROR:
case IB_CM_DREP_RECEIVED:
break;
default:
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled CM event %d\n", event->event);
break;
}
if (comp)
complete(&ch->done);
return 0;
}
static void srp_rdma_cm_rej_handler(struct srp_rdma_ch *ch,
struct rdma_cm_event *event)
{
struct srp_target_port *target = ch->target;
struct Scsi_Host *shost = target->scsi_host;
int opcode;
switch (event->status) {
case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
shost_printk(KERN_WARNING, shost,
" REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
ch->status = -ECONNRESET;
break;
case IB_CM_REJ_CONSUMER_DEFINED:
opcode = *(u8 *) event->param.conn.private_data;
if (opcode == SRP_LOGIN_REJ) {
struct srp_login_rej *rej =
(struct srp_login_rej *)
event->param.conn.private_data;
u32 reason = be32_to_cpu(rej->reason);
if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
shost_printk(KERN_WARNING, shost,
PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
else
shost_printk(KERN_WARNING, shost,
PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
} else {
shost_printk(KERN_WARNING, shost,
" REJ reason: IB_CM_REJ_CONSUMER_DEFINED, opcode 0x%02x\n",
opcode);
}
ch->status = -ECONNRESET;
break;
case IB_CM_REJ_STALE_CONN:
shost_printk(KERN_WARNING, shost,
" REJ reason: stale connection\n");
ch->status = SRP_STALE_CONN;
break;
default:
shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
event->status);
ch->status = -ECONNRESET;
break;
}
}
static int srp_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
struct srp_rdma_ch *ch = cm_id->context;
struct srp_target_port *target = ch->target;
int comp = 0;
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
ch->status = 0;
comp = 1;
break;
case RDMA_CM_EVENT_ADDR_ERROR:
ch->status = -ENXIO;
comp = 1;
break;
case RDMA_CM_EVENT_ROUTE_RESOLVED:
ch->status = 0;
comp = 1;
break;
case RDMA_CM_EVENT_ROUTE_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
ch->status = -EHOSTUNREACH;
comp = 1;
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
shost_printk(KERN_DEBUG, target->scsi_host,
PFX "Sending CM REQ failed\n");
comp = 1;
ch->status = -ECONNRESET;
break;
case RDMA_CM_EVENT_ESTABLISHED:
comp = 1;
srp_cm_rep_handler(NULL, event->param.conn.private_data, ch);
break;
case RDMA_CM_EVENT_REJECTED:
shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
comp = 1;
srp_rdma_cm_rej_handler(ch, event);
break;
case RDMA_CM_EVENT_DISCONNECTED:
if (ch->connected) {
shost_printk(KERN_WARNING, target->scsi_host,
PFX "received DREQ\n");
rdma_disconnect(ch->rdma_cm.cm_id);
comp = 1;
ch->status = 0;
queue_work(system_long_wq, &target->tl_err_work);
}
break;
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
shost_printk(KERN_ERR, target->scsi_host,
PFX "connection closed\n");
comp = 1;
ch->status = 0;
break;
default:
shost_printk(KERN_WARNING, target->scsi_host,
PFX "Unhandled CM event %d\n", event->event);
break;
}
if (comp)
complete(&ch->done);
return 0;
}
/**
* srp_change_queue_depth - set the device queue depth
* @sdev: scsi device struct
* @qdepth: requested queue depth
*
* Returns queue depth.
*/
static int
srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
if (!sdev->tagged_supported)
qdepth = 1;
return scsi_change_queue_depth(sdev, qdepth);
}
static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
u8 func, u8 *status)
{
struct srp_target_port *target = ch->target;
struct srp_rport *rport = target->rport;
struct ib_device *dev = target->srp_host->srp_dev->dev;
struct srp_iu *iu;
struct srp_tsk_mgmt *tsk_mgmt;
int res;
if (!ch->connected || target->qp_in_error)
return -1;
/*
* Lock the rport mutex to prevent srp_create_ch_ib() from being
* invoked while a task management function is being sent.
*/
mutex_lock(&rport->mutex);
spin_lock_irq(&ch->lock);
iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
spin_unlock_irq(&ch->lock);
if (!iu) {
mutex_unlock(&rport->mutex);
return -1;
}
iu->num_sge = 1;
ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
DMA_TO_DEVICE);
tsk_mgmt = iu->buf;
memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
tsk_mgmt->opcode = SRP_TSK_MGMT;
int_to_scsilun(lun, &tsk_mgmt->lun);
tsk_mgmt->tsk_mgmt_func = func;
tsk_mgmt->task_tag = req_tag;
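/*
* Generate a tag with the SRP_TAG_TSK_MGMT bit set so that the response
* path can tell task management responses apart from responses to
* regular SCSI commands.
*/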
spin_lock_irq(&ch->lock);
ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
tsk_mgmt->tag = ch->tsk_mgmt_tag;
spin_unlock_irq(&ch->lock);
init_completion(&ch->tsk_mgmt_done);
ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
DMA_TO_DEVICE);
if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
mutex_unlock(&rport->mutex);
return -1;
}
res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
if (res > 0 && status)
*status = ch->tsk_mgmt_status;
mutex_unlock(&rport->mutex);
WARN_ON_ONCE(res < 0);
return res > 0 ? 0 : -1;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_request *req = scsi_cmd_priv(scmnd);
u32 tag;
u16 ch_idx;
struct srp_rdma_ch *ch;
int ret;
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmnd));
ch_idx = blk_mq_unique_tag_to_hwq(tag);
if (WARN_ON_ONCE(ch_idx >= target->ch_count))
return SUCCESS;
ch = &target->ch[ch_idx];
if (!srp_claim_req(ch, req, NULL, scmnd))
return SUCCESS;
shost_printk(KERN_ERR, target->scsi_host,
"Sending SRP abort for tag %#x\n", tag);
if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
SRP_TSK_ABORT_TASK, NULL) == 0)
ret = SUCCESS;
else if (target->rport->state == SRP_RPORT_LOST)
ret = FAST_IO_FAIL;
else
ret = FAILED;
if (ret == SUCCESS) {
srp_free_req(ch, req, scmnd, 0);
scmnd->result = DID_ABORT << 16;
scsi_done(scmnd);
}
return ret;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
ch = &target->ch[0];
if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
SRP_TSK_LUN_RESET, &status))
return FAILED;
if (status)
return FAILED;
return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
}
static int srp_target_alloc(struct scsi_target *starget)
{
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct srp_target_port *target = host_to_target(shost);
if (target->target_can_queue)
starget->can_queue = target->target_can_queue;
return 0;
}
static int srp_slave_configure(struct scsi_device *sdev)
{
struct Scsi_Host *shost = sdev->host;
struct srp_target_port *target = host_to_target(shost);
struct request_queue *q = sdev->request_queue;
unsigned long timeout;
if (sdev->type == TYPE_DISK) {
timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
blk_queue_rq_timeout(q, timeout);
}
return 0;
}
static ssize_t id_ext_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
}
static DEVICE_ATTR_RO(id_ext);
static ssize_t ioc_guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
}
static DEVICE_ATTR_RO(ioc_guid);
static ssize_t service_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
if (target->using_rdma_cm)
return -ENOENT;
return sysfs_emit(buf, "0x%016llx\n",
be64_to_cpu(target->ib_cm.service_id));
}
static DEVICE_ATTR_RO(service_id);
static ssize_t pkey_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
if (target->using_rdma_cm)
return -ENOENT;
return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey));
}
static DEVICE_ATTR_RO(pkey);
static ssize_t sgid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "%pI6\n", target->sgid.raw);
}
static DEVICE_ATTR_RO(sgid);
static ssize_t dgid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
struct srp_rdma_ch *ch = &target->ch[0];
if (target->using_rdma_cm)
return -ENOENT;
return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw);
}
static DEVICE_ATTR_RO(dgid);
static ssize_t orig_dgid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
if (target->using_rdma_cm)
return -ENOENT;
return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw);
}
static DEVICE_ATTR_RO(orig_dgid);
static ssize_t req_lim_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
struct srp_rdma_ch *ch;
int i, req_lim = INT_MAX;
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
req_lim = min(req_lim, ch->req_lim);
}
return sysfs_emit(buf, "%d\n", req_lim);
}
static DEVICE_ATTR_RO(req_lim);
static ssize_t zero_req_lim_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "%d\n", target->zero_req_lim);
}
static DEVICE_ATTR_RO(zero_req_lim);
static ssize_t local_ib_port_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "%u\n", target->srp_host->port);
}
static DEVICE_ATTR_RO(local_ib_port);
static ssize_t local_ib_device_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "%s\n",
dev_name(&target->srp_host->srp_dev->dev->dev));
}
static DEVICE_ATTR_RO(local_ib_device);
static ssize_t ch_count_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "%d\n", target->ch_count);
}
static DEVICE_ATTR_RO(ch_count);
static ssize_t comp_vector_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "%d\n", target->comp_vector);
}
static DEVICE_ATTR_RO(comp_vector);
static ssize_t tl_retry_count_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "%d\n", target->tl_retry_count);
}
static DEVICE_ATTR_RO(tl_retry_count);
static ssize_t cmd_sg_entries_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt);
}
static DEVICE_ATTR_RO(cmd_sg_entries);
static ssize_t allow_ext_sg_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct srp_target_port *target = host_to_target(class_to_shost(dev));
return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
}
static DEVICE_ATTR_RO(allow_ext_sg);
static struct attribute *srp_host_attrs[] = {
&dev_attr_id_ext.attr,
&dev_attr_ioc_guid.attr,
&dev_attr_service_id.attr,
&dev_attr_pkey.attr,
&dev_attr_sgid.attr,
&dev_attr_dgid.attr,
&dev_attr_orig_dgid.attr,
&dev_attr_req_lim.attr,
&dev_attr_zero_req_lim.attr,
&dev_attr_local_ib_port.attr,
&dev_attr_local_ib_device.attr,
&dev_attr_ch_count.attr,
&dev_attr_comp_vector.attr,
&dev_attr_tl_retry_count.attr,
&dev_attr_cmd_sg_entries.attr,
&dev_attr_allow_ext_sg.attr,
NULL
};
ATTRIBUTE_GROUPS(srp_host);
static const struct scsi_host_template srp_template = {
.module = THIS_MODULE,
.name = "InfiniBand SRP initiator",
.proc_name = DRV_NAME,
.target_alloc = srp_target_alloc,
.slave_configure = srp_slave_configure,
.info = srp_target_info,
.init_cmd_priv = srp_init_cmd_priv,
.exit_cmd_priv = srp_exit_cmd_priv,
.queuecommand = srp_queuecommand,
.change_queue_depth = srp_change_queue_depth,
.eh_timed_out = srp_timed_out,
.eh_abort_handler = srp_abort,
.eh_device_reset_handler = srp_reset_device,
.eh_host_reset_handler = srp_reset_host,
.skip_settle_delay = true,
.sg_tablesize = SRP_DEF_SG_TABLESIZE,
.can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
.this_id = -1,
.cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
.shost_groups = srp_host_groups,
.track_queue_depth = 1,
.cmd_size = sizeof(struct srp_request),
};
static int srp_sdev_count(struct Scsi_Host *host)
{
struct scsi_device *sdev;
int c = 0;
shost_for_each_device(sdev, host)
c++;
return c;
}
/*
* Return values:
* < 0 upon failure. Caller is responsible for SRP target port cleanup.
* 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
* removal has been scheduled.
* 0 and target->state != SRP_TARGET_REMOVED upon success.
*/
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
struct srp_rport_identifiers ids;
struct srp_rport *rport;
target->state = SRP_TARGET_SCANNING;
sprintf(target->target_name, "SRP.T10:%016llX",
be64_to_cpu(target->id_ext));
if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
return -ENODEV;
memcpy(ids.port_id, &target->id_ext, 8);
memcpy(ids.port_id + 8, &target->ioc_guid, 8);
ids.roles = SRP_RPORT_ROLE_TARGET;
rport = srp_rport_add(target->scsi_host, &ids);
if (IS_ERR(rport)) {
scsi_remove_host(target->scsi_host);
return PTR_ERR(rport);
}
rport->lld_data = target;
target->rport = rport;
spin_lock(&host->target_lock);
list_add_tail(&target->list, &host->target_list);
spin_unlock(&host->target_lock);
scsi_scan_target(&target->scsi_host->shost_gendev,
0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
if (srp_connected_ch(target) < target->ch_count ||
target->qp_in_error) {
shost_printk(KERN_INFO, target->scsi_host,
PFX "SCSI scan failed - removing SCSI host\n");
srp_queue_remove_work(target);
goto out;
}
pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
dev_name(&target->scsi_host->shost_gendev),
srp_sdev_count(target->scsi_host));
spin_lock_irq(&target->lock);
if (target->state == SRP_TARGET_SCANNING)
target->state = SRP_TARGET_LIVE;
spin_unlock_irq(&target->lock);
out:
return 0;
}
static void srp_release_dev(struct device *dev)
{
struct srp_host *host =
container_of(dev, struct srp_host, dev);
kfree(host);
}
static struct attribute *srp_class_attrs[];
ATTRIBUTE_GROUPS(srp_class);
static struct class srp_class = {
.name = "infiniband_srp",
.dev_groups = srp_class_groups,
.dev_release = srp_release_dev
};
/**
* srp_conn_unique() - check whether the connection to a target is unique
* @host: SRP host.
* @target: SRP target port.
*/
static bool srp_conn_unique(struct srp_host *host,
struct srp_target_port *target)
{
struct srp_target_port *t;
bool ret = false;
if (target->state == SRP_TARGET_REMOVED)
goto out;
ret = true;
spin_lock(&host->target_lock);
list_for_each_entry(t, &host->target_list, list) {
if (t != target &&
target->id_ext == t->id_ext &&
target->ioc_guid == t->ioc_guid &&
target->initiator_ext == t->initiator_ext) {
ret = false;
break;
}
}
spin_unlock(&host->target_lock);
out:
return ret;
}
/*
* Target ports are added by writing
*
* id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
* pkey=<P_Key>,service_id=<service ID>
* or
* id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,
* [src=<IPv4 address>,]dest=<IPv4 address>:<port number>
*
* to the add_target sysfs attribute.
*/
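/*
* Example (the device name and all identifiers below are placeholders):
*
* echo "id_ext=200100e08b000000,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" > /sys/class/infiniband_srp/srp-mlx5_0-1/add_target
*/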
enum {
SRP_OPT_ERR = 0,
SRP_OPT_ID_EXT = 1 << 0,
SRP_OPT_IOC_GUID = 1 << 1,
SRP_OPT_DGID = 1 << 2,
SRP_OPT_PKEY = 1 << 3,
SRP_OPT_SERVICE_ID = 1 << 4,
SRP_OPT_MAX_SECT = 1 << 5,
SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
SRP_OPT_IO_CLASS = 1 << 7,
SRP_OPT_INITIATOR_EXT = 1 << 8,
SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
SRP_OPT_ALLOW_EXT_SG = 1 << 10,
SRP_OPT_SG_TABLESIZE = 1 << 11,
SRP_OPT_COMP_VECTOR = 1 << 12,
SRP_OPT_TL_RETRY_COUNT = 1 << 13,
SRP_OPT_QUEUE_SIZE = 1 << 14,
SRP_OPT_IP_SRC = 1 << 15,
SRP_OPT_IP_DEST = 1 << 16,
SRP_OPT_TARGET_CAN_QUEUE= 1 << 17,
SRP_OPT_MAX_IT_IU_SIZE = 1 << 18,
SRP_OPT_CH_COUNT = 1 << 19,
};
static unsigned int srp_opt_mandatory[] = {
SRP_OPT_ID_EXT |
SRP_OPT_IOC_GUID |
SRP_OPT_DGID |
SRP_OPT_PKEY |
SRP_OPT_SERVICE_ID,
SRP_OPT_ID_EXT |
SRP_OPT_IOC_GUID |
SRP_OPT_IP_DEST,
};
static const match_table_t srp_opt_tokens = {
{ SRP_OPT_ID_EXT, "id_ext=%s" },
{ SRP_OPT_IOC_GUID, "ioc_guid=%s" },
{ SRP_OPT_DGID, "dgid=%s" },
{ SRP_OPT_PKEY, "pkey=%x" },
{ SRP_OPT_SERVICE_ID, "service_id=%s" },
{ SRP_OPT_MAX_SECT, "max_sect=%d" },
{ SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
{ SRP_OPT_TARGET_CAN_QUEUE, "target_can_queue=%d" },
{ SRP_OPT_IO_CLASS, "io_class=%x" },
{ SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
{ SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
{ SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
{ SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
{ SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
{ SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
{ SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
{ SRP_OPT_IP_SRC, "src=%s" },
{ SRP_OPT_IP_DEST, "dest=%s" },
{ SRP_OPT_MAX_IT_IU_SIZE, "max_it_iu_size=%d" },
{ SRP_OPT_CH_COUNT, "ch_count=%u", },
{ SRP_OPT_ERR, NULL }
};
/**
* srp_parse_in - parse an IP address and port number combination
* @net: [in] Network namespace.
* @sa: [out] Address family, IP address and port number.
* @addr_port_str: [in] IP address and port number.
* @has_port: [out] Whether or not @addr_port_str includes a port number.
*
* Parse the following address formats:
* - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
* - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
*/
static int srp_parse_in(struct net *net, struct sockaddr_storage *sa,
const char *addr_port_str, bool *has_port)
{
char *addr_end, *addr = kstrdup(addr_port_str, GFP_KERNEL);
char *port_str;
int ret;
if (!addr)
return -ENOMEM;
port_str = strrchr(addr, ':');
if (port_str && strchr(port_str, ']'))
port_str = NULL;
if (port_str)
*port_str++ = '\0';
if (has_port)
*has_port = port_str != NULL;
ret = inet_pton_with_scope(net, AF_INET, addr, port_str, sa);
if (ret && addr[0]) {
addr_end = addr + strlen(addr) - 1;
if (addr[0] == '[' && *addr_end == ']') {
*addr_end = '\0';
ret = inet_pton_with_scope(net, AF_INET6, addr + 1,
port_str, sa);
}
}
kfree(addr);
pr_debug("%s -> %pISpfsc\n", addr_port_str, sa);
return ret;
}
static int srp_parse_options(struct net *net, const char *buf,
struct srp_target_port *target)
{
char *options, *sep_opt;
char *p;
substring_t args[MAX_OPT_ARGS];
unsigned long long ull;
bool has_port;
int opt_mask = 0;
int token;
int ret = -EINVAL;
int i;
options = kstrdup(buf, GFP_KERNEL);
if (!options)
return -ENOMEM;
sep_opt = options;
while ((p = strsep(&sep_opt, ",\n")) != NULL) {
if (!*p)
continue;
token = match_token(p, srp_opt_tokens, args);
opt_mask |= token;
switch (token) {
case SRP_OPT_ID_EXT:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
ret = kstrtoull(p, 16, &ull);
if (ret) {
pr_warn("invalid id_ext parameter '%s'\n", p);
kfree(p);
goto out;
}
target->id_ext = cpu_to_be64(ull);
kfree(p);
break;
case SRP_OPT_IOC_GUID:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
ret = kstrtoull(p, 16, &ull);
if (ret) {
pr_warn("invalid ioc_guid parameter '%s'\n", p);
kfree(p);
goto out;
}
target->ioc_guid = cpu_to_be64(ull);
kfree(p);
break;
case SRP_OPT_DGID:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
if (strlen(p) != 32) {
pr_warn("bad dest GID parameter '%s'\n", p);
kfree(p);
goto out;
}
ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16);
kfree(p);
if (ret < 0)
goto out;
break;
case SRP_OPT_PKEY:
ret = match_hex(args, &token);
if (ret) {
pr_warn("bad P_Key parameter '%s'\n", p);
goto out;
}
target->ib_cm.pkey = cpu_to_be16(token);
break;
case SRP_OPT_SERVICE_ID:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
ret = kstrtoull(p, 16, &ull);
if (ret) {
pr_warn("bad service_id parameter '%s'\n", p);
kfree(p);
goto out;
}
target->ib_cm.service_id = cpu_to_be64(ull);
kfree(p);
break;
case SRP_OPT_IP_SRC:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
ret = srp_parse_in(net, &target->rdma_cm.src.ss, p,
NULL);
if (ret < 0) {
pr_warn("bad source parameter '%s'\n", p);
kfree(p);
goto out;
}
target->rdma_cm.src_specified = true;
kfree(p);
break;
case SRP_OPT_IP_DEST:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p,
&has_port);
if (!has_port)
ret = -EINVAL;
if (ret < 0) {
pr_warn("bad dest parameter '%s'\n", p);
kfree(p);
goto out;
}
target->using_rdma_cm = true;
kfree(p);
break;
case SRP_OPT_MAX_SECT:
ret = match_int(args, &token);
if (ret) {
pr_warn("bad max sect parameter '%s'\n", p);
goto out;
}
target->scsi_host->max_sectors = token;
break;
case SRP_OPT_QUEUE_SIZE:
ret = match_int(args, &token);
if (ret) {
pr_warn("match_int() failed for queue_size parameter '%s', Error %d\n",
p, ret);
goto out;
}
if (token < 1) {
pr_warn("bad queue_size parameter '%s'\n", p);
ret = -EINVAL;
goto out;
}
target->scsi_host->can_queue = token;
target->queue_size = token + SRP_RSP_SQ_SIZE +
SRP_TSK_MGMT_SQ_SIZE;
if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
target->scsi_host->cmd_per_lun = token;
break;
case SRP_OPT_MAX_CMD_PER_LUN:
ret = match_int(args, &token);
if (ret) {
pr_warn("match_int() failed for max cmd_per_lun parameter '%s', Error %d\n",
p, ret);
goto out;
}
if (token < 1) {
pr_warn("bad max cmd_per_lun parameter '%s'\n",
p);
ret = -EINVAL;
goto out;
}
target->scsi_host->cmd_per_lun = token;
break;
case SRP_OPT_TARGET_CAN_QUEUE:
ret = match_int(args, &token);
if (ret) {
pr_warn("match_int() failed for max target_can_queue parameter '%s', Error %d\n",
p, ret);
goto out;
}
if (token < 1) {
pr_warn("bad max target_can_queue parameter '%s'\n",
p);
ret = -EINVAL;
goto out;
}
target->target_can_queue = token;
break;
case SRP_OPT_IO_CLASS:
ret = match_hex(args, &token);
if (ret) {
pr_warn("bad IO class parameter '%s'\n", p);
goto out;
}
if (token != SRP_REV10_IB_IO_CLASS &&
token != SRP_REV16A_IB_IO_CLASS) {
pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
token, SRP_REV10_IB_IO_CLASS,
SRP_REV16A_IB_IO_CLASS);
ret = -EINVAL;
goto out;
}
target->io_class = token;
break;
case SRP_OPT_INITIATOR_EXT:
p = match_strdup(args);
if (!p) {
ret = -ENOMEM;
goto out;
}
ret = kstrtoull(p, 16, &ull);
if (ret) {
pr_warn("bad initiator_ext value '%s'\n", p);
kfree(p);
goto out;
}
target->initiator_ext = cpu_to_be64(ull);
kfree(p);
break;
case SRP_OPT_CMD_SG_ENTRIES:
ret = match_int(args, &token);
if (ret) {
pr_warn("match_int() failed for max cmd_sg_entries parameter '%s', Error %d\n",
p, ret);
goto out;
}
if (token < 1 || token > 255) {
pr_warn("bad max cmd_sg_entries parameter '%s'\n",
p);
ret = -EINVAL;
goto out;
}
target->cmd_sg_cnt = token;
break;
case SRP_OPT_ALLOW_EXT_SG:
ret = match_int(args, &token);
if (ret) {
pr_warn("bad allow_ext_sg parameter '%s'\n", p);
goto out;
}
target->allow_ext_sg = !!token;
break;
case SRP_OPT_SG_TABLESIZE:
ret = match_int(args, &token);
if (ret) {
pr_warn("match_int() failed for max sg_tablesize parameter '%s', Error %d\n",
p, ret);
goto out;
}
if (token < 1 || token > SG_MAX_SEGMENTS) {
pr_warn("bad max sg_tablesize parameter '%s'\n",
p);
ret = -EINVAL;
goto out;
}
target->sg_tablesize = token;
break;
case SRP_OPT_COMP_VECTOR:
ret = match_int(args, &token);
if (ret) {
pr_warn("match_int() failed for comp_vector parameter '%s', Error %d\n",
p, ret);
goto out;
}
if (token < 0) {
pr_warn("bad comp_vector parameter '%s'\n", p);
ret = -EINVAL;
goto out;
}
target->comp_vector = token;
break;
case SRP_OPT_TL_RETRY_COUNT:
ret = match_int(args, &token);
if (ret) {
pr_warn("match_int() failed for tl_retry_count parameter '%s', Error %d\n",
p, ret);
goto out;
}
if (token < 2 || token > 7) {
pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
p);
ret = -EINVAL;
goto out;
}
target->tl_retry_count = token;
break;
case SRP_OPT_MAX_IT_IU_SIZE:
ret = match_int(args, &token);
if (ret) {
pr_warn("match_int() failed for max it_iu_size parameter '%s', Error %d\n",
p, ret);
goto out;
}
if (token < 0) {
pr_warn("bad maximum initiator to target IU size '%s'\n", p);
ret = -EINVAL;
goto out;
}
target->max_it_iu_size = token;
break;
case SRP_OPT_CH_COUNT:
ret = match_int(args, &token);
if (ret) {
pr_warn("match_int() failed for channel count parameter '%s', Error %d\n",
p, ret);
goto out;
}
if (token < 1) {
pr_warn("bad channel count %s\n", p);
ret = -EINVAL;
goto out;
}
target->ch_count = token;
break;
default:
pr_warn("unknown parameter or missing value '%s' in target creation request\n",
p);
ret = -EINVAL;
goto out;
}
}
for (i = 0; i < ARRAY_SIZE(srp_opt_mandatory); i++) {
if ((opt_mask & srp_opt_mandatory[i]) == srp_opt_mandatory[i]) {
ret = 0;
break;
}
}
if (ret)
pr_warn("target creation request is missing one or more parameters\n");
if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
&& (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
pr_warn("cmd_per_lun = %d > queue_size = %d\n",
target->scsi_host->cmd_per_lun,
target->scsi_host->can_queue);
out:
kfree(options);
return ret;
}
static ssize_t add_target_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
struct srp_host *host =
container_of(dev, struct srp_host, dev);
struct Scsi_Host *target_host;
struct srp_target_port *target;
struct srp_rdma_ch *ch;
struct srp_device *srp_dev = host->srp_dev;
struct ib_device *ibdev = srp_dev->dev;
int ret, i, ch_idx;
unsigned int max_sectors_per_mr, mr_per_cmd = 0;
bool multich = false;
uint32_t max_iu_len;
target_host = scsi_host_alloc(&srp_template,
sizeof (struct srp_target_port));
if (!target_host)
return -ENOMEM;
target_host->transportt = ib_srp_transport_template;
target_host->max_channel = 0;
target_host->max_id = 1;
target_host->max_lun = -1LL;
target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
target_host->max_segment_size = ib_dma_max_seg_size(ibdev);
if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG))
target_host->virt_boundary_mask = ~srp_dev->mr_page_mask;
target = host_to_target(target_host);
target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
target->io_class = SRP_REV16A_IB_IO_CLASS;
target->scsi_host = target_host;
target->srp_host = host;
target->lkey = host->srp_dev->pd->local_dma_lkey;
target->global_rkey = host->srp_dev->global_rkey;
target->cmd_sg_cnt = cmd_sg_entries;
target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
target->allow_ext_sg = allow_ext_sg;
target->tl_retry_count = 7;
target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
/*
* Prevent the SCSI host from being removed by srp_remove_target()
* before this function returns.
*/
scsi_host_get(target->scsi_host);
ret = mutex_lock_interruptible(&host->add_target_mutex);
if (ret < 0)
goto put;
ret = srp_parse_options(target->net, buf, target);
if (ret)
goto out;
if (!srp_conn_unique(target->srp_host, target)) {
if (target->using_rdma_cm) {
shost_printk(KERN_INFO, target->scsi_host,
PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;dest=%pIS\n",
be64_to_cpu(target->id_ext),
be64_to_cpu(target->ioc_guid),
&target->rdma_cm.dst);
} else {
shost_printk(KERN_INFO, target->scsi_host,
PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
be64_to_cpu(target->id_ext),
be64_to_cpu(target->ioc_guid),
be64_to_cpu(target->initiator_ext));
}
ret = -EEXIST;
goto out;
}
if (!srp_dev->has_fr && !target->allow_ext_sg &&
target->cmd_sg_cnt < target->sg_tablesize) {
pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
target->sg_tablesize = target->cmd_sg_cnt;
}
if (srp_dev->use_fast_reg) {
bool gaps_reg = ibdev->attrs.kernel_cap_flags &
IBK_SG_GAPS_REG;
max_sectors_per_mr = srp_dev->max_pages_per_mr <<
(ilog2(srp_dev->mr_page_size) - 9);
if (!gaps_reg) {
/*
* FR can only map one HCA page per entry. If the start
* address is not aligned on a HCA page boundary two
* entries will be used for the head and the tail
* although these two entries combined contain at most
* one HCA page of data. Hence the "+ 1" in the
* calculation below.
*
* The indirect data buffer descriptor is contiguous
* so the memory for that buffer will only be
* registered if register_always is true. Hence add
* one to mr_per_cmd if register_always has been set.
*/
mr_per_cmd = register_always +
(target->scsi_host->max_sectors + 1 +
max_sectors_per_mr - 1) / max_sectors_per_mr;
} else {
mr_per_cmd = register_always +
(target->sg_tablesize +
srp_dev->max_pages_per_mr - 1) /
srp_dev->max_pages_per_mr;
}
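/*
* Worked example (illustrative values): with a 4 KiB mr_page_size,
* max_pages_per_mr = 256 and register_always = 1, max_sectors_per_mr =
* 256 << (12 - 9) = 2048 sectors. For max_sectors = 1024 this gives
* mr_per_cmd = 1 + (1024 + 1 + 2047) / 2048 = 2.
*/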
pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
max_sectors_per_mr, mr_per_cmd);
}
target_host->sg_tablesize = target->sg_tablesize;
target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
target->mr_per_cmd = mr_per_cmd;
target->indirect_size = target->sg_tablesize *
sizeof (struct srp_direct_buf);
max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt,
srp_use_imm_data,
target->max_it_iu_size);
INIT_WORK(&target->tl_err_work, srp_tl_err_work);
INIT_WORK(&target->remove_work, srp_remove_work);
spin_lock_init(&target->lock);
ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid);
if (ret)
goto out;
ret = -ENOMEM;
if (target->ch_count == 0) {
target->ch_count =
min(ch_count ?:
max(4 * num_online_nodes(),
ibdev->num_comp_vectors),
num_online_cpus());
}
target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
GFP_KERNEL);
if (!target->ch)
goto out;
for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) {
ch = &target->ch[ch_idx];
ch->target = target;
ch->comp_vector = ch_idx % ibdev->num_comp_vectors;
spin_lock_init(&ch->lock);
INIT_LIST_HEAD(&ch->free_tx);
ret = srp_new_cm_id(ch);
if (ret)
goto err_disconnect;
ret = srp_create_ch_ib(ch);
if (ret)
goto err_disconnect;
ret = srp_connect_ch(ch, max_iu_len, multich);
if (ret) {
char dst[64];
if (target->using_rdma_cm)
snprintf(dst, sizeof(dst), "%pIS",
&target->rdma_cm.dst);
else
snprintf(dst, sizeof(dst), "%pI6",
target->ib_cm.orig_dgid.raw);
shost_printk(KERN_ERR, target->scsi_host,
PFX "Connection %d/%d to %s failed\n",
ch_idx,
target->ch_count, dst);
if (ch_idx == 0) {
goto free_ch;
} else {
srp_free_ch_ib(target, ch);
target->ch_count = ch - target->ch;
goto connected;
}
}
multich = true;
}
connected:
target->scsi_host->nr_hw_queues = target->ch_count;
ret = srp_add_target(host, target);
if (ret)
goto err_disconnect;
if (target->state != SRP_TARGET_REMOVED) {
if (target->using_rdma_cm) {
shost_printk(KERN_DEBUG, target->scsi_host, PFX
"new target: id_ext %016llx ioc_guid %016llx sgid %pI6 dest %pIS\n",
be64_to_cpu(target->id_ext),
be64_to_cpu(target->ioc_guid),
target->sgid.raw, &target->rdma_cm.dst);
} else {
shost_printk(KERN_DEBUG, target->scsi_host, PFX
"new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
be64_to_cpu(target->id_ext),
be64_to_cpu(target->ioc_guid),
be16_to_cpu(target->ib_cm.pkey),
be64_to_cpu(target->ib_cm.service_id),
target->sgid.raw,
target->ib_cm.orig_dgid.raw);
}
}
ret = count;
out:
mutex_unlock(&host->add_target_mutex);
put:
scsi_host_put(target->scsi_host);
if (ret < 0) {
/*
* If a call to srp_remove_target() has not been scheduled,
* drop the network namespace reference that was obtained
* earlier in this function.
*/
if (target->state != SRP_TARGET_REMOVED)
kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net);
scsi_host_put(target->scsi_host);
}
return ret;
err_disconnect:
srp_disconnect_target(target);
free_ch:
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
srp_free_ch_ib(target, ch);
}
kfree(target->ch);
goto out;
}
static DEVICE_ATTR_WO(add_target);
static ssize_t ibdev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_host *host = container_of(dev, struct srp_host, dev);
return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev));
}
static DEVICE_ATTR_RO(ibdev);
static ssize_t port_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct srp_host *host = container_of(dev, struct srp_host, dev);
return sysfs_emit(buf, "%u\n", host->port);
}
static DEVICE_ATTR_RO(port);
static struct attribute *srp_class_attrs[] = {
&dev_attr_add_target.attr,
&dev_attr_ibdev.attr,
&dev_attr_port.attr,
NULL
};
static struct srp_host *srp_add_port(struct srp_device *device, u32 port)
{
struct srp_host *host;
host = kzalloc(sizeof *host, GFP_KERNEL);
if (!host)
return NULL;
INIT_LIST_HEAD(&host->target_list);
spin_lock_init(&host->target_lock);
mutex_init(&host->add_target_mutex);
host->srp_dev = device;
host->port = port;
device_initialize(&host->dev);
host->dev.class = &srp_class;
host->dev.parent = device->dev->dev.parent;
if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev),
port))
goto put_host;
if (device_add(&host->dev))
goto put_host;
return host;
put_host:
device_del(&host->dev);
put_device(&host->dev);
return NULL;
}
static void srp_rename_dev(struct ib_device *device, void *client_data)
{
struct srp_device *srp_dev = client_data;
struct srp_host *host, *tmp_host;
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
char name[IB_DEVICE_NAME_MAX + 8];
snprintf(name, sizeof(name), "srp-%s-%u",
dev_name(&device->dev), host->port);
device_rename(&host->dev, name);
}
}
static int srp_add_one(struct ib_device *device)
{
struct srp_device *srp_dev;
struct ib_device_attr *attr = &device->attrs;
struct srp_host *host;
int mr_page_shift;
u32 p;
u64 max_pages_per_mr;
unsigned int flags = 0;
srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
if (!srp_dev)
return -ENOMEM;
/*
* Use the smallest page size supported by the HCA, down to a
* minimum of 4096 bytes. We're unlikely to build large sglists
* out of smaller entries.
*/
mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
srp_dev->mr_page_size = 1 << mr_page_shift;
srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
max_pages_per_mr = attr->max_mr_size;
do_div(max_pages_per_mr, srp_dev->mr_page_size);
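/*
* Illustrative example: if the lowest bit set in page_size_cap is
* bit 12 (4 KiB pages), ffs() returns 13 and mr_page_shift =
* max(12, 13 - 1) = 12, i.e. mr_page_size = 4096. A device reporting
* max_mr_size = 2^32 then yields max_pages_per_mr = 2^20 before the
* clamp to SRP_MAX_PAGES_PER_MR below.
*/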
pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
attr->max_mr_size, srp_dev->mr_page_size,
max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
max_pages_per_mr);
srp_dev->has_fr = (attr->device_cap_flags &
IB_DEVICE_MEM_MGT_EXTENSIONS);
if (!never_register && !srp_dev->has_fr)
dev_warn(&device->dev, "FR is not supported\n");
else if (!never_register &&
attr->max_mr_size >= 2 * srp_dev->mr_page_size)
srp_dev->use_fast_reg = srp_dev->has_fr;
if (never_register || !register_always || !srp_dev->has_fr)
flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
if (srp_dev->use_fast_reg) {
srp_dev->max_pages_per_mr =
min_t(u32, srp_dev->max_pages_per_mr,
attr->max_fast_reg_page_list_len);
}
srp_dev->mr_max_size = srp_dev->mr_page_size *
srp_dev->max_pages_per_mr;
pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
dev_name(&device->dev), mr_page_shift, attr->max_mr_size,
attr->max_fast_reg_page_list_len,
srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
INIT_LIST_HEAD(&srp_dev->dev_list);
srp_dev->dev = device;
srp_dev->pd = ib_alloc_pd(device, flags);
if (IS_ERR(srp_dev->pd)) {
int ret = PTR_ERR(srp_dev->pd);
kfree(srp_dev);
return ret;
}
if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey;
WARN_ON_ONCE(srp_dev->global_rkey == 0);
}
rdma_for_each_port (device, p) {
host = srp_add_port(srp_dev, p);
if (host)
list_add_tail(&host->list, &srp_dev->dev_list);
}
ib_set_client_data(device, &srp_client, srp_dev);
return 0;
}
static void srp_remove_one(struct ib_device *device, void *client_data)
{
struct srp_device *srp_dev;
struct srp_host *host, *tmp_host;
struct srp_target_port *target;
srp_dev = client_data;
list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
/*
* Remove the add_target sysfs entry so that no new target ports
* can be created.
*/
device_del(&host->dev);
/*
* Remove all target ports.
*/
spin_lock(&host->target_lock);
list_for_each_entry(target, &host->target_list, list)
srp_queue_remove_work(target);
spin_unlock(&host->target_lock);
/*
* srp_queue_remove_work() queues a call to
* srp_remove_target(). The latter function cancels
* target->tl_err_work, so waiting for the queued remove work items
* to finish is sufficient.
*/
flush_workqueue(srp_remove_wq);
put_device(&host->dev);
}
ib_dealloc_pd(srp_dev->pd);
kfree(srp_dev);
}
static struct srp_function_template ib_srp_transport_functions = {
.has_rport_state = true,
.reset_timer_if_blocked = true,
.reconnect_delay = &srp_reconnect_delay,
.fast_io_fail_tmo = &srp_fast_io_fail_tmo,
.dev_loss_tmo = &srp_dev_loss_tmo,
.reconnect = srp_rport_reconnect,
.rport_delete = srp_rport_delete,
.terminate_rport_io = srp_terminate_io,
};
static int __init srp_init_module(void)
{
int ret;
BUILD_BUG_ON(sizeof(struct srp_aer_req) != 36);
BUILD_BUG_ON(sizeof(struct srp_cmd) != 48);
BUILD_BUG_ON(sizeof(struct srp_imm_buf) != 4);
BUILD_BUG_ON(sizeof(struct srp_indirect_buf) != 20);
BUILD_BUG_ON(sizeof(struct srp_login_req) != 64);
BUILD_BUG_ON(sizeof(struct srp_login_req_rdma) != 56);
BUILD_BUG_ON(sizeof(struct srp_rsp) != 36);
if (srp_sg_tablesize) {
pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
if (!cmd_sg_entries)
cmd_sg_entries = srp_sg_tablesize;
}
if (!cmd_sg_entries)
cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
if (cmd_sg_entries > 255) {
pr_warn("Clamping cmd_sg_entries to 255\n");
cmd_sg_entries = 255;
}
if (!indirect_sg_entries)
indirect_sg_entries = cmd_sg_entries;
else if (indirect_sg_entries < cmd_sg_entries) {
pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
cmd_sg_entries);
indirect_sg_entries = cmd_sg_entries;
}
if (indirect_sg_entries > SG_MAX_SEGMENTS) {
pr_warn("Clamping indirect_sg_entries to %u\n",
SG_MAX_SEGMENTS);
indirect_sg_entries = SG_MAX_SEGMENTS;
}
srp_remove_wq = create_workqueue("srp_remove");
if (!srp_remove_wq) {
ret = -ENOMEM;
goto out;
}
ret = -ENOMEM;
ib_srp_transport_template =
srp_attach_transport(&ib_srp_transport_functions);
if (!ib_srp_transport_template)
goto destroy_wq;
ret = class_register(&srp_class);
if (ret) {
pr_err("couldn't register class infiniband_srp\n");
goto release_tr;
}
ib_sa_register_client(&srp_sa_client);
ret = ib_register_client(&srp_client);
if (ret) {
pr_err("couldn't register IB client\n");
goto unreg_sa;
}
out:
return ret;
unreg_sa:
ib_sa_unregister_client(&srp_sa_client);
class_unregister(&srp_class);
release_tr:
srp_release_transport(ib_srp_transport_template);
destroy_wq:
destroy_workqueue(srp_remove_wq);
goto out;
}
static void __exit srp_cleanup_module(void)
{
ib_unregister_client(&srp_client);
ib_sa_unregister_client(&srp_sa_client);
class_unregister(&srp_class);
srp_release_transport(ib_srp_transport_template);
destroy_workqueue(srp_remove_wq);
}
module_init(srp_init_module);
module_exit(srp_cleanup_module);
| linux-master | drivers/infiniband/ulp/srp/ib_srp.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Common code for Intel Running Average Power Limit (RAPL) support.
* Copyright (c) 2019, Intel Corporation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/powercap.h>
#include <linux/suspend.h>
#include <linux/intel_rapl.h>
#include <linux/processor.h>
#include <linux/platform_device.h>
#include <asm/iosf_mbi.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
/* bitmasks for RAPL MSRs, used by primitive access functions */
#define ENERGY_STATUS_MASK 0xffffffff
#define POWER_LIMIT1_MASK 0x7FFF
#define POWER_LIMIT1_ENABLE BIT(15)
#define POWER_LIMIT1_CLAMP BIT(16)
#define POWER_LIMIT2_MASK (0x7FFFULL<<32)
#define POWER_LIMIT2_ENABLE BIT_ULL(47)
#define POWER_LIMIT2_CLAMP BIT_ULL(48)
#define POWER_HIGH_LOCK BIT_ULL(63)
#define POWER_LOW_LOCK BIT(31)
#define POWER_LIMIT4_MASK 0x1FFF
#define TIME_WINDOW1_MASK (0x7FULL<<17)
#define TIME_WINDOW2_MASK (0x7FULL<<49)
#define POWER_UNIT_OFFSET 0
#define POWER_UNIT_MASK 0x0F
#define ENERGY_UNIT_OFFSET 0x08
#define ENERGY_UNIT_MASK 0x1F00
#define TIME_UNIT_OFFSET 0x10
#define TIME_UNIT_MASK 0xF0000
#define POWER_INFO_MAX_MASK (0x7fffULL<<32)
#define POWER_INFO_MIN_MASK (0x7fffULL<<16)
#define POWER_INFO_MAX_TIME_WIN_MASK (0x3fULL<<48)
#define POWER_INFO_THERMAL_SPEC_MASK 0x7fff
#define PERF_STATUS_THROTTLE_TIME_MASK 0xffffffff
#define PP_POLICY_MASK 0x1F
/*
* SPR has a different layout for the Psys domain power limit registers.
* PL1 and PL2 are 17 bits wide instead of 15 bits.
* The Enable bits and TimeWindow bits are shifted as a result.
*/
#define PSYS_POWER_LIMIT1_MASK 0x1FFFF
#define PSYS_POWER_LIMIT1_ENABLE BIT(17)
#define PSYS_POWER_LIMIT2_MASK (0x1FFFFULL<<32)
#define PSYS_POWER_LIMIT2_ENABLE BIT_ULL(49)
#define PSYS_TIME_WINDOW1_MASK (0x7FULL<<19)
#define PSYS_TIME_WINDOW2_MASK (0x7FULL<<51)
/* bitmasks for RAPL TPMI, used by primitive access functions */
#define TPMI_POWER_LIMIT_MASK 0x3FFFF
#define TPMI_POWER_LIMIT_ENABLE BIT_ULL(62)
#define TPMI_TIME_WINDOW_MASK (0x7FULL<<18)
#define TPMI_INFO_SPEC_MASK 0x3FFFF
#define TPMI_INFO_MIN_MASK (0x3FFFFULL << 18)
#define TPMI_INFO_MAX_MASK (0x3FFFFULL << 36)
#define TPMI_INFO_MAX_TIME_WIN_MASK (0x7FULL << 54)
/* Non HW constants */
#define RAPL_PRIMITIVE_DERIVED BIT(1) /* not from raw data */
#define RAPL_PRIMITIVE_DUMMY BIT(2)
#define TIME_WINDOW_MAX_MSEC 40000
#define TIME_WINDOW_MIN_MSEC 250
#define ENERGY_UNIT_SCALE 1000 /* scale from driver unit to powercap unit */
enum unit_type {
ARBITRARY_UNIT, /* no translation */
POWER_UNIT,
ENERGY_UNIT,
TIME_UNIT,
};
/* per domain data, some are optional */
#define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2)
#define DOMAIN_STATE_INACTIVE BIT(0)
#define DOMAIN_STATE_POWER_LIMIT_SET BIT(1)
static const char *pl_names[NR_POWER_LIMITS] = {
[POWER_LIMIT1] = "long_term",
[POWER_LIMIT2] = "short_term",
[POWER_LIMIT4] = "peak_power",
};
enum pl_prims {
PL_ENABLE,
PL_CLAMP,
PL_LIMIT,
PL_TIME_WINDOW,
PL_MAX_POWER,
PL_LOCK,
};
static bool is_pl_valid(struct rapl_domain *rd, int pl)
{
if (pl < POWER_LIMIT1 || pl > POWER_LIMIT4)
return false;
return rd->rpl[pl].name ? true : false;
}
static int get_pl_lock_prim(struct rapl_domain *rd, int pl)
{
if (rd->rp->priv->type == RAPL_IF_TPMI) {
if (pl == POWER_LIMIT1)
return PL1_LOCK;
if (pl == POWER_LIMIT2)
return PL2_LOCK;
if (pl == POWER_LIMIT4)
return PL4_LOCK;
}
/* MSR/MMIO Interface doesn't have Lock bit for PL4 */
if (pl == POWER_LIMIT4)
return -EINVAL;
/*
* A Power Limit register that supports two power limits has the Lock
* bit at a different position.
*/
if (rd->rp->priv->limits[rd->id] & BIT(POWER_LIMIT2))
return FW_HIGH_LOCK;
return FW_LOCK;
}
static int get_pl_prim(struct rapl_domain *rd, int pl, enum pl_prims prim)
{
switch (pl) {
case POWER_LIMIT1:
if (prim == PL_ENABLE)
return PL1_ENABLE;
if (prim == PL_CLAMP && rd->rp->priv->type != RAPL_IF_TPMI)
return PL1_CLAMP;
if (prim == PL_LIMIT)
return POWER_LIMIT1;
if (prim == PL_TIME_WINDOW)
return TIME_WINDOW1;
if (prim == PL_MAX_POWER)
return THERMAL_SPEC_POWER;
if (prim == PL_LOCK)
return get_pl_lock_prim(rd, pl);
return -EINVAL;
case POWER_LIMIT2:
if (prim == PL_ENABLE)
return PL2_ENABLE;
if (prim == PL_CLAMP && rd->rp->priv->type != RAPL_IF_TPMI)
return PL2_CLAMP;
if (prim == PL_LIMIT)
return POWER_LIMIT2;
if (prim == PL_TIME_WINDOW)
return TIME_WINDOW2;
if (prim == PL_MAX_POWER)
return MAX_POWER;
if (prim == PL_LOCK)
return get_pl_lock_prim(rd, pl);
return -EINVAL;
case POWER_LIMIT4:
if (prim == PL_LIMIT)
return POWER_LIMIT4;
if (prim == PL_ENABLE)
return PL4_ENABLE;
/* PL4 would be around two times PL2, use same prim as PL2. */
if (prim == PL_MAX_POWER)
return MAX_POWER;
if (prim == PL_LOCK)
return get_pl_lock_prim(rd, pl);
return -EINVAL;
default:
return -EINVAL;
}
}
#define power_zone_to_rapl_domain(_zone) \
container_of(_zone, struct rapl_domain, power_zone)
struct rapl_defaults {
u8 floor_freq_reg_addr;
int (*check_unit)(struct rapl_domain *rd);
void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
u64 (*compute_time_window)(struct rapl_domain *rd, u64 val,
bool to_raw);
unsigned int dram_domain_energy_unit;
unsigned int psys_domain_energy_unit;
bool spr_psys_bits;
};
static struct rapl_defaults *defaults_msr;
static const struct rapl_defaults defaults_tpmi;
static struct rapl_defaults *get_defaults(struct rapl_package *rp)
{
return rp->priv->defaults;
}
/* Sideband MBI registers */
#define IOSF_CPU_POWER_BUDGET_CTL_BYT (0x2)
#define IOSF_CPU_POWER_BUDGET_CTL_TNG (0xdf)
#define PACKAGE_PLN_INT_SAVED BIT(0)
#define MAX_PRIM_NAME (32)
/* Per-domain data, used to describe individual knobs so that the access
* functions can be consolidated into one instead of many inline functions.
*/
struct rapl_primitive_info {
const char *name;
u64 mask;
int shift;
enum rapl_domain_reg_id id;
enum unit_type unit;
u32 flag;
};
#define PRIMITIVE_INFO_INIT(p, m, s, i, u, f) { \
.name = #p, \
.mask = m, \
.shift = s, \
.id = i, \
.unit = u, \
.flag = f \
}
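/*
* For example, a (hypothetical) entry such as
* PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
*		       RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0)
* expands to an initializer with .name = "ENERGY_COUNTER",
* .mask = ENERGY_STATUS_MASK, .shift = 0, .id = RAPL_DOMAIN_REG_STATUS,
* .unit = ENERGY_UNIT and .flag = 0.
*/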
static void rapl_init_domains(struct rapl_package *rp);
static int rapl_read_data_raw(struct rapl_domain *rd,
enum rapl_primitives prim,
bool xlate, u64 *data);
static int rapl_write_data_raw(struct rapl_domain *rd,
enum rapl_primitives prim,
unsigned long long value);
static int rapl_read_pl_data(struct rapl_domain *rd, int pl,
enum pl_prims pl_prim,
bool xlate, u64 *data);
static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
enum pl_prims pl_prim,
unsigned long long value);
static u64 rapl_unit_xlate(struct rapl_domain *rd,
enum unit_type type, u64 value, int to_raw);
static void package_power_limit_irq_save(struct rapl_package *rp);
static LIST_HEAD(rapl_packages); /* guarded by CPU hotplug lock */
static const char *const rapl_domain_names[] = {
"package",
"core",
"uncore",
"dram",
"psys",
};
static int get_energy_counter(struct powercap_zone *power_zone,
u64 *energy_raw)
{
struct rapl_domain *rd;
u64 energy_now;
/* Prevent CPU hotplug so that the RAPL domain does not go
* away while reading the counter.
*/
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
if (!rapl_read_data_raw(rd, ENERGY_COUNTER, true, &energy_now)) {
*energy_raw = energy_now;
cpus_read_unlock();
return 0;
}
cpus_read_unlock();
return -EIO;
}
static int get_max_energy_counter(struct powercap_zone *pcd_dev, u64 *energy)
{
struct rapl_domain *rd = power_zone_to_rapl_domain(pcd_dev);
*energy = rapl_unit_xlate(rd, ENERGY_UNIT, ENERGY_STATUS_MASK, 0);
return 0;
}
static int release_zone(struct powercap_zone *power_zone)
{
struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
struct rapl_package *rp = rd->rp;
/* The package zone is the last zone of a package; we can free the
* memory here since all children have been unregistered.
*/
if (rd->id == RAPL_DOMAIN_PACKAGE) {
kfree(rd);
rp->domains = NULL;
}
return 0;
}
static int find_nr_power_limit(struct rapl_domain *rd)
{
int i, nr_pl = 0;
for (i = 0; i < NR_POWER_LIMITS; i++) {
if (is_pl_valid(rd, i))
nr_pl++;
}
return nr_pl;
}
static int set_domain_enable(struct powercap_zone *power_zone, bool mode)
{
struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
struct rapl_defaults *defaults = get_defaults(rd->rp);
int ret;
cpus_read_lock();
ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, mode);
if (!ret && defaults->set_floor_freq)
defaults->set_floor_freq(rd, mode);
cpus_read_unlock();
return ret;
}
static int get_domain_enable(struct powercap_zone *power_zone, bool *mode)
{
struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone);
u64 val;
int ret;
if (rd->rpl[POWER_LIMIT1].locked) {
*mode = false;
return 0;
}
cpus_read_lock();
ret = rapl_read_pl_data(rd, POWER_LIMIT1, PL_ENABLE, true, &val);
if (!ret)
*mode = val;
cpus_read_unlock();
return ret;
}
/* per RAPL domain ops, in the order of rapl_domain_type */
static const struct powercap_zone_ops zone_ops[] = {
/* RAPL_DOMAIN_PACKAGE */
{
.get_energy_uj = get_energy_counter,
.get_max_energy_range_uj = get_max_energy_counter,
.release = release_zone,
.set_enable = set_domain_enable,
.get_enable = get_domain_enable,
},
/* RAPL_DOMAIN_PP0 */
{
.get_energy_uj = get_energy_counter,
.get_max_energy_range_uj = get_max_energy_counter,
.release = release_zone,
.set_enable = set_domain_enable,
.get_enable = get_domain_enable,
},
/* RAPL_DOMAIN_PP1 */
{
.get_energy_uj = get_energy_counter,
.get_max_energy_range_uj = get_max_energy_counter,
.release = release_zone,
.set_enable = set_domain_enable,
.get_enable = get_domain_enable,
},
/* RAPL_DOMAIN_DRAM */
{
.get_energy_uj = get_energy_counter,
.get_max_energy_range_uj = get_max_energy_counter,
.release = release_zone,
.set_enable = set_domain_enable,
.get_enable = get_domain_enable,
},
/* RAPL_DOMAIN_PLATFORM */
{
.get_energy_uj = get_energy_counter,
.get_max_energy_range_uj = get_max_energy_counter,
.release = release_zone,
.set_enable = set_domain_enable,
.get_enable = get_domain_enable,
},
};
/*
* The constraint index used by powercap can differ from the power limit (PL)
* index because some PLs may be missing due to non-existent MSRs. So we
* need to convert here by considering only the valid PLs (name populated).
*/
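/*
* For example, if a domain supports PL1 and PL4 but not PL2, powercap
* constraint id 0 maps to POWER_LIMIT1 and constraint id 1 maps to
* POWER_LIMIT4.
*/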
static int contraint_to_pl(struct rapl_domain *rd, int cid)
{
int i, j;
for (i = POWER_LIMIT1, j = 0; i < NR_POWER_LIMITS; i++) {
if (is_pl_valid(rd, i) && j++ == cid) {
pr_debug("%s: index %d\n", __func__, i);
return i;
}
}
pr_err("Cannot find matching power limit for constraint %d\n", cid);
return -EINVAL;
}
static int set_power_limit(struct powercap_zone *power_zone, int cid,
u64 power_limit)
{
struct rapl_domain *rd;
struct rapl_package *rp;
int ret = 0;
int id;
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
rp = rd->rp;
ret = rapl_write_pl_data(rd, id, PL_LIMIT, power_limit);
if (!ret)
package_power_limit_irq_save(rp);
cpus_read_unlock();
return ret;
}
static int get_current_power_limit(struct powercap_zone *power_zone, int cid,
u64 *data)
{
struct rapl_domain *rd;
u64 val;
int ret = 0;
int id;
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
ret = rapl_read_pl_data(rd, id, PL_LIMIT, true, &val);
if (!ret)
*data = val;
cpus_read_unlock();
return ret;
}
static int set_time_window(struct powercap_zone *power_zone, int cid,
u64 window)
{
struct rapl_domain *rd;
int ret = 0;
int id;
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
ret = rapl_write_pl_data(rd, id, PL_TIME_WINDOW, window);
cpus_read_unlock();
return ret;
}
static int get_time_window(struct powercap_zone *power_zone, int cid,
u64 *data)
{
struct rapl_domain *rd;
u64 val;
int ret = 0;
int id;
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
ret = rapl_read_pl_data(rd, id, PL_TIME_WINDOW, true, &val);
if (!ret)
*data = val;
cpus_read_unlock();
return ret;
}
static const char *get_constraint_name(struct powercap_zone *power_zone,
int cid)
{
struct rapl_domain *rd;
int id;
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
if (id >= 0)
return rd->rpl[id].name;
return NULL;
}
static int get_max_power(struct powercap_zone *power_zone, int cid, u64 *data)
{
struct rapl_domain *rd;
u64 val;
int ret = 0;
int id;
cpus_read_lock();
rd = power_zone_to_rapl_domain(power_zone);
id = contraint_to_pl(rd, cid);
ret = rapl_read_pl_data(rd, id, PL_MAX_POWER, true, &val);
if (!ret)
*data = val;
/* As a generalization rule, PL4 would be around two times PL2. */
if (id == POWER_LIMIT4)
*data = *data * 2;
cpus_read_unlock();
return ret;
}
static const struct powercap_zone_constraint_ops constraint_ops = {
.set_power_limit_uw = set_power_limit,
.get_power_limit_uw = get_current_power_limit,
.set_time_window_us = set_time_window,
.get_time_window_us = get_time_window,
.get_max_power_uw = get_max_power,
.get_name = get_constraint_name,
};
/* Return the id used for read_raw/write_raw callback */
static int get_rid(struct rapl_package *rp)
{
return rp->lead_cpu >= 0 ? rp->lead_cpu : rp->id;
}
/* called after domain detection and package level data are set */
static void rapl_init_domains(struct rapl_package *rp)
{
enum rapl_domain_type i;
enum rapl_domain_reg_id j;
struct rapl_domain *rd = rp->domains;
for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
unsigned int mask = rp->domain_map & (1 << i);
int t;
if (!mask)
continue;
rd->rp = rp;
if (i == RAPL_DOMAIN_PLATFORM && rp->id > 0) {
snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "psys-%d",
rp->lead_cpu >= 0 ? topology_physical_package_id(rp->lead_cpu) :
rp->id);
} else {
snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "%s",
rapl_domain_names[i]);
}
rd->id = i;
/* PL1 is supported by default */
rp->priv->limits[i] |= BIT(POWER_LIMIT1);
for (t = POWER_LIMIT1; t < NR_POWER_LIMITS; t++) {
if (rp->priv->limits[i] & BIT(t))
rd->rpl[t].name = pl_names[t];
}
for (j = 0; j < RAPL_DOMAIN_REG_MAX; j++)
rd->regs[j] = rp->priv->regs[i][j];
rd++;
}
}
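/*
* Illustrative example (numbers not taken from any datasheet): assuming a
* typical power unit divisor of 8, i.e. rd->power_unit = 1000000 / 8 =
* 125000 uW per raw step, a raw power field of 200 translates to
* 200 * 125000 = 25000000 uW (25 W), and converting 25000000 uW back with
* to_raw set yields 200 again.
*/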
static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type,
u64 value, int to_raw)
{
u64 units = 1;
struct rapl_defaults *defaults = get_defaults(rd->rp);
u64 scale = 1;
switch (type) {
case POWER_UNIT:
units = rd->power_unit;
break;
case ENERGY_UNIT:
scale = ENERGY_UNIT_SCALE;
units = rd->energy_unit;
break;
case TIME_UNIT:
return defaults->compute_time_window(rd, value, to_raw);
case ARBITRARY_UNIT:
default:
return value;
}
if (to_raw)
return div64_u64(value, units) * scale;
value *= units;
return div64_u64(value, scale);
}
/* RAPL primitives for MSR and MMIO I/F */
static struct rapl_primitive_info rpi_msr[NR_RAPL_PRIMITIVES] = {
/* name, mask, shift, msr index, unit divisor */
[POWER_LIMIT1] = PRIMITIVE_INFO_INIT(POWER_LIMIT1, POWER_LIMIT1_MASK, 0,
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
[POWER_LIMIT2] = PRIMITIVE_INFO_INIT(POWER_LIMIT2, POWER_LIMIT2_MASK, 32,
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
[POWER_LIMIT4] = PRIMITIVE_INFO_INIT(POWER_LIMIT4, POWER_LIMIT4_MASK, 0,
RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0),
[ENERGY_COUNTER] = PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0),
[FW_LOCK] = PRIMITIVE_INFO_INIT(FW_LOCK, POWER_LOW_LOCK, 31,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[FW_HIGH_LOCK] = PRIMITIVE_INFO_INIT(FW_LOCK, POWER_HIGH_LOCK, 63,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[PL1_ENABLE] = PRIMITIVE_INFO_INIT(PL1_ENABLE, POWER_LIMIT1_ENABLE, 15,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[PL1_CLAMP] = PRIMITIVE_INFO_INIT(PL1_CLAMP, POWER_LIMIT1_CLAMP, 16,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[PL2_ENABLE] = PRIMITIVE_INFO_INIT(PL2_ENABLE, POWER_LIMIT2_ENABLE, 47,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[PL2_CLAMP] = PRIMITIVE_INFO_INIT(PL2_CLAMP, POWER_LIMIT2_CLAMP, 48,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TIME_WINDOW1_MASK, 17,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
[TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TIME_WINDOW2_MASK, 49,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
[THERMAL_SPEC_POWER] = PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, POWER_INFO_THERMAL_SPEC_MASK,
0, RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
[MAX_POWER] = PRIMITIVE_INFO_INIT(MAX_POWER, POWER_INFO_MAX_MASK, 32,
RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
[MIN_POWER] = PRIMITIVE_INFO_INIT(MIN_POWER, POWER_INFO_MIN_MASK, 16,
RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
[MAX_TIME_WINDOW] = PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, POWER_INFO_MAX_TIME_WIN_MASK, 48,
RAPL_DOMAIN_REG_INFO, TIME_UNIT, 0),
[THROTTLED_TIME] = PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
RAPL_DOMAIN_REG_PERF, TIME_UNIT, 0),
[PRIORITY_LEVEL] = PRIMITIVE_INFO_INIT(PRIORITY_LEVEL, PP_POLICY_MASK, 0,
RAPL_DOMAIN_REG_POLICY, ARBITRARY_UNIT, 0),
[PSYS_POWER_LIMIT1] = PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT1, PSYS_POWER_LIMIT1_MASK, 0,
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
[PSYS_POWER_LIMIT2] = PRIMITIVE_INFO_INIT(PSYS_POWER_LIMIT2, PSYS_POWER_LIMIT2_MASK, 32,
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
[PSYS_PL1_ENABLE] = PRIMITIVE_INFO_INIT(PSYS_PL1_ENABLE, PSYS_POWER_LIMIT1_ENABLE, 17,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[PSYS_PL2_ENABLE] = PRIMITIVE_INFO_INIT(PSYS_PL2_ENABLE, PSYS_POWER_LIMIT2_ENABLE, 49,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[PSYS_TIME_WINDOW1] = PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW1, PSYS_TIME_WINDOW1_MASK, 19,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
[PSYS_TIME_WINDOW2] = PRIMITIVE_INFO_INIT(PSYS_TIME_WINDOW2, PSYS_TIME_WINDOW2_MASK, 51,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
/* non-hardware */
[AVERAGE_POWER] = PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0, POWER_UNIT,
RAPL_PRIMITIVE_DERIVED),
};
/* RAPL primitives for TPMI I/F */
static struct rapl_primitive_info rpi_tpmi[NR_RAPL_PRIMITIVES] = {
/* name, mask, shift, msr index, unit divisor */
[POWER_LIMIT1] = PRIMITIVE_INFO_INIT(POWER_LIMIT1, TPMI_POWER_LIMIT_MASK, 0,
RAPL_DOMAIN_REG_LIMIT, POWER_UNIT, 0),
[POWER_LIMIT2] = PRIMITIVE_INFO_INIT(POWER_LIMIT2, TPMI_POWER_LIMIT_MASK, 0,
RAPL_DOMAIN_REG_PL2, POWER_UNIT, 0),
[POWER_LIMIT4] = PRIMITIVE_INFO_INIT(POWER_LIMIT4, TPMI_POWER_LIMIT_MASK, 0,
RAPL_DOMAIN_REG_PL4, POWER_UNIT, 0),
[ENERGY_COUNTER] = PRIMITIVE_INFO_INIT(ENERGY_COUNTER, ENERGY_STATUS_MASK, 0,
RAPL_DOMAIN_REG_STATUS, ENERGY_UNIT, 0),
[PL1_LOCK] = PRIMITIVE_INFO_INIT(PL1_LOCK, POWER_HIGH_LOCK, 63,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[PL2_LOCK] = PRIMITIVE_INFO_INIT(PL2_LOCK, POWER_HIGH_LOCK, 63,
RAPL_DOMAIN_REG_PL2, ARBITRARY_UNIT, 0),
[PL4_LOCK] = PRIMITIVE_INFO_INIT(PL4_LOCK, POWER_HIGH_LOCK, 63,
RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
[PL1_ENABLE] = PRIMITIVE_INFO_INIT(PL1_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62,
RAPL_DOMAIN_REG_LIMIT, ARBITRARY_UNIT, 0),
[PL2_ENABLE] = PRIMITIVE_INFO_INIT(PL2_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62,
RAPL_DOMAIN_REG_PL2, ARBITRARY_UNIT, 0),
[PL4_ENABLE] = PRIMITIVE_INFO_INIT(PL4_ENABLE, TPMI_POWER_LIMIT_ENABLE, 62,
RAPL_DOMAIN_REG_PL4, ARBITRARY_UNIT, 0),
[TIME_WINDOW1] = PRIMITIVE_INFO_INIT(TIME_WINDOW1, TPMI_TIME_WINDOW_MASK, 18,
RAPL_DOMAIN_REG_LIMIT, TIME_UNIT, 0),
[TIME_WINDOW2] = PRIMITIVE_INFO_INIT(TIME_WINDOW2, TPMI_TIME_WINDOW_MASK, 18,
RAPL_DOMAIN_REG_PL2, TIME_UNIT, 0),
[THERMAL_SPEC_POWER] = PRIMITIVE_INFO_INIT(THERMAL_SPEC_POWER, TPMI_INFO_SPEC_MASK, 0,
RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
[MAX_POWER] = PRIMITIVE_INFO_INIT(MAX_POWER, TPMI_INFO_MAX_MASK, 36,
RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
[MIN_POWER] = PRIMITIVE_INFO_INIT(MIN_POWER, TPMI_INFO_MIN_MASK, 18,
RAPL_DOMAIN_REG_INFO, POWER_UNIT, 0),
[MAX_TIME_WINDOW] = PRIMITIVE_INFO_INIT(MAX_TIME_WINDOW, TPMI_INFO_MAX_TIME_WIN_MASK, 54,
RAPL_DOMAIN_REG_INFO, TIME_UNIT, 0),
[THROTTLED_TIME] = PRIMITIVE_INFO_INIT(THROTTLED_TIME, PERF_STATUS_THROTTLE_TIME_MASK, 0,
RAPL_DOMAIN_REG_PERF, TIME_UNIT, 0),
/* non-hardware */
[AVERAGE_POWER] = PRIMITIVE_INFO_INIT(AVERAGE_POWER, 0, 0, 0,
POWER_UNIT, RAPL_PRIMITIVE_DERIVED),
};
static struct rapl_primitive_info *get_rpi(struct rapl_package *rp, int prim)
{
struct rapl_primitive_info *rpi = rp->priv->rpi;
if (prim < 0 || prim > NR_RAPL_PRIMITIVES || !rpi)
return NULL;
return &rpi[prim];
}
static int rapl_config(struct rapl_package *rp)
{
switch (rp->priv->type) {
/* MMIO I/F shares the same register layout as MSR registers */
case RAPL_IF_MMIO:
case RAPL_IF_MSR:
rp->priv->defaults = (void *)defaults_msr;
rp->priv->rpi = (void *)rpi_msr;
break;
case RAPL_IF_TPMI:
rp->priv->defaults = (void *)&defaults_tpmi;
rp->priv->rpi = (void *)rpi_tpmi;
break;
default:
return -EINVAL;
}
return 0;
}
static enum rapl_primitives
prim_fixups(struct rapl_domain *rd, enum rapl_primitives prim)
{
struct rapl_defaults *defaults = get_defaults(rd->rp);
if (!defaults->spr_psys_bits)
return prim;
if (rd->id != RAPL_DOMAIN_PLATFORM)
return prim;
switch (prim) {
case POWER_LIMIT1:
return PSYS_POWER_LIMIT1;
case POWER_LIMIT2:
return PSYS_POWER_LIMIT2;
case PL1_ENABLE:
return PSYS_PL1_ENABLE;
case PL2_ENABLE:
return PSYS_PL2_ENABLE;
case TIME_WINDOW1:
return PSYS_TIME_WINDOW1;
case TIME_WINDOW2:
return PSYS_TIME_WINDOW2;
default:
return prim;
}
}
/* Read primitive data based on its related struct rapl_primitive_info.
* If the xlate flag is set, return the data translated according to its unit,
* i.e. time, energy or power.
* RAPL MSRs are non-architectural and are not laid out consistently across
* domains. Here we use the primitive info to write consolidated access
* functions.
* For a given primitive, the value is isolated with the MSR mask and shift.
* The unit conversion is pre-assigned based on the RAPL unit MSRs read at
* init time.
* 63-------------------------- 31--------------------------- 0
* |                           xxxxx (mask)                   |
* |                                |<- shift ----------------|
* 63-------------------------- 31--------------------------- 0
*/
static int rapl_read_data_raw(struct rapl_domain *rd,
enum rapl_primitives prim, bool xlate, u64 *data)
{
u64 value;
enum rapl_primitives prim_fixed = prim_fixups(rd, prim);
struct rapl_primitive_info *rpi = get_rpi(rd->rp, prim_fixed);
struct reg_action ra;
if (!rpi || !rpi->name || rpi->flag & RAPL_PRIMITIVE_DUMMY)
return -EINVAL;
ra.reg = rd->regs[rpi->id];
if (!ra.reg.val)
return -EINVAL;
/* non-hardware data are collected by the polling thread */
if (rpi->flag & RAPL_PRIMITIVE_DERIVED) {
*data = rd->rdd.primitives[prim];
return 0;
}
ra.mask = rpi->mask;
if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
pr_debug("failed to read reg 0x%llx for %s:%s\n", ra.reg.val, rd->rp->name, rd->name);
return -EIO;
}
value = ra.value >> rpi->shift;
if (xlate)
*data = rapl_unit_xlate(rd, rpi->unit, value, 0);
else
*data = value;
return 0;
}
/* Similar use of primitive info in the read counterpart */
static int rapl_write_data_raw(struct rapl_domain *rd,
enum rapl_primitives prim,
unsigned long long value)
{
enum rapl_primitives prim_fixed = prim_fixups(rd, prim);
struct rapl_primitive_info *rpi = get_rpi(rd->rp, prim_fixed);
u64 bits;
struct reg_action ra;
int ret;
if (!rpi || !rpi->name || rpi->flag & RAPL_PRIMITIVE_DUMMY)
return -EINVAL;
bits = rapl_unit_xlate(rd, rpi->unit, value, 1);
bits <<= rpi->shift;
bits &= rpi->mask;
memset(&ra, 0, sizeof(ra));
ra.reg = rd->regs[rpi->id];
ra.mask = rpi->mask;
ra.value = bits;
ret = rd->rp->priv->write_raw(get_rid(rd->rp), &ra);
return ret;
}
static int rapl_read_pl_data(struct rapl_domain *rd, int pl,
enum pl_prims pl_prim, bool xlate, u64 *data)
{
enum rapl_primitives prim = get_pl_prim(rd, pl, pl_prim);
if (!is_pl_valid(rd, pl))
return -EINVAL;
return rapl_read_data_raw(rd, prim, xlate, data);
}
static int rapl_write_pl_data(struct rapl_domain *rd, int pl,
enum pl_prims pl_prim,
unsigned long long value)
{
enum rapl_primitives prim = get_pl_prim(rd, pl, pl_prim);
if (!is_pl_valid(rd, pl))
return -EINVAL;
if (rd->rpl[pl].locked) {
pr_warn("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]);
return -EACCES;
}
return rapl_write_data_raw(rd, prim, value);
}
/*
* Raw RAPL data stored in MSRs are in certain scales. We need to
* convert them into standard units based on the units reported in
* the RAPL unit MSRs. This is specific to the CPU, as the method used to
* calculate the units differs between CPUs.
* We convert the units to the format below, depending on the CPU:
* i.e.
* energy unit: picoJoules : Represented in picoJoules by default
* power unit : microWatts : Represented in milliWatts by default
* time unit : microseconds: Represented in seconds by default
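* For example, with the commonly observed MSR_RAPL_POWER_UNIT value of
* 0x000A0E03 this gives a power unit of 1/8 W (125000 uW per raw step),
* an energy unit of 1/2^14 J (~61 uJ per count) and a time unit of
* 1/1024 s (976 us); the exact fields depend on the CPU, see the
* check_unit callbacks below.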
*/
static int rapl_check_unit_core(struct rapl_domain *rd)
{
struct reg_action ra;
u32 value;
ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT];
ra.mask = ~0;
if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
ra.reg.val, rd->rp->name, rd->name);
return -ENODEV;
}
value = (ra.value & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
rd->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
value = (ra.value & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
rd->power_unit = 1000000 / (1 << value);
value = (ra.value & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
rd->time_unit = 1000000 / (1 << value);
pr_debug("Core CPU %s:%s energy=%dpJ, time=%dus, power=%duW\n",
rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit);
return 0;
}
static int rapl_check_unit_atom(struct rapl_domain *rd)
{
struct reg_action ra;
u32 value;
ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT];
ra.mask = ~0;
if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
ra.reg.val, rd->rp->name, rd->name);
return -ENODEV;
}
value = (ra.value & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
rd->energy_unit = ENERGY_UNIT_SCALE * 1 << value;
value = (ra.value & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
rd->power_unit = (1 << value) * 1000;
value = (ra.value & TIME_UNIT_MASK) >> TIME_UNIT_OFFSET;
rd->time_unit = 1000000 / (1 << value);
pr_debug("Atom %s:%s energy=%dpJ, time=%dus, power=%duW\n",
rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit);
return 0;
}
static void power_limit_irq_save_cpu(void *info)
{
u32 l, h = 0;
struct rapl_package *rp = (struct rapl_package *)info;
/* save the state of PLN irq mask bit before disabling it */
rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) {
rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE;
rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED;
}
l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}
/* REVISIT:
* When package power limit is set artificially low by RAPL, LVT
* thermal interrupt for package power limit should be ignored
* since we are not really exceeding the real limit. The intention
* is to avoid excessive interrupts while we are trying to save power.
* A useful feature might be routing the package_power_limit interrupt
* to userspace via eventfd. Once we have a use case, this is simple
* to do by adding an atomic notifier.
*/
static void package_power_limit_irq_save(struct rapl_package *rp)
{
if (rp->lead_cpu < 0)
return;
if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
return;
smp_call_function_single(rp->lead_cpu, power_limit_irq_save_cpu, rp, 1);
}
/*
* Restore per package power limit interrupt enable state. Called from cpu
* hotplug code on package removal.
*/
static void package_power_limit_irq_restore(struct rapl_package *rp)
{
u32 l, h;
if (rp->lead_cpu < 0)
return;
if (!boot_cpu_has(X86_FEATURE_PTS) || !boot_cpu_has(X86_FEATURE_PLN))
return;
/* irq enable state not saved, nothing to restore */
if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED))
return;
rdmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, &l, &h);
if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE)
l |= PACKAGE_THERM_INT_PLN_ENABLE;
else
l &= ~PACKAGE_THERM_INT_PLN_ENABLE;
wrmsr_safe(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
}
static void set_floor_freq_default(struct rapl_domain *rd, bool mode)
{
int i;
/* Always enable clamping so that the p-state can go below the OS requested
* range: power capping takes priority over the guaranteed frequency.
*/
rapl_write_pl_data(rd, POWER_LIMIT1, PL_CLAMP, mode);
for (i = POWER_LIMIT2; i < NR_POWER_LIMITS; i++) {
rapl_write_pl_data(rd, i, PL_ENABLE, mode);
rapl_write_pl_data(rd, i, PL_CLAMP, mode);
}
}
static void set_floor_freq_atom(struct rapl_domain *rd, bool enable)
{
static u32 power_ctrl_orig_val;
struct rapl_defaults *defaults = get_defaults(rd->rp);
u32 mdata;
if (!defaults->floor_freq_reg_addr) {
pr_err("Invalid floor frequency config register\n");
return;
}
if (!power_ctrl_orig_val)
iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_CR_READ,
defaults->floor_freq_reg_addr,
&power_ctrl_orig_val);
mdata = power_ctrl_orig_val;
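/*
* When enabling, clear the 7-bit field at bits 14:8 of the power budget
* control register and program it to 1. This is understood to drop the
* frequency floor to its minimum so that power capping can take effect;
* the exact field semantics are platform specific.
*/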
if (enable) {
mdata &= ~(0x7f << 8);
mdata |= 1 << 8;
}
iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_CR_WRITE,
defaults->floor_freq_reg_addr, mdata);
}
static u64 rapl_compute_time_window_core(struct rapl_domain *rd, u64 value,
bool to_raw)
{
u64 f, y; /* fraction and exp. used for time unit */
/*
* Special processing based on 2^Y*(1+F/4), refer
* to Intel Software Developer's manual Vol.3B: CH 14.9.3.
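* For example, with time_unit = 976 us (a 1/1024 s time unit) and a raw
* field encoding Y = 0xA, F = 0x1, the decoded window is
* (1 << 10) * (4 + 1) * 976 / 4 = 1249280 us, i.e. roughly 1.25 s.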
*/
if (!to_raw) {
f = (value & 0x60) >> 5;
y = value & 0x1f;
value = (1 << y) * (4 + f) * rd->time_unit / 4;
} else {
if (value < rd->time_unit)
return 0;
do_div(value, rd->time_unit);
y = ilog2(value);
/*
* The target hardware field is 7 bits wide, so return all ones
* if the exponent is too large.
*/
if (y > 0x1f)
return 0x7f;
f = div64_u64(4 * (value - (1ULL << y)), 1ULL << y);
value = (y & 0x1f) | ((f & 0x3) << 5);
}
return value;
}
static u64 rapl_compute_time_window_atom(struct rapl_domain *rd, u64 value,
bool to_raw)
{
/*
* Atom time unit encoding is straightforward: val * time_unit, where
* time_unit defaults to 1 sec and is never 0.
*/
if (!to_raw)
return (value) ? value * rd->time_unit : rd->time_unit;
value = div64_u64(value, rd->time_unit);
return value;
}
/* TPMI Unit register has different layout */
#define TPMI_POWER_UNIT_OFFSET POWER_UNIT_OFFSET
#define TPMI_POWER_UNIT_MASK POWER_UNIT_MASK
#define TPMI_ENERGY_UNIT_OFFSET 0x06
#define TPMI_ENERGY_UNIT_MASK 0x7C0
#define TPMI_TIME_UNIT_OFFSET 0x0C
#define TPMI_TIME_UNIT_MASK 0xF000
static int rapl_check_unit_tpmi(struct rapl_domain *rd)
{
struct reg_action ra;
u32 value;
ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT];
ra.mask = ~0;
if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) {
pr_err("Failed to read power unit REG 0x%llx on %s:%s, exit.\n",
ra.reg.val, rd->rp->name, rd->name);
return -ENODEV;
}
value = (ra.value & TPMI_ENERGY_UNIT_MASK) >> TPMI_ENERGY_UNIT_OFFSET;
rd->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value);
value = (ra.value & TPMI_POWER_UNIT_MASK) >> TPMI_POWER_UNIT_OFFSET;
rd->power_unit = 1000000 / (1 << value);
value = (ra.value & TPMI_TIME_UNIT_MASK) >> TPMI_TIME_UNIT_OFFSET;
rd->time_unit = 1000000 / (1 << value);
pr_debug("Core CPU %s:%s energy=%dpJ, time=%dus, power=%duW\n",
rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit);
return 0;
}
static const struct rapl_defaults defaults_tpmi = {
.check_unit = rapl_check_unit_tpmi,
/* Reuse existing logic, ignore the PL_CLAMP failures and enable all Power Limits */
.set_floor_freq = set_floor_freq_default,
.compute_time_window = rapl_compute_time_window_core,
};
static const struct rapl_defaults rapl_defaults_core = {
.floor_freq_reg_addr = 0,
.check_unit = rapl_check_unit_core,
.set_floor_freq = set_floor_freq_default,
.compute_time_window = rapl_compute_time_window_core,
};
static const struct rapl_defaults rapl_defaults_hsw_server = {
.check_unit = rapl_check_unit_core,
.set_floor_freq = set_floor_freq_default,
.compute_time_window = rapl_compute_time_window_core,
.dram_domain_energy_unit = 15300,
};
static const struct rapl_defaults rapl_defaults_spr_server = {
.check_unit = rapl_check_unit_core,
.set_floor_freq = set_floor_freq_default,
.compute_time_window = rapl_compute_time_window_core,
.psys_domain_energy_unit = 1000000000,
.spr_psys_bits = true,
};
static const struct rapl_defaults rapl_defaults_byt = {
.floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_BYT,
.check_unit = rapl_check_unit_atom,
.set_floor_freq = set_floor_freq_atom,
.compute_time_window = rapl_compute_time_window_atom,
};
static const struct rapl_defaults rapl_defaults_tng = {
.floor_freq_reg_addr = IOSF_CPU_POWER_BUDGET_CTL_TNG,
.check_unit = rapl_check_unit_atom,
.set_floor_freq = set_floor_freq_atom,
.compute_time_window = rapl_compute_time_window_atom,
};
static const struct rapl_defaults rapl_defaults_ann = {
.floor_freq_reg_addr = 0,
.check_unit = rapl_check_unit_atom,
.set_floor_freq = NULL,
.compute_time_window = rapl_compute_time_window_atom,
};
static const struct rapl_defaults rapl_defaults_cht = {
.floor_freq_reg_addr = 0,
.check_unit = rapl_check_unit_atom,
.set_floor_freq = NULL,
.compute_time_window = rapl_compute_time_window_atom,
};
static const struct rapl_defaults rapl_defaults_amd = {
.check_unit = rapl_check_unit_core,
};
static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_G, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_NNPI, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_INTEL_FAM6_MODEL(LAKEFIELD, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, &rapl_defaults_byt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, &rapl_defaults_cht),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_MID, &rapl_defaults_tng),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT_MID, &rapl_defaults_ann),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &rapl_defaults_core),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &rapl_defaults_hsw_server),
X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &rapl_defaults_hsw_server),
X86_MATCH_VENDOR_FAM(AMD, 0x17, &rapl_defaults_amd),
X86_MATCH_VENDOR_FAM(AMD, 0x19, &rapl_defaults_amd),
X86_MATCH_VENDOR_FAM(HYGON, 0x18, &rapl_defaults_amd),
{}
};
MODULE_DEVICE_TABLE(x86cpu, rapl_ids);
/* Read out all raw primitive data for the domains once */
static void rapl_update_domain_data(struct rapl_package *rp)
{
int dmn, prim;
u64 val;
for (dmn = 0; dmn < rp->nr_domains; dmn++) {
pr_debug("update %s domain %s data\n", rp->name,
rp->domains[dmn].name);
/* exclude non-raw primitives */
for (prim = 0; prim < NR_RAW_PRIMITIVES; prim++) {
struct rapl_primitive_info *rpi = get_rpi(rp, prim);
if (!rapl_read_data_raw(&rp->domains[dmn], prim,
rpi->unit, &val))
rp->domains[dmn].rdd.primitives[prim] = val;
}
}
}
static int rapl_package_register_powercap(struct rapl_package *rp)
{
struct rapl_domain *rd;
struct powercap_zone *power_zone = NULL;
int nr_pl, ret;
/* Update the domain data of the new package */
rapl_update_domain_data(rp);
/* first we register package domain as the parent zone */
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
if (rd->id == RAPL_DOMAIN_PACKAGE) {
nr_pl = find_nr_power_limit(rd);
pr_debug("register package domain %s\n", rp->name);
power_zone = powercap_register_zone(&rd->power_zone,
rp->priv->control_type, rp->name,
NULL, &zone_ops[rd->id], nr_pl,
&constraint_ops);
if (IS_ERR(power_zone)) {
pr_debug("failed to register power zone %s\n",
rp->name);
return PTR_ERR(power_zone);
}
/* track parent zone in per package/socket data */
rp->power_zone = power_zone;
/* done, only one package domain per socket */
break;
}
}
if (!power_zone) {
pr_err("no package domain found, unknown topology!\n");
return -ENODEV;
}
/* now register domains as children of the socket/package */
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
struct powercap_zone *parent = rp->power_zone;
if (rd->id == RAPL_DOMAIN_PACKAGE)
continue;
if (rd->id == RAPL_DOMAIN_PLATFORM)
parent = NULL;
/* number of power limits per domain varies */
nr_pl = find_nr_power_limit(rd);
power_zone = powercap_register_zone(&rd->power_zone,
rp->priv->control_type,
rd->name, parent,
&zone_ops[rd->id], nr_pl,
&constraint_ops);
if (IS_ERR(power_zone)) {
pr_debug("failed to register power_zone, %s:%s\n",
rp->name, rd->name);
ret = PTR_ERR(power_zone);
goto err_cleanup;
}
}
return 0;
err_cleanup:
/*
* Clean up previously initialized domains within the package if we
* failed after the first domain setup.
*/
while (--rd >= rp->domains) {
pr_debug("unregister %s domain %s\n", rp->name, rd->name);
powercap_unregister_zone(rp->priv->control_type,
&rd->power_zone);
}
return ret;
}
static int rapl_check_domain(int domain, struct rapl_package *rp)
{
struct reg_action ra;
switch (domain) {
case RAPL_DOMAIN_PACKAGE:
case RAPL_DOMAIN_PP0:
case RAPL_DOMAIN_PP1:
case RAPL_DOMAIN_DRAM:
case RAPL_DOMAIN_PLATFORM:
ra.reg = rp->priv->regs[domain][RAPL_DOMAIN_REG_STATUS];
break;
default:
pr_err("invalid domain id %d\n", domain);
return -EINVAL;
}
/* make sure the domain counters are available and contain non-zero
* values, otherwise skip the domain.
*/
ra.mask = ENERGY_STATUS_MASK;
if (rp->priv->read_raw(get_rid(rp), &ra) || !ra.value)
return -ENODEV;
return 0;
}
/*
* Get per domain energy/power/time unit.
* RAPL Interfaces without per domain unit register will use the package
* scope unit register to set per domain units.
*/
static int rapl_get_domain_unit(struct rapl_domain *rd)
{
struct rapl_defaults *defaults = get_defaults(rd->rp);
int ret;
if (!rd->regs[RAPL_DOMAIN_REG_UNIT].val) {
if (!rd->rp->priv->reg_unit.val) {
pr_err("No valid Unit register found\n");
return -ENODEV;
}
rd->regs[RAPL_DOMAIN_REG_UNIT] = rd->rp->priv->reg_unit;
}
if (!defaults->check_unit) {
pr_err("missing .check_unit() callback\n");
return -ENODEV;
}
ret = defaults->check_unit(rd);
if (ret)
return ret;
if (rd->id == RAPL_DOMAIN_DRAM && defaults->dram_domain_energy_unit)
rd->energy_unit = defaults->dram_domain_energy_unit;
if (rd->id == RAPL_DOMAIN_PLATFORM && defaults->psys_domain_energy_unit)
rd->energy_unit = defaults->psys_domain_energy_unit;
return 0;
}
/*
* Check if power limits are available. Two cases when they are not available:
* 1. Locked by BIOS, in this case we still provide read-only access so that
* users can see what limit is set by the BIOS.
* 2. Some CPUs make some domains monitoring-only, which means the PLx MSRs may not
* exist at all. In this case, we do not show the constraints in powercap.
*
* Called after domains are detected and initialized.
*/
static void rapl_detect_powerlimit(struct rapl_domain *rd)
{
u64 val64;
int i;
for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) {
if (!rapl_read_pl_data(rd, i, PL_LOCK, false, &val64)) {
if (val64) {
rd->rpl[i].locked = true;
pr_info("%s:%s:%s locked by BIOS\n",
rd->rp->name, rd->name, pl_names[i]);
}
}
if (rapl_read_pl_data(rd, i, PL_LIMIT, false, &val64))
rd->rpl[i].name = NULL;
}
}
/* Detect active and valid domains for the given CPU; the caller must
* ensure the CPU belongs to the targeted package and that CPU hotplug is disabled.
*/
static int rapl_detect_domains(struct rapl_package *rp)
{
struct rapl_domain *rd;
int i;
for (i = 0; i < RAPL_DOMAIN_MAX; i++) {
/* use physical package id to read counters */
if (!rapl_check_domain(i, rp)) {
rp->domain_map |= 1 << i;
pr_info("Found RAPL domain %s\n", rapl_domain_names[i]);
}
}
rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX);
if (!rp->nr_domains) {
pr_debug("no valid rapl domains found in %s\n", rp->name);
return -ENODEV;
}
pr_debug("found %d domains on %s\n", rp->nr_domains, rp->name);
rp->domains = kcalloc(rp->nr_domains, sizeof(struct rapl_domain),
GFP_KERNEL);
if (!rp->domains)
return -ENOMEM;
rapl_init_domains(rp);
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
rapl_get_domain_unit(rd);
rapl_detect_powerlimit(rd);
}
return 0;
}
/* called from CPU hotplug notifier, hotplug lock held */
void rapl_remove_package(struct rapl_package *rp)
{
struct rapl_domain *rd, *rd_package = NULL;
package_power_limit_irq_restore(rp);
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
int i;
for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) {
rapl_write_pl_data(rd, i, PL_ENABLE, 0);
rapl_write_pl_data(rd, i, PL_CLAMP, 0);
}
if (rd->id == RAPL_DOMAIN_PACKAGE) {
rd_package = rd;
continue;
}
pr_debug("remove package, undo power limit on %s: %s\n",
rp->name, rd->name);
powercap_unregister_zone(rp->priv->control_type,
&rd->power_zone);
}
/* do parent zone last */
powercap_unregister_zone(rp->priv->control_type,
&rd_package->power_zone);
list_del(&rp->plist);
kfree(rp);
}
EXPORT_SYMBOL_GPL(rapl_remove_package);
/* caller to ensure CPU hotplug lock is held */
struct rapl_package *rapl_find_package_domain(int id, struct rapl_if_priv *priv, bool id_is_cpu)
{
struct rapl_package *rp;
int uid;
if (id_is_cpu)
uid = topology_logical_die_id(id);
else
uid = id;
list_for_each_entry(rp, &rapl_packages, plist) {
if (rp->id == uid
&& rp->priv->control_type == priv->control_type)
return rp;
}
return NULL;
}
EXPORT_SYMBOL_GPL(rapl_find_package_domain);
/* called from CPU hotplug notifier, hotplug lock held */
struct rapl_package *rapl_add_package(int id, struct rapl_if_priv *priv, bool id_is_cpu)
{
struct rapl_package *rp;
int ret;
rp = kzalloc(sizeof(struct rapl_package), GFP_KERNEL);
if (!rp)
return ERR_PTR(-ENOMEM);
if (id_is_cpu) {
rp->id = topology_logical_die_id(id);
rp->lead_cpu = id;
if (topology_max_die_per_package() > 1)
snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d-die-%d",
topology_physical_package_id(id), topology_die_id(id));
else
snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d",
topology_physical_package_id(id));
} else {
rp->id = id;
rp->lead_cpu = -1;
snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d", id);
}
rp->priv = priv;
ret = rapl_config(rp);
if (ret)
goto err_free_package;
/* check if the package contains valid domains */
if (rapl_detect_domains(rp)) {
ret = -ENODEV;
goto err_free_package;
}
ret = rapl_package_register_powercap(rp);
if (!ret) {
INIT_LIST_HEAD(&rp->plist);
list_add(&rp->plist, &rapl_packages);
return rp;
}
err_free_package:
kfree(rp->domains);
kfree(rp);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(rapl_add_package);
static void power_limit_state_save(void)
{
struct rapl_package *rp;
struct rapl_domain *rd;
int ret, i;
cpus_read_lock();
list_for_each_entry(rp, &rapl_packages, plist) {
if (!rp->power_zone)
continue;
rd = power_zone_to_rapl_domain(rp->power_zone);
for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++) {
ret = rapl_read_pl_data(rd, i, PL_LIMIT, true,
&rd->rpl[i].last_power_limit);
if (ret)
rd->rpl[i].last_power_limit = 0;
}
}
cpus_read_unlock();
}
static void power_limit_state_restore(void)
{
struct rapl_package *rp;
struct rapl_domain *rd;
int i;
cpus_read_lock();
list_for_each_entry(rp, &rapl_packages, plist) {
if (!rp->power_zone)
continue;
rd = power_zone_to_rapl_domain(rp->power_zone);
for (i = POWER_LIMIT1; i < NR_POWER_LIMITS; i++)
if (rd->rpl[i].last_power_limit)
rapl_write_pl_data(rd, i, PL_LIMIT,
rd->rpl[i].last_power_limit);
}
cpus_read_unlock();
}
static int rapl_pm_callback(struct notifier_block *nb,
unsigned long mode, void *_unused)
{
switch (mode) {
case PM_SUSPEND_PREPARE:
power_limit_state_save();
break;
case PM_POST_SUSPEND:
power_limit_state_restore();
break;
}
return NOTIFY_OK;
}
static struct notifier_block rapl_pm_notifier = {
.notifier_call = rapl_pm_callback,
};
static struct platform_device *rapl_msr_platdev;
static int __init rapl_init(void)
{
const struct x86_cpu_id *id;
int ret;
id = x86_match_cpu(rapl_ids);
if (id) {
defaults_msr = (struct rapl_defaults *)id->driver_data;
rapl_msr_platdev = platform_device_alloc("intel_rapl_msr", 0);
if (!rapl_msr_platdev)
return -ENOMEM;
ret = platform_device_add(rapl_msr_platdev);
if (ret) {
platform_device_put(rapl_msr_platdev);
return ret;
}
}
ret = register_pm_notifier(&rapl_pm_notifier);
if (ret && rapl_msr_platdev) {
platform_device_del(rapl_msr_platdev);
platform_device_put(rapl_msr_platdev);
}
return ret;
}
static void __exit rapl_exit(void)
{
platform_device_unregister(rapl_msr_platdev);
unregister_pm_notifier(&rapl_pm_notifier);
}
fs_initcall(rapl_init);
module_exit(rapl_exit);
MODULE_DESCRIPTION("Intel Runtime Average Power Limit (RAPL) common code");
MODULE_AUTHOR("Jacob Pan <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/powercap/intel_rapl_common.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Linaro Limited
*
* Author: Daniel Lezcano <[email protected]>
*
* The powercap based Dynamic Thermal Power Management framework
* provides to the userspace a consistent API to set the power limit
* on some devices.
*
* DTPM defines the functions to create a tree of constraints. Each
* parent node is a virtual description of the aggregation of the
* children. It propagates the constraints set at its level to its
* children and collect the children power information. The leaves of
* the tree are the real devices which have the ability to get their
* current power consumption and set their power limit.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/dtpm.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/powercap.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include "dtpm_subsys.h"
#define DTPM_POWER_LIMIT_FLAG 0
static const char *constraint_name[] = {
"Instantaneous",
};
static DEFINE_MUTEX(dtpm_lock);
static struct powercap_control_type *pct;
static struct dtpm *root;
static int get_time_window_us(struct powercap_zone *pcz, int cid, u64 *window)
{
return -ENOSYS;
}
static int set_time_window_us(struct powercap_zone *pcz, int cid, u64 window)
{
return -ENOSYS;
}
static int get_max_power_range_uw(struct powercap_zone *pcz, u64 *max_power_uw)
{
struct dtpm *dtpm = to_dtpm(pcz);
*max_power_uw = dtpm->power_max - dtpm->power_min;
return 0;
}
static int __get_power_uw(struct dtpm *dtpm, u64 *power_uw)
{
struct dtpm *child;
u64 power;
int ret = 0;
if (dtpm->ops) {
*power_uw = dtpm->ops->get_power_uw(dtpm);
return 0;
}
*power_uw = 0;
list_for_each_entry(child, &dtpm->children, sibling) {
ret = __get_power_uw(child, &power);
if (ret)
break;
*power_uw += power;
}
return ret;
}
static int get_power_uw(struct powercap_zone *pcz, u64 *power_uw)
{
return __get_power_uw(to_dtpm(pcz), power_uw);
}
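/*
* Weights are expressed out of 1024 relative to the parent's maximum power.
* For example, a parent with power_max = 3000 mW and two children with
* power_max of 2000 mW and 1000 mW ends up with child weights of 683 and
* 341 respectively.
*/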
static void __dtpm_rebalance_weight(struct dtpm *dtpm)
{
struct dtpm *child;
list_for_each_entry(child, &dtpm->children, sibling) {
child->weight = DIV64_U64_ROUND_CLOSEST(
child->power_max * 1024, dtpm->power_max);
pr_debug("Setting weight '%d' for '%s'\n",
child->weight, child->zone.name);
__dtpm_rebalance_weight(child);
}
}
static void __dtpm_sub_power(struct dtpm *dtpm)
{
struct dtpm *parent = dtpm->parent;
while (parent) {
parent->power_min -= dtpm->power_min;
parent->power_max -= dtpm->power_max;
parent->power_limit -= dtpm->power_limit;
parent = parent->parent;
}
}
static void __dtpm_add_power(struct dtpm *dtpm)
{
struct dtpm *parent = dtpm->parent;
while (parent) {
parent->power_min += dtpm->power_min;
parent->power_max += dtpm->power_max;
parent->power_limit += dtpm->power_limit;
parent = parent->parent;
}
}
/**
* dtpm_update_power - Update the power on the dtpm
* @dtpm: a pointer to a dtpm structure to update
*
* Function to update the power values of the dtpm node specified in
* parameter. These new values will be propagated to the tree.
*
* Return: zero on success, -EINVAL if the values are inconsistent
*/
int dtpm_update_power(struct dtpm *dtpm)
{
int ret;
__dtpm_sub_power(dtpm);
ret = dtpm->ops->update_power_uw(dtpm);
if (ret)
pr_err("Failed to update power for '%s': %d\n",
dtpm->zone.name, ret);
if (!test_bit(DTPM_POWER_LIMIT_FLAG, &dtpm->flags))
dtpm->power_limit = dtpm->power_max;
__dtpm_add_power(dtpm);
if (root)
__dtpm_rebalance_weight(root);
return ret;
}
/**
* dtpm_release_zone - Cleanup when the node is released
* @pcz: a pointer to a powercap_zone structure
*
* Do some housecleaning and update the weight on the tree. The
* release will be denied if the node has children. This function must
* be called by the specific release callback of the different
* backends.
*
* Return: 0 on success, -EBUSY if there are children
*/
int dtpm_release_zone(struct powercap_zone *pcz)
{
struct dtpm *dtpm = to_dtpm(pcz);
struct dtpm *parent = dtpm->parent;
if (!list_empty(&dtpm->children))
return -EBUSY;
if (parent)
list_del(&dtpm->sibling);
__dtpm_sub_power(dtpm);
if (dtpm->ops)
dtpm->ops->release(dtpm);
else
kfree(dtpm);
return 0;
}
static int get_power_limit_uw(struct powercap_zone *pcz,
int cid, u64 *power_limit)
{
*power_limit = to_dtpm(pcz)->power_limit;
return 0;
}
/*
* Set the power limit on the nodes; the power limit is distributed
* according to the weights of the children.
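* For example, with child weights of 683 and 341 (out of 1024), a
* 1000000 uW limit is split into roughly 667000 uW and 333000 uW.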
*
* The dtpm node lock must be held when calling this function.
*/
static int __set_power_limit_uw(struct dtpm *dtpm, int cid, u64 power_limit)
{
struct dtpm *child;
int ret = 0;
u64 power;
/*
* A max power limitation means we remove the power limit,
* otherwise we set a constraint and flag the dtpm node.
*/
if (power_limit == dtpm->power_max) {
clear_bit(DTPM_POWER_LIMIT_FLAG, &dtpm->flags);
} else {
set_bit(DTPM_POWER_LIMIT_FLAG, &dtpm->flags);
}
pr_debug("Setting power limit for '%s': %llu uW\n",
dtpm->zone.name, power_limit);
/*
* Only the leaves of the dtpm tree have ops to get/set the power
*/
if (dtpm->ops) {
dtpm->power_limit = dtpm->ops->set_power_uw(dtpm, power_limit);
} else {
dtpm->power_limit = 0;
list_for_each_entry(child, &dtpm->children, sibling) {
/*
* Integer division rounding will inevitably
* lead to a different min or max value when
* set several times. In order to restore the
* initial value, we force the child's min or
* max power every time if the constraint is
* at the boundaries.
*/
if (power_limit == dtpm->power_max) {
power = child->power_max;
} else if (power_limit == dtpm->power_min) {
power = child->power_min;
} else {
power = DIV_ROUND_CLOSEST_ULL(
power_limit * child->weight, 1024);
}
pr_debug("Setting power limit for '%s': %llu uW\n",
child->zone.name, power);
ret = __set_power_limit_uw(child, cid, power);
if (!ret)
ret = get_power_limit_uw(&child->zone, cid, &power);
if (ret)
break;
dtpm->power_limit += power;
}
}
return ret;
}
static int set_power_limit_uw(struct powercap_zone *pcz,
int cid, u64 power_limit)
{
struct dtpm *dtpm = to_dtpm(pcz);
int ret;
/*
* Don't allow values outside of the power range previously
* set when initializing the power numbers.
*/
power_limit = clamp_val(power_limit, dtpm->power_min, dtpm->power_max);
ret = __set_power_limit_uw(dtpm, cid, power_limit);
pr_debug("%s: power limit: %llu uW, power max: %llu uW\n",
dtpm->zone.name, dtpm->power_limit, dtpm->power_max);
return ret;
}
static const char *get_constraint_name(struct powercap_zone *pcz, int cid)
{
return constraint_name[cid];
}
static int get_max_power_uw(struct powercap_zone *pcz, int id, u64 *max_power)
{
*max_power = to_dtpm(pcz)->power_max;
return 0;
}
static struct powercap_zone_constraint_ops constraint_ops = {
.set_power_limit_uw = set_power_limit_uw,
.get_power_limit_uw = get_power_limit_uw,
.set_time_window_us = set_time_window_us,
.get_time_window_us = get_time_window_us,
.get_max_power_uw = get_max_power_uw,
.get_name = get_constraint_name,
};
static struct powercap_zone_ops zone_ops = {
.get_max_power_range_uw = get_max_power_range_uw,
.get_power_uw = get_power_uw,
.release = dtpm_release_zone,
};
/**
* dtpm_init - Allocate and initialize a dtpm struct
* @dtpm: The dtpm struct pointer to be initialized
* @ops: The dtpm device specific ops, NULL for a virtual node
*/
void dtpm_init(struct dtpm *dtpm, struct dtpm_ops *ops)
{
if (dtpm) {
INIT_LIST_HEAD(&dtpm->children);
INIT_LIST_HEAD(&dtpm->sibling);
dtpm->weight = 1024;
dtpm->ops = ops;
}
}
/**
* dtpm_unregister - Unregister a dtpm node from the hierarchy tree
* @dtpm: a pointer to a dtpm structure corresponding to the node to be removed
*
* Call the underlying powercap unregister function. That will call
* the release callback of the powercap zone.
*/
void dtpm_unregister(struct dtpm *dtpm)
{
powercap_unregister_zone(pct, &dtpm->zone);
pr_debug("Unregistered dtpm node '%s'\n", dtpm->zone.name);
}
/**
* dtpm_register - Register a dtpm node in the hierarchy tree
* @name: a string specifying the name of the node
* @dtpm: a pointer to a dtpm structure corresponding to the new node
* @parent: a pointer to a dtpm structure corresponding to the parent node
*
* Create a dtpm node in the tree. If no parent is specified, the node
* is the root node of the hierarchy. If the root node already exists,
* then the registration will fail. The powercap controller must be
* initialized before calling this function.
*
* The dtpm structure must be initialized with the power numbers
* before calling this function.
*
* Return: zero on success, a negative value in case of error:
* -EAGAIN: the function is called before the framework is initialized.
* -EBUSY: the root node is already inserted
* -EINVAL: * there is no root node yet and @parent is specified
* * not all ops are defined
* * the parent has ops which are reserved for leaves
* Other negative values are reported back from the powercap framework
*/
int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
{
struct powercap_zone *pcz;
if (!pct)
return -EAGAIN;
if (root && !parent)
return -EBUSY;
if (!root && parent)
return -EINVAL;
if (parent && parent->ops)
return -EINVAL;
if (!dtpm)
return -EINVAL;
if (dtpm->ops && !(dtpm->ops->set_power_uw &&
dtpm->ops->get_power_uw &&
dtpm->ops->update_power_uw &&
dtpm->ops->release))
return -EINVAL;
pcz = powercap_register_zone(&dtpm->zone, pct, name,
parent ? &parent->zone : NULL,
&zone_ops, MAX_DTPM_CONSTRAINTS,
&constraint_ops);
if (IS_ERR(pcz))
return PTR_ERR(pcz);
if (parent) {
list_add_tail(&dtpm->sibling, &parent->children);
dtpm->parent = parent;
} else {
root = dtpm;
}
if (dtpm->ops && !dtpm->ops->update_power_uw(dtpm)) {
__dtpm_add_power(dtpm);
dtpm->power_limit = dtpm->power_max;
}
pr_debug("Registered dtpm node '%s' / %llu-%llu uW, \n",
dtpm->zone.name, dtpm->power_min, dtpm->power_max);
return 0;
}
static struct dtpm *dtpm_setup_virtual(const struct dtpm_node *hierarchy,
struct dtpm *parent)
{
struct dtpm *dtpm;
int ret;
dtpm = kzalloc(sizeof(*dtpm), GFP_KERNEL);
if (!dtpm)
return ERR_PTR(-ENOMEM);
dtpm_init(dtpm, NULL);
ret = dtpm_register(hierarchy->name, dtpm, parent);
if (ret) {
pr_err("Failed to register dtpm node '%s': %d\n",
hierarchy->name, ret);
kfree(dtpm);
return ERR_PTR(ret);
}
return dtpm;
}
static struct dtpm *dtpm_setup_dt(const struct dtpm_node *hierarchy,
struct dtpm *parent)
{
struct device_node *np;
int i, ret;
np = of_find_node_by_path(hierarchy->name);
if (!np) {
pr_err("Failed to find '%s'\n", hierarchy->name);
return ERR_PTR(-ENXIO);
}
for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) {
if (!dtpm_subsys[i]->setup)
continue;
ret = dtpm_subsys[i]->setup(parent, np);
if (ret) {
pr_err("Failed to setup '%s': %d\n", dtpm_subsys[i]->name, ret);
of_node_put(np);
return ERR_PTR(ret);
}
}
of_node_put(np);
/*
* By returning a NULL pointer, we let the caller know that there
* are no children for us, as we are a leaf of the tree
*/
return NULL;
}
typedef struct dtpm * (*dtpm_node_callback_t)(const struct dtpm_node *, struct dtpm *);
static dtpm_node_callback_t dtpm_node_callback[] = {
[DTPM_NODE_VIRTUAL] = dtpm_setup_virtual,
[DTPM_NODE_DT] = dtpm_setup_dt,
};
static int dtpm_for_each_child(const struct dtpm_node *hierarchy,
const struct dtpm_node *it, struct dtpm *parent)
{
struct dtpm *dtpm;
int i, ret;
for (i = 0; hierarchy[i].name; i++) {
if (hierarchy[i].parent != it)
continue;
dtpm = dtpm_node_callback[hierarchy[i].type](&hierarchy[i], parent);
/*
* A NULL pointer means there are no children, hence we
* continue without recursing deeper.
*/
if (!dtpm)
continue;
/*
* There are multiple reasons why the callback could
* fail. The generic glue abstracts the backend, so it
* is not possible to report back or take a decision
* based on the specific error. In any case, a failure
* here is not critical to the hierarchy creation: we
* assume the underlying service was not found and
* continue without this branch in the tree, logging a
* warning so the missing node is recorded.
*/
if (IS_ERR(dtpm)) {
pr_warn("Failed to create '%s' in the hierarchy\n",
hierarchy[i].name);
continue;
}
ret = dtpm_for_each_child(hierarchy, &hierarchy[i], dtpm);
if (ret)
return ret;
}
return 0;
}
/**
* dtpm_create_hierarchy - Create the dtpm hierarchy
* @dtpm_match_table: OF match table whose matching entry's data points to an
* array of struct dtpm_node describing the hierarchy
*
* The function is called by the platform specific code with the
* description of the different nodes in the hierarchy. It creates the
* tree in the sysfs filesystem under the powercap dtpm entry.
*
* The expected tree has the format:
*
* struct dtpm_node hierarchy[] = {
* [0] { .name = "topmost", type = DTPM_NODE_VIRTUAL },
* [1] { .name = "package", .type = DTPM_NODE_VIRTUAL, .parent = &hierarchy[0] },
* [2] { .name = "/cpus/cpu0", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
* [3] { .name = "/cpus/cpu1", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
* [4] { .name = "/cpus/cpu2", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
* [5] { .name = "/cpus/cpu3", .type = DTPM_NODE_DT, .parent = &hierarchy[1] },
* [6] { }
* };
*
* The last element is always an empty one and marks the end of the
* array.
*
* Return: zero on success, a negative value in case of error. Errors
* are reported back from the underlying functions.
*/
int dtpm_create_hierarchy(struct of_device_id *dtpm_match_table)
{
const struct of_device_id *match;
const struct dtpm_node *hierarchy;
struct device_node *np;
int i, ret;
mutex_lock(&dtpm_lock);
if (pct) {
ret = -EBUSY;
goto out_unlock;
}
pct = powercap_register_control_type(NULL, "dtpm", NULL);
if (IS_ERR(pct)) {
pr_err("Failed to register control type\n");
ret = PTR_ERR(pct);
goto out_pct;
}
ret = -ENODEV;
np = of_find_node_by_path("/");
if (!np)
goto out_err;
match = of_match_node(dtpm_match_table, np);
of_node_put(np);
if (!match)
goto out_err;
hierarchy = match->data;
if (!hierarchy) {
ret = -EFAULT;
goto out_err;
}
ret = dtpm_for_each_child(hierarchy, NULL, NULL);
if (ret)
goto out_err;
for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) {
if (!dtpm_subsys[i]->init)
continue;
ret = dtpm_subsys[i]->init();
if (ret)
pr_info("Failed to initialize '%s': %d",
dtpm_subsys[i]->name, ret);
}
mutex_unlock(&dtpm_lock);
return 0;
out_err:
powercap_unregister_control_type(pct);
out_pct:
pct = NULL;
out_unlock:
mutex_unlock(&dtpm_lock);
return ret;
}
EXPORT_SYMBOL_GPL(dtpm_create_hierarchy);
static void __dtpm_destroy_hierarchy(struct dtpm *dtpm)
{
struct dtpm *child, *aux;
list_for_each_entry_safe(child, aux, &dtpm->children, sibling)
__dtpm_destroy_hierarchy(child);
/*
* At this point, we know all children have already been
* removed by the recursive calls above
*/
dtpm_unregister(dtpm);
}
void dtpm_destroy_hierarchy(void)
{
int i;
mutex_lock(&dtpm_lock);
if (!pct)
goto out_unlock;
__dtpm_destroy_hierarchy(root);
for (i = 0; i < ARRAY_SIZE(dtpm_subsys); i++) {
if (!dtpm_subsys[i]->exit)
continue;
dtpm_subsys[i]->exit();
}
powercap_unregister_control_type(pct);
pct = NULL;
root = NULL;
out_unlock:
mutex_unlock(&dtpm_lock);
}
EXPORT_SYMBOL_GPL(dtpm_destroy_hierarchy);
| linux-master | drivers/powercap/dtpm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Power capping class
* Copyright (c) 2013, Intel Corporation.
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/kstrtox.h>
#include <linux/slab.h>
#include <linux/powercap.h>
#define to_powercap_zone(n) container_of(n, struct powercap_zone, dev)
#define to_powercap_control_type(n) \
container_of(n, struct powercap_control_type, dev)
/* Power zone show function */
#define define_power_zone_show(_attr) \
static ssize_t _attr##_show(struct device *dev, \
struct device_attribute *dev_attr,\
char *buf) \
{ \
u64 value; \
ssize_t len = -EINVAL; \
struct powercap_zone *power_zone = to_powercap_zone(dev); \
\
if (power_zone->ops->get_##_attr) { \
if (!power_zone->ops->get_##_attr(power_zone, &value)) \
len = sprintf(buf, "%lld\n", value); \
} \
\
return len; \
}
/* The only meaningful input is 0 (reset), others are silently ignored */
#define define_power_zone_store(_attr) \
static ssize_t _attr##_store(struct device *dev,\
struct device_attribute *dev_attr, \
const char *buf, size_t count) \
{ \
int err; \
struct powercap_zone *power_zone = to_powercap_zone(dev); \
u64 value; \
\
err = kstrtoull(buf, 10, &value); \
if (err) \
return -EINVAL; \
if (value) \
return count; \
if (power_zone->ops->reset_##_attr) { \
if (!power_zone->ops->reset_##_attr(power_zone)) \
return count; \
} \
\
return -EINVAL; \
}
/* Power zone constraint show function */
#define define_power_zone_constraint_show(_attr) \
static ssize_t show_constraint_##_attr(struct device *dev, \
struct device_attribute *dev_attr,\
char *buf) \
{ \
u64 value; \
ssize_t len = -ENODATA; \
struct powercap_zone *power_zone = to_powercap_zone(dev); \
int id; \
struct powercap_zone_constraint *pconst;\
\
if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
return -EINVAL; \
if (id >= power_zone->const_id_cnt) \
return -EINVAL; \
pconst = &power_zone->constraints[id]; \
if (pconst && pconst->ops && pconst->ops->get_##_attr) { \
if (!pconst->ops->get_##_attr(power_zone, id, &value)) \
len = sprintf(buf, "%lld\n", value); \
} \
\
return len; \
}
/* Power zone constraint store function */
#define define_power_zone_constraint_store(_attr) \
static ssize_t store_constraint_##_attr(struct device *dev,\
struct device_attribute *dev_attr, \
const char *buf, size_t count) \
{ \
int err; \
u64 value; \
struct powercap_zone *power_zone = to_powercap_zone(dev); \
int id; \
struct powercap_zone_constraint *pconst;\
\
if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \
return -EINVAL; \
if (id >= power_zone->const_id_cnt) \
return -EINVAL; \
pconst = &power_zone->constraints[id]; \
err = kstrtoull(buf, 10, &value); \
if (err) \
return -EINVAL; \
if (pconst && pconst->ops && pconst->ops->set_##_attr) { \
if (!pconst->ops->set_##_attr(power_zone, id, value)) \
return count; \
} \
\
return -ENODATA; \
}
/* Power zone information callbacks */
define_power_zone_show(power_uw);
define_power_zone_show(max_power_range_uw);
define_power_zone_show(energy_uj);
define_power_zone_store(energy_uj);
define_power_zone_show(max_energy_range_uj);
/* Power zone attributes */
static DEVICE_ATTR_RO(max_power_range_uw);
static DEVICE_ATTR_RO(power_uw);
static DEVICE_ATTR_RO(max_energy_range_uj);
static DEVICE_ATTR_RW(energy_uj);
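/*
* For instance, define_power_zone_show(energy_uj) above expands into an
* energy_uj_show() helper, and DEVICE_ATTR_RW(energy_uj) ties it, together
* with energy_uj_store(), to the per-zone "energy_uj" sysfs file.
*/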
/* Power zone constraint attributes callbacks */
define_power_zone_constraint_show(power_limit_uw);
define_power_zone_constraint_store(power_limit_uw);
define_power_zone_constraint_show(time_window_us);
define_power_zone_constraint_store(time_window_us);
define_power_zone_constraint_show(max_power_uw);
define_power_zone_constraint_show(min_power_uw);
define_power_zone_constraint_show(max_time_window_us);
define_power_zone_constraint_show(min_time_window_us);
/* For one time seeding of constraint device attributes */
struct powercap_constraint_attr {
struct device_attribute power_limit_attr;
struct device_attribute time_window_attr;
struct device_attribute max_power_attr;
struct device_attribute min_power_attr;
struct device_attribute max_time_window_attr;
struct device_attribute min_time_window_attr;
struct device_attribute name_attr;
};
static struct powercap_constraint_attr
constraint_attrs[MAX_CONSTRAINTS_PER_ZONE];
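/*
* The constraint attributes show up in sysfs under each zone, e.g. for the
* intel-rapl control type:
* /sys/class/powercap/intel-rapl/intel-rapl:0/constraint_0_power_limit_uw
*/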
/* A list of powercap control_types */
static LIST_HEAD(powercap_cntrl_list);
/* Mutex to protect list of powercap control_types */
static DEFINE_MUTEX(powercap_cntrl_list_lock);
#define POWERCAP_CONSTRAINT_NAME_LEN 30 /* Some limit to avoid overflow */
static ssize_t show_constraint_name(struct device *dev,
struct device_attribute *dev_attr,
char *buf)
{
const char *name;
struct powercap_zone *power_zone = to_powercap_zone(dev);
int id;
ssize_t len = -ENODATA;
struct powercap_zone_constraint *pconst;
if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id))
return -EINVAL;
if (id >= power_zone->const_id_cnt)
return -EINVAL;
pconst = &power_zone->constraints[id];
if (pconst && pconst->ops && pconst->ops->get_name) {
name = pconst->ops->get_name(power_zone, id);
if (name) {
sprintf(buf, "%.*s\n", POWERCAP_CONSTRAINT_NAME_LEN - 1,
name);
len = strlen(buf);
}
}
return len;
}
static int create_constraint_attribute(int id, const char *name,
int mode,
struct device_attribute *dev_attr,
ssize_t (*show)(struct device *,
struct device_attribute *, char *),
ssize_t (*store)(struct device *,
struct device_attribute *,
const char *, size_t)
)
{
dev_attr->attr.name = kasprintf(GFP_KERNEL, "constraint_%d_%s",
id, name);
if (!dev_attr->attr.name)
return -ENOMEM;
dev_attr->attr.mode = mode;
dev_attr->show = show;
dev_attr->store = store;
return 0;
}
static void free_constraint_attributes(void)
{
int i;
for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
kfree(constraint_attrs[i].power_limit_attr.attr.name);
kfree(constraint_attrs[i].time_window_attr.attr.name);
kfree(constraint_attrs[i].name_attr.attr.name);
kfree(constraint_attrs[i].max_power_attr.attr.name);
kfree(constraint_attrs[i].min_power_attr.attr.name);
kfree(constraint_attrs[i].max_time_window_attr.attr.name);
kfree(constraint_attrs[i].min_time_window_attr.attr.name);
}
}
static int seed_constraint_attributes(void)
{
int i;
int ret;
for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) {
ret = create_constraint_attribute(i, "power_limit_uw",
S_IWUSR | S_IRUGO,
&constraint_attrs[i].power_limit_attr,
show_constraint_power_limit_uw,
store_constraint_power_limit_uw);
if (ret)
goto err_alloc;
ret = create_constraint_attribute(i, "time_window_us",
S_IWUSR | S_IRUGO,
&constraint_attrs[i].time_window_attr,
show_constraint_time_window_us,
store_constraint_time_window_us);
if (ret)
goto err_alloc;
ret = create_constraint_attribute(i, "name", S_IRUGO,
&constraint_attrs[i].name_attr,
show_constraint_name,
NULL);
if (ret)
goto err_alloc;
ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO,
&constraint_attrs[i].max_power_attr,
show_constraint_max_power_uw,
NULL);
if (ret)
goto err_alloc;
ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO,
&constraint_attrs[i].min_power_attr,
show_constraint_min_power_uw,
NULL);
if (ret)
goto err_alloc;
ret = create_constraint_attribute(i, "max_time_window_us",
S_IRUGO,
&constraint_attrs[i].max_time_window_attr,
show_constraint_max_time_window_us,
NULL);
if (ret)
goto err_alloc;
ret = create_constraint_attribute(i, "min_time_window_us",
S_IRUGO,
&constraint_attrs[i].min_time_window_attr,
show_constraint_min_time_window_us,
NULL);
if (ret)
goto err_alloc;
}
return 0;
err_alloc:
free_constraint_attributes();
return ret;
}
static int create_constraints(struct powercap_zone *power_zone,
int nr_constraints,
const struct powercap_zone_constraint_ops *const_ops)
{
int i;
int ret = 0;
int count;
struct powercap_zone_constraint *pconst;
if (!power_zone || !const_ops || !const_ops->get_power_limit_uw ||
!const_ops->set_power_limit_uw ||
!const_ops->get_time_window_us ||
!const_ops->set_time_window_us)
return -EINVAL;
count = power_zone->zone_attr_count;
for (i = 0; i < nr_constraints; ++i) {
pconst = &power_zone->constraints[i];
pconst->ops = const_ops;
pconst->id = power_zone->const_id_cnt;
power_zone->const_id_cnt++;
power_zone->zone_dev_attrs[count++] =
&constraint_attrs[i].power_limit_attr.attr;
power_zone->zone_dev_attrs[count++] =
&constraint_attrs[i].time_window_attr.attr;
if (pconst->ops->get_name)
power_zone->zone_dev_attrs[count++] =
&constraint_attrs[i].name_attr.attr;
if (pconst->ops->get_max_power_uw)
power_zone->zone_dev_attrs[count++] =
&constraint_attrs[i].max_power_attr.attr;
if (pconst->ops->get_min_power_uw)
power_zone->zone_dev_attrs[count++] =
&constraint_attrs[i].min_power_attr.attr;
if (pconst->ops->get_max_time_window_us)
power_zone->zone_dev_attrs[count++] =
&constraint_attrs[i].max_time_window_attr.attr;
if (pconst->ops->get_min_time_window_us)
power_zone->zone_dev_attrs[count++] =
&constraint_attrs[i].min_time_window_attr.attr;
}
power_zone->zone_attr_count = count;
return ret;
}
static bool control_type_valid(void *control_type)
{
struct powercap_control_type *pos = NULL;
bool found = false;
mutex_lock(&powercap_cntrl_list_lock);
list_for_each_entry(pos, &powercap_cntrl_list, node) {
if (pos == control_type) {
found = true;
break;
}
}
mutex_unlock(&powercap_cntrl_list_lock);
return found;
}
static ssize_t name_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct powercap_zone *power_zone = to_powercap_zone(dev);
return sprintf(buf, "%s\n", power_zone->name);
}
static DEVICE_ATTR_RO(name);
/* Create zone and attributes in sysfs */
static void create_power_zone_common_attributes(
struct powercap_zone *power_zone)
{
int count = 0;
power_zone->zone_dev_attrs[count++] = &dev_attr_name.attr;
if (power_zone->ops->get_max_energy_range_uj)
power_zone->zone_dev_attrs[count++] =
&dev_attr_max_energy_range_uj.attr;
if (power_zone->ops->get_energy_uj) {
if (power_zone->ops->reset_energy_uj)
dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUSR;
else
dev_attr_energy_uj.attr.mode = S_IRUSR;
power_zone->zone_dev_attrs[count++] =
&dev_attr_energy_uj.attr;
}
if (power_zone->ops->get_power_uw)
power_zone->zone_dev_attrs[count++] =
&dev_attr_power_uw.attr;
if (power_zone->ops->get_max_power_range_uw)
power_zone->zone_dev_attrs[count++] =
&dev_attr_max_power_range_uw.attr;
power_zone->zone_dev_attrs[count] = NULL;
power_zone->zone_attr_count = count;
}
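/*
 * Illustrative note (added for clarity, not part of the original file):
 * together with the constraint attributes added by create_constraints(), a
 * registered zone typically ends up exposing a sysfs directory such as
 *
 *   /sys/class/powercap/<control_type>/<control_type>:<id>/
 *       name, enabled, energy_uj, power_uw, max_energy_range_uj,
 *       constraint_0_power_limit_uw, constraint_0_time_window_us, ...
 *
 * where only the files backed by ops actually provided by the driver are
 * created ("enabled" comes from the class-wide powercap_groups).
 */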
static void powercap_release(struct device *dev)
{
bool allocated;
if (dev->parent) {
struct powercap_zone *power_zone = to_powercap_zone(dev);
/* Store flag as the release() may free memory */
allocated = power_zone->allocated;
/* Remove id from parent idr struct */
idr_remove(power_zone->parent_idr, power_zone->id);
/* Destroy idrs allocated for this zone */
idr_destroy(&power_zone->idr);
kfree(power_zone->name);
kfree(power_zone->zone_dev_attrs);
kfree(power_zone->constraints);
if (power_zone->ops->release)
power_zone->ops->release(power_zone);
if (allocated)
kfree(power_zone);
} else {
struct powercap_control_type *control_type =
to_powercap_control_type(dev);
/* Store flag as the release() may free memory */
allocated = control_type->allocated;
idr_destroy(&control_type->idr);
mutex_destroy(&control_type->lock);
if (control_type->ops && control_type->ops->release)
control_type->ops->release(control_type);
if (allocated)
kfree(control_type);
}
}
static ssize_t enabled_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
bool mode = true;
/* Default is enabled */
if (dev->parent) {
struct powercap_zone *power_zone = to_powercap_zone(dev);
if (power_zone->ops->get_enable)
if (power_zone->ops->get_enable(power_zone, &mode))
mode = false;
} else {
struct powercap_control_type *control_type =
to_powercap_control_type(dev);
if (control_type->ops && control_type->ops->get_enable)
if (control_type->ops->get_enable(control_type, &mode))
mode = false;
}
return sprintf(buf, "%d\n", mode);
}
static ssize_t enabled_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
bool mode;
if (kstrtobool(buf, &mode))
return -EINVAL;
if (dev->parent) {
struct powercap_zone *power_zone = to_powercap_zone(dev);
if (power_zone->ops->set_enable)
if (!power_zone->ops->set_enable(power_zone, mode))
return len;
} else {
struct powercap_control_type *control_type =
to_powercap_control_type(dev);
if (control_type->ops && control_type->ops->set_enable)
if (!control_type->ops->set_enable(control_type, mode))
return len;
}
return -ENOSYS;
}
static DEVICE_ATTR_RW(enabled);
static struct attribute *powercap_attrs[] = {
&dev_attr_enabled.attr,
NULL,
};
ATTRIBUTE_GROUPS(powercap);
static struct class powercap_class = {
.name = "powercap",
.dev_release = powercap_release,
.dev_groups = powercap_groups,
};
struct powercap_zone *powercap_register_zone(
struct powercap_zone *power_zone,
struct powercap_control_type *control_type,
const char *name,
struct powercap_zone *parent,
const struct powercap_zone_ops *ops,
int nr_constraints,
const struct powercap_zone_constraint_ops *const_ops)
{
int result;
int nr_attrs;
if (!name || !control_type || !ops ||
nr_constraints > MAX_CONSTRAINTS_PER_ZONE ||
(!ops->get_energy_uj && !ops->get_power_uw) ||
!control_type_valid(control_type))
return ERR_PTR(-EINVAL);
if (power_zone) {
if (!ops->release)
return ERR_PTR(-EINVAL);
memset(power_zone, 0, sizeof(*power_zone));
} else {
power_zone = kzalloc(sizeof(*power_zone), GFP_KERNEL);
if (!power_zone)
return ERR_PTR(-ENOMEM);
power_zone->allocated = true;
}
power_zone->ops = ops;
power_zone->control_type_inst = control_type;
if (!parent) {
power_zone->dev.parent = &control_type->dev;
power_zone->parent_idr = &control_type->idr;
} else {
power_zone->dev.parent = &parent->dev;
power_zone->parent_idr = &parent->idr;
}
power_zone->dev.class = &powercap_class;
mutex_lock(&control_type->lock);
/* Using idr to get the unique id */
result = idr_alloc(power_zone->parent_idr, NULL, 0, 0, GFP_KERNEL);
if (result < 0)
goto err_idr_alloc;
power_zone->id = result;
idr_init(&power_zone->idr);
result = -ENOMEM;
power_zone->name = kstrdup(name, GFP_KERNEL);
if (!power_zone->name)
goto err_name_alloc;
power_zone->constraints = kcalloc(nr_constraints,
sizeof(*power_zone->constraints),
GFP_KERNEL);
if (!power_zone->constraints)
goto err_const_alloc;
nr_attrs = nr_constraints * POWERCAP_CONSTRAINTS_ATTRS +
POWERCAP_ZONE_MAX_ATTRS + 1;
power_zone->zone_dev_attrs = kcalloc(nr_attrs, sizeof(void *),
GFP_KERNEL);
if (!power_zone->zone_dev_attrs)
goto err_attr_alloc;
create_power_zone_common_attributes(power_zone);
result = create_constraints(power_zone, nr_constraints, const_ops);
if (result)
goto err_dev_ret;
power_zone->zone_dev_attrs[power_zone->zone_attr_count] = NULL;
power_zone->dev_zone_attr_group.attrs = power_zone->zone_dev_attrs;
power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group;
power_zone->dev_attr_groups[1] = NULL;
power_zone->dev.groups = power_zone->dev_attr_groups;
dev_set_name(&power_zone->dev, "%s:%x",
dev_name(power_zone->dev.parent),
power_zone->id);
result = device_register(&power_zone->dev);
if (result) {
put_device(&power_zone->dev);
mutex_unlock(&control_type->lock);
return ERR_PTR(result);
}
control_type->nr_zones++;
mutex_unlock(&control_type->lock);
return power_zone;
err_dev_ret:
kfree(power_zone->zone_dev_attrs);
err_attr_alloc:
kfree(power_zone->constraints);
err_const_alloc:
kfree(power_zone->name);
err_name_alloc:
idr_remove(power_zone->parent_idr, power_zone->id);
err_idr_alloc:
if (power_zone->allocated)
kfree(power_zone);
mutex_unlock(&control_type->lock);
return ERR_PTR(result);
}
EXPORT_SYMBOL_GPL(powercap_register_zone);
int powercap_unregister_zone(struct powercap_control_type *control_type,
struct powercap_zone *power_zone)
{
if (!power_zone || !control_type)
return -EINVAL;
mutex_lock(&control_type->lock);
control_type->nr_zones--;
mutex_unlock(&control_type->lock);
device_unregister(&power_zone->dev);
return 0;
}
EXPORT_SYMBOL_GPL(powercap_unregister_zone);
struct powercap_control_type *powercap_register_control_type(
struct powercap_control_type *control_type,
const char *name,
const struct powercap_control_type_ops *ops)
{
int result;
if (!name)
return ERR_PTR(-EINVAL);
if (control_type) {
if (!ops || !ops->release)
return ERR_PTR(-EINVAL);
memset(control_type, 0, sizeof(*control_type));
} else {
control_type = kzalloc(sizeof(*control_type), GFP_KERNEL);
if (!control_type)
return ERR_PTR(-ENOMEM);
control_type->allocated = true;
}
mutex_init(&control_type->lock);
control_type->ops = ops;
INIT_LIST_HEAD(&control_type->node);
control_type->dev.class = &powercap_class;
dev_set_name(&control_type->dev, "%s", name);
result = device_register(&control_type->dev);
if (result) {
if (control_type->allocated)
kfree(control_type);
return ERR_PTR(result);
}
idr_init(&control_type->idr);
mutex_lock(&powercap_cntrl_list_lock);
list_add_tail(&control_type->node, &powercap_cntrl_list);
mutex_unlock(&powercap_cntrl_list_lock);
return control_type;
}
EXPORT_SYMBOL_GPL(powercap_register_control_type);
int powercap_unregister_control_type(struct powercap_control_type *control_type)
{
struct powercap_control_type *pos = NULL;
if (control_type->nr_zones) {
dev_err(&control_type->dev, "Zones of this type still not freed\n");
return -EINVAL;
}
mutex_lock(&powercap_cntrl_list_lock);
list_for_each_entry(pos, &powercap_cntrl_list, node) {
if (pos == control_type) {
list_del(&control_type->node);
mutex_unlock(&powercap_cntrl_list_lock);
device_unregister(&control_type->dev);
return 0;
}
}
mutex_unlock(&powercap_cntrl_list_lock);
return -ENODEV;
}
EXPORT_SYMBOL_GPL(powercap_unregister_control_type);
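/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * roughly how a hypothetical client driver could use the registration API
 * above.  Every foo_* identifier and the guard macro are invented for the
 * example, the values returned are placeholders, and the sketch relies on
 * the headers this file already includes (e.g. <linux/powercap.h>).
 */
#ifdef POWERCAP_SYS_EXAMPLE
static int foo_get_power_uw(struct powercap_zone *pz, u64 *power_uw)
{
	*power_uw = 1000000;	/* pretend the zone currently draws 1 W */
	return 0;
}

static int foo_get_power_limit_uw(struct powercap_zone *pz, int cid, u64 *limit)
{
	*limit = 2000000;	/* pretend the current cap is 2 W */
	return 0;
}

static int foo_set_power_limit_uw(struct powercap_zone *pz, int cid, u64 limit)
{
	return 0;		/* a real driver would program hardware here */
}

static int foo_get_time_window_us(struct powercap_zone *pz, int cid, u64 *win)
{
	*win = 1000;
	return 0;
}

static int foo_set_time_window_us(struct powercap_zone *pz, int cid, u64 win)
{
	return 0;
}

static const struct powercap_zone_ops foo_zone_ops = {
	.get_power_uw = foo_get_power_uw,
};

/* The four callbacks below are the minimum create_constraints() insists on. */
static const struct powercap_zone_constraint_ops foo_constraint_ops = {
	.get_power_limit_uw = foo_get_power_limit_uw,
	.set_power_limit_uw = foo_set_power_limit_uw,
	.get_time_window_us = foo_get_time_window_us,
	.set_time_window_us = foo_set_time_window_us,
};

static int foo_powercap_example_init(void)
{
	struct powercap_control_type *ct;
	struct powercap_zone *zone;

	/* Creates the "foo" control type under /sys/class/powercap */
	ct = powercap_register_control_type(NULL, "foo", NULL);
	if (IS_ERR(ct))
		return PTR_ERR(ct);

	/* Creates zone "foo:0" with a single constraint beneath it */
	zone = powercap_register_zone(NULL, ct, "foo-zone", NULL,
				      &foo_zone_ops, 1, &foo_constraint_ops);
	if (IS_ERR(zone)) {
		powercap_unregister_control_type(ct);
		return PTR_ERR(zone);
	}
	return 0;
}
#endif /* POWERCAP_SYS_EXAMPLE */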
static int __init powercap_init(void)
{
int result;
result = seed_constraint_attributes();
if (result)
return result;
return class_register(&powercap_class);
}
fs_initcall(powercap_init);
MODULE_DESCRIPTION("PowerCap sysfs Driver");
MODULE_AUTHOR("Srinivas Pandruvada <[email protected]>");
| linux-master | drivers/powercap/powercap_sys.c |
// SPDX-License-Identifier: GPL-2.0
/*
* SCMI Powercap support.
*
* Copyright (C) 2022 ARM Ltd.
*/
#include <linux/device.h>
#include <linux/math.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/powercap.h>
#include <linux/scmi_protocol.h>
#include <linux/slab.h>
#define to_scmi_powercap_zone(z) \
container_of(z, struct scmi_powercap_zone, zone)
static const struct scmi_powercap_proto_ops *powercap_ops;
struct scmi_powercap_zone {
bool registered;
bool invalid;
unsigned int height;
struct device *dev;
struct scmi_protocol_handle *ph;
const struct scmi_powercap_info *info;
struct scmi_powercap_zone *spzones;
struct powercap_zone zone;
struct list_head node;
};
struct scmi_powercap_root {
unsigned int num_zones;
struct scmi_powercap_zone *spzones;
struct list_head *registered_zones;
struct list_head scmi_zones;
};
static struct powercap_control_type *scmi_top_pcntrl;
static int scmi_powercap_zone_release(struct powercap_zone *pz)
{
return 0;
}
static int scmi_powercap_get_max_power_range_uw(struct powercap_zone *pz,
u64 *max_power_range_uw)
{
*max_power_range_uw = U32_MAX;
return 0;
}
static int scmi_powercap_get_power_uw(struct powercap_zone *pz,
u64 *power_uw)
{
struct scmi_powercap_zone *spz = to_scmi_powercap_zone(pz);
u32 avg_power, pai;
int ret;
if (!spz->info->powercap_monitoring)
return -EINVAL;
ret = powercap_ops->measurements_get(spz->ph, spz->info->id, &avg_power,
&pai);
if (ret)
return ret;
*power_uw = avg_power;
if (spz->info->powercap_scale_mw)
*power_uw *= 1000;
return 0;
}
static int scmi_powercap_zone_enable_set(struct powercap_zone *pz, bool mode)
{
struct scmi_powercap_zone *spz = to_scmi_powercap_zone(pz);
return powercap_ops->cap_enable_set(spz->ph, spz->info->id, mode);
}
static int scmi_powercap_zone_enable_get(struct powercap_zone *pz, bool *mode)
{
struct scmi_powercap_zone *spz = to_scmi_powercap_zone(pz);
return powercap_ops->cap_enable_get(spz->ph, spz->info->id, mode);
}
static const struct powercap_zone_ops zone_ops = {
.get_max_power_range_uw = scmi_powercap_get_max_power_range_uw,
.get_power_uw = scmi_powercap_get_power_uw,
.release = scmi_powercap_zone_release,
.set_enable = scmi_powercap_zone_enable_set,
.get_enable = scmi_powercap_zone_enable_get,
};
static void scmi_powercap_normalize_cap(const struct scmi_powercap_zone *spz,
u64 power_limit_uw, u32 *norm)
{
bool scale_mw = spz->info->powercap_scale_mw;
u64 val;
val = scale_mw ? DIV_ROUND_UP_ULL(power_limit_uw, 1000) : power_limit_uw;
/*
* This cast is lossless since here the clamped value is certain to be
* within the range [min_power_cap, max_power_cap], whose bounds are
* assured to be two unsigned 32-bit quantities.
*/
*norm = clamp_t(u32, val, spz->info->min_power_cap,
spz->info->max_power_cap);
*norm = rounddown(*norm, spz->info->power_cap_step);
val = (scale_mw) ? *norm * 1000 : *norm;
if (power_limit_uw != val)
dev_dbg(spz->dev,
"Normalized %s:CAP - requested:%llu - normalized:%llu\n",
spz->info->name, power_limit_uw, val);
}
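/*
 * Worked example for the normalization above (hypothetical numbers, added
 * for illustration only): with powercap_scale_mw set, min_power_cap = 500,
 * max_power_cap = 4000 and power_cap_step = 250 (all mW), a request of
 * 2560000 uW becomes DIV_ROUND_UP(2560000, 1000) = 2560 mW, is already
 * inside [500, 4000], and rounddown(2560, 250) = 2500 mW; since
 * 2500 * 1000 != 2560000, the dev_dbg() message reports the adjustment.
 */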
static int scmi_powercap_set_power_limit_uw(struct powercap_zone *pz, int cid,
u64 power_uw)
{
struct scmi_powercap_zone *spz = to_scmi_powercap_zone(pz);
u32 norm_power;
if (!spz->info->powercap_cap_config)
return -EINVAL;
scmi_powercap_normalize_cap(spz, power_uw, &norm_power);
return powercap_ops->cap_set(spz->ph, spz->info->id, norm_power, false);
}
static int scmi_powercap_get_power_limit_uw(struct powercap_zone *pz, int cid,
u64 *power_limit_uw)
{
struct scmi_powercap_zone *spz = to_scmi_powercap_zone(pz);
u32 power;
int ret;
ret = powercap_ops->cap_get(spz->ph, spz->info->id, &power);
if (ret)
return ret;
*power_limit_uw = power;
if (spz->info->powercap_scale_mw)
*power_limit_uw *= 1000;
return 0;
}
static void scmi_powercap_normalize_time(const struct scmi_powercap_zone *spz,
u64 time_us, u32 *norm)
{
/*
* This cast is lossless since here @time_us is certain to be within the
* range [min_pai, max_pai] whose bounds are assured to be two unsigned
* 32-bit quantities.
*/
*norm = clamp_t(u32, time_us, spz->info->min_pai, spz->info->max_pai);
*norm = rounddown(*norm, spz->info->pai_step);
if (time_us != *norm)
dev_dbg(spz->dev,
"Normalized %s:PAI - requested:%llu - normalized:%u\n",
spz->info->name, time_us, *norm);
}
static int scmi_powercap_set_time_window_us(struct powercap_zone *pz, int cid,
u64 time_window_us)
{
struct scmi_powercap_zone *spz = to_scmi_powercap_zone(pz);
u32 norm_pai;
if (!spz->info->powercap_pai_config)
return -EINVAL;
scmi_powercap_normalize_time(spz, time_window_us, &norm_pai);
return powercap_ops->pai_set(spz->ph, spz->info->id, norm_pai);
}
static int scmi_powercap_get_time_window_us(struct powercap_zone *pz, int cid,
u64 *time_window_us)
{
struct scmi_powercap_zone *spz = to_scmi_powercap_zone(pz);
int ret;
u32 pai;
ret = powercap_ops->pai_get(spz->ph, spz->info->id, &pai);
if (ret)
return ret;
*time_window_us = pai;
return 0;
}
static int scmi_powercap_get_max_power_uw(struct powercap_zone *pz, int cid,
u64 *max_power_uw)
{
struct scmi_powercap_zone *spz = to_scmi_powercap_zone(pz);
*max_power_uw = spz->info->max_power_cap;
if (spz->info->powercap_scale_mw)
*max_power_uw *= 1000;
return 0;
}
static int scmi_powercap_get_min_power_uw(struct powercap_zone *pz, int cid,
u64 *min_power_uw)
{
struct scmi_powercap_zone *spz = to_scmi_powercap_zone(pz);
*min_power_uw = spz->info->min_power_cap;
if (spz->info->powercap_scale_mw)
*min_power_uw *= 1000;
return 0;
}
static int scmi_powercap_get_max_time_window_us(struct powercap_zone *pz,
int cid, u64 *time_window_us)
{
struct scmi_powercap_zone *spz = to_scmi_powercap_zone(pz);
*time_window_us = spz->info->max_pai;
return 0;
}
static int scmi_powercap_get_min_time_window_us(struct powercap_zone *pz,
int cid, u64 *time_window_us)
{
struct scmi_powercap_zone *spz = to_scmi_powercap_zone(pz);
*time_window_us = (u64)spz->info->min_pai;
return 0;
}
static const char *scmi_powercap_get_name(struct powercap_zone *pz, int cid)
{
return "SCMI power-cap";
}
static const struct powercap_zone_constraint_ops constraint_ops = {
.set_power_limit_uw = scmi_powercap_set_power_limit_uw,
.get_power_limit_uw = scmi_powercap_get_power_limit_uw,
.set_time_window_us = scmi_powercap_set_time_window_us,
.get_time_window_us = scmi_powercap_get_time_window_us,
.get_max_power_uw = scmi_powercap_get_max_power_uw,
.get_min_power_uw = scmi_powercap_get_min_power_uw,
.get_max_time_window_us = scmi_powercap_get_max_time_window_us,
.get_min_time_window_us = scmi_powercap_get_min_time_window_us,
.get_name = scmi_powercap_get_name,
};
static void scmi_powercap_unregister_all_zones(struct scmi_powercap_root *pr)
{
int i;
/* Unregister child zones first, starting from the leaves */
for (i = pr->num_zones - 1; i >= 0; i--) {
if (!list_empty(&pr->registered_zones[i])) {
struct scmi_powercap_zone *spz;
list_for_each_entry(spz, &pr->registered_zones[i], node)
powercap_unregister_zone(scmi_top_pcntrl,
&spz->zone);
}
}
}
static inline unsigned int
scmi_powercap_get_zone_height(struct scmi_powercap_zone *spz)
{
if (spz->info->parent_id == SCMI_POWERCAP_ROOT_ZONE_ID)
return 0;
return spz->spzones[spz->info->parent_id].height + 1;
}
static inline struct scmi_powercap_zone *
scmi_powercap_get_parent_zone(struct scmi_powercap_zone *spz)
{
if (spz->info->parent_id == SCMI_POWERCAP_ROOT_ZONE_ID)
return NULL;
return &spz->spzones[spz->info->parent_id];
}
static int scmi_powercap_register_zone(struct scmi_powercap_root *pr,
struct scmi_powercap_zone *spz,
struct scmi_powercap_zone *parent)
{
int ret = 0;
struct powercap_zone *z;
if (spz->invalid) {
list_del(&spz->node);
return -EINVAL;
}
z = powercap_register_zone(&spz->zone, scmi_top_pcntrl, spz->info->name,
parent ? &parent->zone : NULL,
&zone_ops, 1, &constraint_ops);
if (!IS_ERR(z)) {
spz->height = scmi_powercap_get_zone_height(spz);
spz->registered = true;
list_move(&spz->node, &pr->registered_zones[spz->height]);
dev_dbg(spz->dev, "Registered node %s - parent %s - height:%d\n",
spz->info->name, parent ? parent->info->name : "ROOT",
spz->height);
} else {
list_del(&spz->node);
ret = PTR_ERR(z);
dev_err(spz->dev,
"Error registering node:%s - parent:%s - h:%d - ret:%d\n",
spz->info->name,
parent ? parent->info->name : "ROOT",
spz->height, ret);
}
return ret;
}
/**
* scmi_zones_register - Register SCMI powercap zones starting from parent zones
*
* @dev: A reference to the SCMI device
* @pr: A reference to the root powercap zones descriptors
*
* When registering SCMI powercap zones with the powercap framework we should
* take care to always register zones starting from the root ones and to
* deregister starting from the leaves.
*
* Unfortunately we cannot assume that the array of available SCMI powercap
* zones provided by the SCMI platform firmware is built to comply with such
* requirement.
*
* This function, given the set of SCMI powercap zones to register, takes care
* to walk the SCMI powercap zones trees up to the root registering any
* unregistered parent zone before registering the child zones; at the same
* time each registered zone's height in such a tree is accounted for and each
* zone, once registered, is stored in the @registered_zones array that is
* indexed by zone height: this way it is trivial, at unregister time, to walk
* the @registered_zones array backward and unregister all the zones starting
* from the leaves, removing child zones before their parents.
*
* While doing this, we prune away any zone marked as invalid (like the ones
* sporting an SCMI abstract power scale) as long as they are positioned as
* leaves in the SCMI powercap zones hierarchy: any non-leaf invalid zone causes
* the entire process to fail since we cannot assume the correctness of an SCMI
* powercap zones hierarchy if some of the internal nodes are missing.
*
* Note that the array of SCMI powercap zones as returned by the SCMI platform
* is known to be sane, i.e. zones relationships have been validated at the
* protocol layer.
*
* Return: 0 on Success
*/
static int scmi_zones_register(struct device *dev,
struct scmi_powercap_root *pr)
{
int ret = 0;
unsigned int sp = 0, reg_zones = 0;
struct scmi_powercap_zone *spz, **zones_stack;
zones_stack = kcalloc(pr->num_zones, sizeof(spz), GFP_KERNEL);
if (!zones_stack)
return -ENOMEM;
spz = list_first_entry_or_null(&pr->scmi_zones,
struct scmi_powercap_zone, node);
while (spz) {
struct scmi_powercap_zone *parent;
parent = scmi_powercap_get_parent_zone(spz);
if (parent && !parent->registered) {
zones_stack[sp++] = spz;
spz = parent;
} else {
ret = scmi_powercap_register_zone(pr, spz, parent);
if (!ret) {
reg_zones++;
} else if (sp) {
/* Failed to register a non-leaf zone.
* Bail-out.
*/
dev_err(dev,
"Failed to register non-leaf zone - ret:%d\n",
ret);
scmi_powercap_unregister_all_zones(pr);
reg_zones = 0;
goto out;
}
/* Pick next zone to process */
if (sp)
spz = zones_stack[--sp];
else
spz = list_first_entry_or_null(&pr->scmi_zones,
struct scmi_powercap_zone,
node);
}
}
out:
kfree(zones_stack);
dev_info(dev, "Registered %d SCMI Powercap domains !\n", reg_zones);
return ret;
}
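/*
 * Illustrative trace (added for clarity, not part of the original file):
 * assume three zones where A is a root, B's parent is A and C's parent is
 * B, and the firmware list is ordered {C, B, A}.  The loop above starts at
 * C, pushes C and then B on the stack while climbing to A, registers A at
 * height 0, then pops and registers B at height 1 and C at height 2, so
 * every parent is always registered before its children.
 */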
static int scmi_powercap_probe(struct scmi_device *sdev)
{
int ret, i;
struct scmi_powercap_root *pr;
struct scmi_powercap_zone *spz;
struct scmi_protocol_handle *ph;
struct device *dev = &sdev->dev;
if (!sdev->handle)
return -ENODEV;
powercap_ops = sdev->handle->devm_protocol_get(sdev,
SCMI_PROTOCOL_POWERCAP,
&ph);
if (IS_ERR(powercap_ops))
return PTR_ERR(powercap_ops);
pr = devm_kzalloc(dev, sizeof(*pr), GFP_KERNEL);
if (!pr)
return -ENOMEM;
ret = powercap_ops->num_domains_get(ph);
if (ret < 0) {
dev_err(dev, "number of powercap domains not found\n");
return ret;
}
pr->num_zones = ret;
pr->spzones = devm_kcalloc(dev, pr->num_zones,
sizeof(*pr->spzones), GFP_KERNEL);
if (!pr->spzones)
return -ENOMEM;
/* Allocate for worst possible scenario of maximum tree height. */
pr->registered_zones = devm_kcalloc(dev, pr->num_zones,
sizeof(*pr->registered_zones),
GFP_KERNEL);
if (!pr->registered_zones)
return -ENOMEM;
INIT_LIST_HEAD(&pr->scmi_zones);
for (i = 0, spz = pr->spzones; i < pr->num_zones; i++, spz++) {
/*
* Powercap domains are validated by the protocol layer, i.e.
* only non-NULL domains are returned here, and their
* parent_id is assured to point to another valid domain.
*/
spz->info = powercap_ops->info_get(ph, i);
spz->dev = dev;
spz->ph = ph;
spz->spzones = pr->spzones;
INIT_LIST_HEAD(&spz->node);
INIT_LIST_HEAD(&pr->registered_zones[i]);
list_add_tail(&spz->node, &pr->scmi_zones);
/*
* Forcibly skip powercap domains using an abstract scale.
* Note that only leaf domains can be skipped, so this could
* later lead to a global failure.
*/
if (!spz->info->powercap_scale_uw &&
!spz->info->powercap_scale_mw) {
dev_warn(dev,
"Abstract power scale not supported. Skip %s.\n",
spz->info->name);
spz->invalid = true;
continue;
}
}
/*
* Scan the array of retrieved SCMI powercap domains and register them
* hierarchically, starting from the root domains.
*/
ret = scmi_zones_register(dev, pr);
if (ret)
return ret;
dev_set_drvdata(dev, pr);
return ret;
}
static void scmi_powercap_remove(struct scmi_device *sdev)
{
struct device *dev = &sdev->dev;
struct scmi_powercap_root *pr = dev_get_drvdata(dev);
scmi_powercap_unregister_all_zones(pr);
}
static const struct scmi_device_id scmi_id_table[] = {
{ SCMI_PROTOCOL_POWERCAP, "powercap" },
{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);
static struct scmi_driver scmi_powercap_driver = {
.name = "scmi-powercap",
.probe = scmi_powercap_probe,
.remove = scmi_powercap_remove,
.id_table = scmi_id_table,
};
static int __init scmi_powercap_init(void)
{
int ret;
scmi_top_pcntrl = powercap_register_control_type(NULL, "arm-scmi", NULL);
if (IS_ERR(scmi_top_pcntrl))
return PTR_ERR(scmi_top_pcntrl);
ret = scmi_register(&scmi_powercap_driver);
if (ret)
powercap_unregister_control_type(scmi_top_pcntrl);
return ret;
}
module_init(scmi_powercap_init);
static void __exit scmi_powercap_exit(void)
{
scmi_unregister(&scmi_powercap_driver);
powercap_unregister_control_type(scmi_top_pcntrl);
}
module_exit(scmi_powercap_exit);
MODULE_AUTHOR("Cristian Marussi <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI Powercap driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/powercap/arm_scmi_powercap.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2021 Linaro Limited
*
* Author: Daniel Lezcano <[email protected]>
*
* The devfreq device combined with the energy model and the load can
* give an estimate of the power consumption as well as limit the
* power.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpumask.h>
#include <linux/devfreq.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>
struct dtpm_devfreq {
struct dtpm dtpm;
struct dev_pm_qos_request qos_req;
struct devfreq *devfreq;
};
static struct dtpm_devfreq *to_dtpm_devfreq(struct dtpm *dtpm)
{
return container_of(dtpm, struct dtpm_devfreq, dtpm);
}
static int update_pd_power_uw(struct dtpm *dtpm)
{
struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
struct devfreq *devfreq = dtpm_devfreq->devfreq;
struct device *dev = devfreq->dev.parent;
struct em_perf_domain *pd = em_pd_get(dev);
dtpm->power_min = pd->table[0].power;
dtpm->power_min *= MICROWATT_PER_MILLIWATT;
dtpm->power_max = pd->table[pd->nr_perf_states - 1].power;
dtpm->power_max *= MICROWATT_PER_MILLIWATT;
return 0;
}
static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
struct devfreq *devfreq = dtpm_devfreq->devfreq;
struct device *dev = devfreq->dev.parent;
struct em_perf_domain *pd = em_pd_get(dev);
unsigned long freq;
u64 power;
int i;
for (i = 0; i < pd->nr_perf_states; i++) {
power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
if (power > power_limit)
break;
}
freq = pd->table[i - 1].frequency;
dev_pm_qos_update_request(&dtpm_devfreq->qos_req, freq);
power_limit = pd->table[i - 1].power * MICROWATT_PER_MILLIWATT;
return power_limit;
}
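/*
 * Worked example for the selection above (hypothetical numbers, added for
 * illustration only): with table powers of 100, 300 and 500 mW (i.e.
 * 100000, 300000 and 500000 uW after scaling) and a requested limit of
 * 350000 uW, the loop breaks at the 500000 uW state, the QoS request is
 * set to table[1].frequency and 300000 uW is reported back as the
 * effective power limit.
 */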
static void _normalize_load(struct devfreq_dev_status *status)
{
if (status->total_time > 0xfffff) {
status->total_time >>= 10;
status->busy_time >>= 10;
}
status->busy_time <<= 10;
status->busy_time /= status->total_time ? : 1;
status->busy_time = status->busy_time ? : 1;
status->total_time = 1024;
}
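/*
 * Worked example for the normalization above (hypothetical numbers, added
 * for illustration only): busy_time = 300 and total_time = 1000 become
 * busy_time = (300 << 10) / 1000 = 307 out of total_time = 1024, i.e. a
 * load of roughly 30%.  When total_time exceeds 0xfffff, both values are
 * first shifted right by 10 so the subsequent fixed-point arithmetic stays
 * well within 64 bits.
 */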
static u64 get_pd_power_uw(struct dtpm *dtpm)
{
struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
struct devfreq *devfreq = dtpm_devfreq->devfreq;
struct device *dev = devfreq->dev.parent;
struct em_perf_domain *pd = em_pd_get(dev);
struct devfreq_dev_status status;
unsigned long freq;
u64 power;
int i;
mutex_lock(&devfreq->lock);
status = devfreq->last_status;
mutex_unlock(&devfreq->lock);
freq = DIV_ROUND_UP(status.current_frequency, HZ_PER_KHZ);
_normalize_load(&status);
for (i = 0; i < pd->nr_perf_states; i++) {
if (pd->table[i].frequency < freq)
continue;
power = pd->table[i].power * MICROWATT_PER_MILLIWATT;
power *= status.busy_time;
power >>= 10;
return power;
}
return 0;
}
static void pd_release(struct dtpm *dtpm)
{
struct dtpm_devfreq *dtpm_devfreq = to_dtpm_devfreq(dtpm);
if (dev_pm_qos_request_active(&dtpm_devfreq->qos_req))
dev_pm_qos_remove_request(&dtpm_devfreq->qos_req);
kfree(dtpm_devfreq);
}
static struct dtpm_ops dtpm_ops = {
.set_power_uw = set_pd_power_limit,
.get_power_uw = get_pd_power_uw,
.update_power_uw = update_pd_power_uw,
.release = pd_release,
};
static int __dtpm_devfreq_setup(struct devfreq *devfreq, struct dtpm *parent)
{
struct device *dev = devfreq->dev.parent;
struct dtpm_devfreq *dtpm_devfreq;
struct em_perf_domain *pd;
int ret = -ENOMEM;
pd = em_pd_get(dev);
if (!pd) {
ret = dev_pm_opp_of_register_em(dev, NULL);
if (ret) {
pr_err("No energy model available for '%s'\n", dev_name(dev));
return -EINVAL;
}
}
dtpm_devfreq = kzalloc(sizeof(*dtpm_devfreq), GFP_KERNEL);
if (!dtpm_devfreq)
return -ENOMEM;
dtpm_init(&dtpm_devfreq->dtpm, &dtpm_ops);
dtpm_devfreq->devfreq = devfreq;
ret = dtpm_register(dev_name(dev), &dtpm_devfreq->dtpm, parent);
if (ret) {
pr_err("Failed to register '%s': %d\n", dev_name(dev), ret);
kfree(dtpm_devfreq);
return ret;
}
ret = dev_pm_qos_add_request(dev, &dtpm_devfreq->qos_req,
DEV_PM_QOS_MAX_FREQUENCY,
PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
if (ret) {
pr_err("Failed to add QoS request: %d\n", ret);
goto out_dtpm_unregister;
}
dtpm_update_power(&dtpm_devfreq->dtpm);
return 0;
out_dtpm_unregister:
dtpm_unregister(&dtpm_devfreq->dtpm);
return ret;
}
static int dtpm_devfreq_setup(struct dtpm *dtpm, struct device_node *np)
{
struct devfreq *devfreq;
devfreq = devfreq_get_devfreq_by_node(np);
if (IS_ERR(devfreq))
return 0;
return __dtpm_devfreq_setup(devfreq, dtpm);
}
struct dtpm_subsys_ops dtpm_devfreq_ops = {
.name = KBUILD_MODNAME,
.setup = dtpm_devfreq_setup,
};
| linux-master | drivers/powercap/dtpm_devfreq.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright 2020 Linaro Limited
*
* Author: Daniel Lezcano <[email protected]>
*
* The DTPM CPU is based on the energy model. It hooks the CPU in the
* DTPM tree, which in turn updates the power numbers by propagating the
* power number from the CPU energy model information to the parents.
*
* The association between the power and the performance state allows
* the power of the CPU to be set at the OPP granularity.
*
* The CPU hotplug is supported and the power numbers will be updated
* if a CPU is hot plugged / unplugged.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpuhotplug.h>
#include <linux/dtpm.h>
#include <linux/energy_model.h>
#include <linux/of.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/units.h>
struct dtpm_cpu {
struct dtpm dtpm;
struct freq_qos_request qos_req;
int cpu;
};
static DEFINE_PER_CPU(struct dtpm_cpu *, dtpm_per_cpu);
static struct dtpm_cpu *to_dtpm_cpu(struct dtpm *dtpm)
{
return container_of(dtpm, struct dtpm_cpu, dtpm);
}
static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
struct em_perf_domain *pd = em_cpu_get(dtpm_cpu->cpu);
struct cpumask cpus;
unsigned long freq;
u64 power;
int i, nr_cpus;
cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
nr_cpus = cpumask_weight(&cpus);
for (i = 0; i < pd->nr_perf_states; i++) {
power = pd->table[i].power * nr_cpus;
if (power > power_limit)
break;
}
freq = pd->table[i - 1].frequency;
freq_qos_update_request(&dtpm_cpu->qos_req, freq);
power_limit = pd->table[i - 1].power * nr_cpus;
return power_limit;
}
static u64 scale_pd_power_uw(struct cpumask *pd_mask, u64 power)
{
unsigned long max, sum_util = 0;
int cpu;
/*
* The capacity is the same for all CPUs belonging to
* the same perf domain.
*/
max = arch_scale_cpu_capacity(cpumask_first(pd_mask));
for_each_cpu_and(cpu, pd_mask, cpu_online_mask)
sum_util += sched_cpu_util(cpu);
return (power * ((sum_util << 10) / max)) >> 10;
}
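/*
 * Worked example for the scaling above (hypothetical numbers, added for
 * illustration only): with a CPU capacity of 1024 and two online CPUs
 * whose utilizations are 512 and 256, sum_util = 768, so a full-domain
 * power of 2000000 uW is scaled to (2000000 * ((768 << 10) / 1024)) >> 10
 * = 1500000 uW, i.e. 75% of the table value.
 */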
static u64 get_pd_power_uw(struct dtpm *dtpm)
{
struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
struct em_perf_domain *pd;
struct cpumask *pd_mask;
unsigned long freq;
int i;
pd = em_cpu_get(dtpm_cpu->cpu);
pd_mask = em_span_cpus(pd);
freq = cpufreq_quick_get(dtpm_cpu->cpu);
for (i = 0; i < pd->nr_perf_states; i++) {
if (pd->table[i].frequency < freq)
continue;
return scale_pd_power_uw(pd_mask, pd->table[i].power *
MICROWATT_PER_MILLIWATT);
}
return 0;
}
static int update_pd_power_uw(struct dtpm *dtpm)
{
struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
struct em_perf_domain *em = em_cpu_get(dtpm_cpu->cpu);
struct cpumask cpus;
int nr_cpus;
cpumask_and(&cpus, cpu_online_mask, to_cpumask(em->cpus));
nr_cpus = cpumask_weight(&cpus);
dtpm->power_min = em->table[0].power;
dtpm->power_min *= MICROWATT_PER_MILLIWATT;
dtpm->power_min *= nr_cpus;
dtpm->power_max = em->table[em->nr_perf_states - 1].power;
dtpm->power_max *= MICROWATT_PER_MILLIWATT;
dtpm->power_max *= nr_cpus;
return 0;
}
static void pd_release(struct dtpm *dtpm)
{
struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
struct cpufreq_policy *policy;
if (freq_qos_request_active(&dtpm_cpu->qos_req))
freq_qos_remove_request(&dtpm_cpu->qos_req);
policy = cpufreq_cpu_get(dtpm_cpu->cpu);
if (policy) {
for_each_cpu(dtpm_cpu->cpu, policy->related_cpus)
per_cpu(dtpm_per_cpu, dtpm_cpu->cpu) = NULL;
}
kfree(dtpm_cpu);
}
static struct dtpm_ops dtpm_ops = {
.set_power_uw = set_pd_power_limit,
.get_power_uw = get_pd_power_uw,
.update_power_uw = update_pd_power_uw,
.release = pd_release,
};
static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
{
struct dtpm_cpu *dtpm_cpu;
dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
if (dtpm_cpu)
dtpm_update_power(&dtpm_cpu->dtpm);
return 0;
}
static int cpuhp_dtpm_cpu_online(unsigned int cpu)
{
struct dtpm_cpu *dtpm_cpu;
dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
if (dtpm_cpu)
return dtpm_update_power(&dtpm_cpu->dtpm);
return 0;
}
static int __dtpm_cpu_setup(int cpu, struct dtpm *parent)
{
struct dtpm_cpu *dtpm_cpu;
struct cpufreq_policy *policy;
struct em_perf_domain *pd;
char name[CPUFREQ_NAME_LEN];
int ret = -ENOMEM;
dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
if (dtpm_cpu)
return 0;
policy = cpufreq_cpu_get(cpu);
if (!policy)
return 0;
pd = em_cpu_get(cpu);
if (!pd || em_is_artificial(pd))
return -EINVAL;
dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
if (!dtpm_cpu)
return -ENOMEM;
dtpm_init(&dtpm_cpu->dtpm, &dtpm_ops);
dtpm_cpu->cpu = cpu;
for_each_cpu(cpu, policy->related_cpus)
per_cpu(dtpm_per_cpu, cpu) = dtpm_cpu;
snprintf(name, sizeof(name), "cpu%d-cpufreq", dtpm_cpu->cpu);
ret = dtpm_register(name, &dtpm_cpu->dtpm, parent);
if (ret)
goto out_kfree_dtpm_cpu;
ret = freq_qos_add_request(&policy->constraints,
&dtpm_cpu->qos_req, FREQ_QOS_MAX,
pd->table[pd->nr_perf_states - 1].frequency);
if (ret)
goto out_dtpm_unregister;
return 0;
out_dtpm_unregister:
dtpm_unregister(&dtpm_cpu->dtpm);
dtpm_cpu = NULL;
out_kfree_dtpm_cpu:
for_each_cpu(cpu, policy->related_cpus)
per_cpu(dtpm_per_cpu, cpu) = NULL;
kfree(dtpm_cpu);
return ret;
}
static int dtpm_cpu_setup(struct dtpm *dtpm, struct device_node *np)
{
int cpu;
cpu = of_cpu_node_to_id(np);
if (cpu < 0)
return 0;
return __dtpm_cpu_setup(cpu, dtpm);
}
static int dtpm_cpu_init(void)
{
int ret;
/*
* The callbacks at CPU hotplug time call
* dtpm_update_power(), which in turn calls update_pd_power_uw().
*
* The function update_pd_power_uw() uses the online mask to
* figure out the power consumption limits.
*
* At CPUHP_AP_ONLINE_DYN, the CPU is present in the CPU
* online mask when the cpuhp_dtpm_cpu_online function is
* called, but the CPU is still in the online mask for the
* tear down callback. So the power can not be updated when
* the CPU is unplugged.
*
* At CPUHP_AP_DTPM_CPU_DEAD, the situation is the opposite of the
* above. The CPU online mask is not up to date when the CPU
* is plugged in.
*
* For this reason, we need to call the online and offline
* callbacks at different moments when the CPU online mask is
* consistent with the power numbers we want to update.
*/
ret = cpuhp_setup_state(CPUHP_AP_DTPM_CPU_DEAD, "dtpm_cpu:offline",
NULL, cpuhp_dtpm_cpu_offline);
if (ret < 0)
return ret;
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dtpm_cpu:online",
cpuhp_dtpm_cpu_online, NULL);
if (ret < 0)
return ret;
return 0;
}
static void dtpm_cpu_exit(void)
{
cpuhp_remove_state_nocalls(CPUHP_AP_ONLINE_DYN);
cpuhp_remove_state_nocalls(CPUHP_AP_DTPM_CPU_DEAD);
}
struct dtpm_subsys_ops dtpm_cpu_ops = {
.name = KBUILD_MODNAME,
.init = dtpm_cpu_init,
.exit = dtpm_cpu_exit,
.setup = dtpm_cpu_setup,
};
| linux-master | drivers/powercap/dtpm_cpu.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel Running Average Power Limit (RAPL) Driver via MSR interface
* Copyright (c) 2019, Intel Corporation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/powercap.h>
#include <linux/suspend.h>
#include <linux/intel_rapl.h>
#include <linux/processor.h>
#include <linux/platform_device.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
/* Local defines */
#define MSR_PLATFORM_POWER_LIMIT 0x0000065C
#define MSR_VR_CURRENT_CONFIG 0x00000601
/* private data for RAPL MSR Interface */
static struct rapl_if_priv *rapl_msr_priv;
static struct rapl_if_priv rapl_msr_priv_intel = {
.type = RAPL_IF_MSR,
.reg_unit.msr = MSR_RAPL_POWER_UNIT,
.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PKG_POWER_LIMIT,
.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr = MSR_PKG_ENERGY_STATUS,
.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PERF].msr = MSR_PKG_PERF_STATUS,
.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_INFO].msr = MSR_PKG_POWER_INFO,
.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PP0_POWER_LIMIT,
.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr = MSR_PP0_ENERGY_STATUS,
.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_POLICY].msr = MSR_PP0_POLICY,
.regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PP1_POWER_LIMIT,
.regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_STATUS].msr = MSR_PP1_ENERGY_STATUS,
.regs[RAPL_DOMAIN_PP1][RAPL_DOMAIN_REG_POLICY].msr = MSR_PP1_POLICY,
.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_LIMIT].msr = MSR_DRAM_POWER_LIMIT,
.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_STATUS].msr = MSR_DRAM_ENERGY_STATUS,
.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_PERF].msr = MSR_DRAM_PERF_STATUS,
.regs[RAPL_DOMAIN_DRAM][RAPL_DOMAIN_REG_INFO].msr = MSR_DRAM_POWER_INFO,
.regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_LIMIT].msr = MSR_PLATFORM_POWER_LIMIT,
.regs[RAPL_DOMAIN_PLATFORM][RAPL_DOMAIN_REG_STATUS].msr = MSR_PLATFORM_ENERGY_STATUS,
.limits[RAPL_DOMAIN_PACKAGE] = BIT(POWER_LIMIT2),
.limits[RAPL_DOMAIN_PLATFORM] = BIT(POWER_LIMIT2),
};
static struct rapl_if_priv rapl_msr_priv_amd = {
.type = RAPL_IF_MSR,
.reg_unit.msr = MSR_AMD_RAPL_POWER_UNIT,
.regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_STATUS].msr = MSR_AMD_PKG_ENERGY_STATUS,
.regs[RAPL_DOMAIN_PP0][RAPL_DOMAIN_REG_STATUS].msr = MSR_AMD_CORE_ENERGY_STATUS,
};
/*
* Handles CPU hotplug on multi-socket systems.
* If a CPU goes online as the first CPU of the physical package
* we add the RAPL package to the system. Similarly, when the last
* CPU of the package is removed, we remove the RAPL package and its
* associated domains. Cooling devices are handled accordingly at
* per-domain level.
*/
static int rapl_cpu_online(unsigned int cpu)
{
struct rapl_package *rp;
rp = rapl_find_package_domain(cpu, rapl_msr_priv, true);
if (!rp) {
rp = rapl_add_package(cpu, rapl_msr_priv, true);
if (IS_ERR(rp))
return PTR_ERR(rp);
}
cpumask_set_cpu(cpu, &rp->cpumask);
return 0;
}
static int rapl_cpu_down_prep(unsigned int cpu)
{
struct rapl_package *rp;
int lead_cpu;
rp = rapl_find_package_domain(cpu, rapl_msr_priv, true);
if (!rp)
return 0;
cpumask_clear_cpu(cpu, &rp->cpumask);
lead_cpu = cpumask_first(&rp->cpumask);
if (lead_cpu >= nr_cpu_ids)
rapl_remove_package(rp);
else if (rp->lead_cpu == cpu)
rp->lead_cpu = lead_cpu;
return 0;
}
static int rapl_msr_read_raw(int cpu, struct reg_action *ra)
{
if (rdmsrl_safe_on_cpu(cpu, ra->reg.msr, &ra->value)) {
pr_debug("failed to read msr 0x%x on cpu %d\n", ra->reg.msr, cpu);
return -EIO;
}
ra->value &= ra->mask;
return 0;
}
static void rapl_msr_update_func(void *info)
{
struct reg_action *ra = info;
u64 val;
ra->err = rdmsrl_safe(ra->reg.msr, &val);
if (ra->err)
return;
val &= ~ra->mask;
val |= ra->value;
ra->err = wrmsrl_safe(ra->reg.msr, val);
}
static int rapl_msr_write_raw(int cpu, struct reg_action *ra)
{
int ret;
ret = smp_call_function_single(cpu, rapl_msr_update_func, ra, 1);
if (WARN_ON_ONCE(ret))
return ret;
return ra->err;
}
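/*
 * Illustrative note (added for clarity, not part of the original file):
 * writes are performed as a read-modify-write on the target CPU: only the
 * bits selected by ra->mask are replaced with ra->value, so e.g. updating
 * one power-limit field of a limit MSR leaves the other bits of that
 * register untouched.
 */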
/* List of verified CPUs. */
static const struct x86_cpu_id pl4_support_ids[] = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
{}
};
static int rapl_msr_probe(struct platform_device *pdev)
{
const struct x86_cpu_id *id = x86_match_cpu(pl4_support_ids);
int ret;
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_INTEL:
rapl_msr_priv = &rapl_msr_priv_intel;
break;
case X86_VENDOR_HYGON:
case X86_VENDOR_AMD:
rapl_msr_priv = &rapl_msr_priv_amd;
break;
default:
pr_err("intel-rapl does not support CPU vendor %d\n", boot_cpu_data.x86_vendor);
return -ENODEV;
}
rapl_msr_priv->read_raw = rapl_msr_read_raw;
rapl_msr_priv->write_raw = rapl_msr_write_raw;
if (id) {
rapl_msr_priv->limits[RAPL_DOMAIN_PACKAGE] |= BIT(POWER_LIMIT4);
rapl_msr_priv->regs[RAPL_DOMAIN_PACKAGE][RAPL_DOMAIN_REG_PL4].msr =
MSR_VR_CURRENT_CONFIG;
pr_info("PL4 support detected.\n");
}
rapl_msr_priv->control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
if (IS_ERR(rapl_msr_priv->control_type)) {
pr_debug("failed to register powercap control_type.\n");
return PTR_ERR(rapl_msr_priv->control_type);
}
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powercap/rapl:online",
rapl_cpu_online, rapl_cpu_down_prep);
if (ret < 0)
goto out;
rapl_msr_priv->pcap_rapl_online = ret;
return 0;
out:
if (ret)
powercap_unregister_control_type(rapl_msr_priv->control_type);
return ret;
}
static int rapl_msr_remove(struct platform_device *pdev)
{
cpuhp_remove_state(rapl_msr_priv->pcap_rapl_online);
powercap_unregister_control_type(rapl_msr_priv->control_type);
return 0;
}
static const struct platform_device_id rapl_msr_ids[] = {
{ .name = "intel_rapl_msr", },
{}
};
MODULE_DEVICE_TABLE(platform, rapl_msr_ids);
static struct platform_driver intel_rapl_msr_driver = {
.probe = rapl_msr_probe,
.remove = rapl_msr_remove,
.id_table = rapl_msr_ids,
.driver = {
.name = "intel_rapl_msr",
},
};
module_platform_driver(intel_rapl_msr_driver);
MODULE_DESCRIPTION("Driver for Intel RAPL (Running Average Power Limit) control via MSR interface");
MODULE_AUTHOR("Zhang Rui <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/powercap/intel_rapl_msr.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright 2018 Linaro Limited
*
* Author: Daniel Lezcano <[email protected]>
*
* The idle injection framework provides a way to force CPUs to enter idle
* states for a specified fraction of time over a specified period.
*
* It relies on the smpboot kthreads feature providing common code for CPU
* hotplug and thread [un]parking.
*
* All of the kthreads used for idle injection are created at init time.
*
* Next, the users of the idle injection framework provide a cpumask via
* its register function. The kthreads will be synchronized with respect to
* this cpumask.
*
* The idle + run duration is specified via separate helpers and that allows
* idle injection to be started.
*
* The idle injection kthreads will call play_idle_precise() with the idle
* duration and max allowed latency specified as per the above.
*
* After all of them have been woken up, a timer is set to start the next idle
* injection cycle.
*
* The timer interrupt handler will wake up the idle injection kthreads for
* all of the CPUs in the cpumask provided by the user.
*
* Idle injection is stopped synchronously and no leftover idle injection
* kthread activity after its completion is guaranteed.
*
* It is up to the user of this framework to provide a lock for higher-level
* synchronization to prevent race conditions like starting idle injection
* while unregistering from the framework.
*/
#define pr_fmt(fmt) "ii_dev: " fmt
#include <linux/cpu.h>
#include <linux/hrtimer.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smpboot.h>
#include <linux/idle_inject.h>
#include <uapi/linux/sched/types.h>
/**
* struct idle_inject_thread - task on/off switch structure
* @tsk: task injecting the idle cycles
* @should_run: whether or not to run the task (for the smpboot kthread API)
*/
struct idle_inject_thread {
struct task_struct *tsk;
int should_run;
};
/**
* struct idle_inject_device - idle injection data
* @timer: idle injection period timer
* @idle_duration_us: duration of CPU idle time to inject
* @run_duration_us: duration of CPU run time to allow
* @latency_us: max allowed latency
* @update: Optional callback deciding whether or not to skip idle
* injection in the given cycle.
* @cpumask: mask of CPUs affected by idle injection
*
* This structure is used to define per instance idle inject device data. Each
* instance has an idle duration, a run duration and mask of CPUs to inject
* idle.
*
* Actual CPU idle time is injected by calling kernel scheduler interface
* play_idle_precise(). There is one optional callback that can be registered
* by calling idle_inject_register_full():
*
* update() - This callback is invoked just before waking up CPUs to inject
* idle. If it returns false, CPUs are not woken up to inject idle in the given
* cycle. It also allows the caller to readjust the idle and run duration by
* calling idle_inject_set_duration() for the next cycle.
*/
struct idle_inject_device {
struct hrtimer timer;
unsigned int idle_duration_us;
unsigned int run_duration_us;
unsigned int latency_us;
bool (*update)(void);
unsigned long cpumask[];
};
static DEFINE_PER_CPU(struct idle_inject_thread, idle_inject_thread);
static DEFINE_PER_CPU(struct idle_inject_device *, idle_inject_device);
/**
* idle_inject_wakeup - Wake up idle injection threads
* @ii_dev: target idle injection device
*
* Every idle injection task associated with the given idle injection device
* and running on an online CPU will be woken up.
*/
static void idle_inject_wakeup(struct idle_inject_device *ii_dev)
{
struct idle_inject_thread *iit;
unsigned int cpu;
for_each_cpu_and(cpu, to_cpumask(ii_dev->cpumask), cpu_online_mask) {
iit = per_cpu_ptr(&idle_inject_thread, cpu);
iit->should_run = 1;
wake_up_process(iit->tsk);
}
}
/**
* idle_inject_timer_fn - idle injection timer function
* @timer: idle injection hrtimer
*
* This function is called when the idle injection timer expires. It wakes up
* idle injection tasks associated with the timer and they, in turn, invoke
* play_idle_precise() to inject a specified amount of CPU idle time.
*
* Return: HRTIMER_RESTART.
*/
static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
unsigned int duration_us;
struct idle_inject_device *ii_dev =
container_of(timer, struct idle_inject_device, timer);
if (!ii_dev->update || ii_dev->update())
idle_inject_wakeup(ii_dev);
duration_us = READ_ONCE(ii_dev->run_duration_us);
duration_us += READ_ONCE(ii_dev->idle_duration_us);
hrtimer_forward_now(timer, ns_to_ktime(duration_us * NSEC_PER_USEC));
return HRTIMER_RESTART;
}
/**
* idle_inject_fn - idle injection work function
* @cpu: the CPU owning the task
*
* This function calls play_idle_precise() to inject a specified amount of CPU
* idle time.
*/
static void idle_inject_fn(unsigned int cpu)
{
struct idle_inject_device *ii_dev;
struct idle_inject_thread *iit;
ii_dev = per_cpu(idle_inject_device, cpu);
iit = per_cpu_ptr(&idle_inject_thread, cpu);
/*
* Let the smpboot main loop know that the task should not run again.
*/
iit->should_run = 0;
play_idle_precise(READ_ONCE(ii_dev->idle_duration_us) * NSEC_PER_USEC,
READ_ONCE(ii_dev->latency_us) * NSEC_PER_USEC);
}
/**
* idle_inject_set_duration - idle and run duration update helper
* @ii_dev: idle injection control device structure
* @run_duration_us: CPU run time to allow in microseconds
* @idle_duration_us: CPU idle time to inject in microseconds
*/
void idle_inject_set_duration(struct idle_inject_device *ii_dev,
unsigned int run_duration_us,
unsigned int idle_duration_us)
{
if (run_duration_us + idle_duration_us) {
WRITE_ONCE(ii_dev->run_duration_us, run_duration_us);
WRITE_ONCE(ii_dev->idle_duration_us, idle_duration_us);
}
if (!run_duration_us)
pr_debug("CPU is forced to 100 percent idle\n");
}
EXPORT_SYMBOL_NS_GPL(idle_inject_set_duration, IDLE_INJECT);
/**
* idle_inject_get_duration - idle and run duration retrieval helper
* @ii_dev: idle injection control device structure
* @run_duration_us: memory location to store the current CPU run time
* @idle_duration_us: memory location to store the current CPU idle time
*/
void idle_inject_get_duration(struct idle_inject_device *ii_dev,
unsigned int *run_duration_us,
unsigned int *idle_duration_us)
{
*run_duration_us = READ_ONCE(ii_dev->run_duration_us);
*idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
}
EXPORT_SYMBOL_NS_GPL(idle_inject_get_duration, IDLE_INJECT);
/**
* idle_inject_set_latency - set the maximum latency allowed
* @ii_dev: idle injection control device structure
* @latency_us: set the latency requirement for the idle state
*/
void idle_inject_set_latency(struct idle_inject_device *ii_dev,
unsigned int latency_us)
{
WRITE_ONCE(ii_dev->latency_us, latency_us);
}
EXPORT_SYMBOL_NS_GPL(idle_inject_set_latency, IDLE_INJECT);
/**
* idle_inject_start - start idle injections
* @ii_dev: idle injection control device structure
*
* The function starts idle injection by first waking up all of the idle
* injection kthreads associated with @ii_dev to let them inject CPU idle time
* and then sets up a timer to start the next idle injection period.
*
* Return: -EINVAL if the CPU idle or CPU run time is not set or 0 on success.
*/
int idle_inject_start(struct idle_inject_device *ii_dev)
{
unsigned int idle_duration_us = READ_ONCE(ii_dev->idle_duration_us);
unsigned int run_duration_us = READ_ONCE(ii_dev->run_duration_us);
if (!(idle_duration_us + run_duration_us))
return -EINVAL;
pr_debug("Starting injecting idle cycles on CPUs '%*pbl'\n",
cpumask_pr_args(to_cpumask(ii_dev->cpumask)));
idle_inject_wakeup(ii_dev);
hrtimer_start(&ii_dev->timer,
ns_to_ktime((idle_duration_us + run_duration_us) *
NSEC_PER_USEC),
HRTIMER_MODE_REL);
return 0;
}
EXPORT_SYMBOL_NS_GPL(idle_inject_start, IDLE_INJECT);
/**
* idle_inject_stop - stops idle injections
* @ii_dev: idle injection control device structure
*
* The function stops idle injection and waits for the threads to finish work.
* If CPU idle time is being injected when this function runs, then it will
* wait until the end of the cycle.
*
* When it returns, there is no more idle injection kthread activity. The
* kthreads are scheduled out and the periodic timer is off.
*/
void idle_inject_stop(struct idle_inject_device *ii_dev)
{
struct idle_inject_thread *iit;
unsigned int cpu;
pr_debug("Stopping idle injection on CPUs '%*pbl'\n",
cpumask_pr_args(to_cpumask(ii_dev->cpumask)));
hrtimer_cancel(&ii_dev->timer);
/*
* Stopping idle injection requires all of the idle injection kthreads
* associated with the given cpumask to be parked and stay that way, so
* prevent CPUs from going online at this point. Any CPUs going online
* after the loop below will be covered by clearing the should_run flag
* that will cause the smpboot main loop to schedule them out.
*/
cpu_hotplug_disable();
/*
* Iterate over all (online + offline) CPUs here in case one of them
* goes offline with the should_run flag set so as to prevent its idle
* injection kthread from running when the CPU goes online again after
* the ii_dev has been freed.
*/
for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
iit = per_cpu_ptr(&idle_inject_thread, cpu);
iit->should_run = 0;
wait_task_inactive(iit->tsk, TASK_ANY);
}
cpu_hotplug_enable();
}
EXPORT_SYMBOL_NS_GPL(idle_inject_stop, IDLE_INJECT);
/**
* idle_inject_setup - prepare the current task for idle injection
* @cpu: not used
*
* Called once, this function is in charge of setting the current task's
* scheduler parameters to make it an RT task.
*/
static void idle_inject_setup(unsigned int cpu)
{
sched_set_fifo(current);
}
/**
* idle_inject_should_run - function helper for the smpboot API
* @cpu: CPU the kthread is running on
*
* Return: whether or not the thread can run.
*/
static int idle_inject_should_run(unsigned int cpu)
{
struct idle_inject_thread *iit =
per_cpu_ptr(&idle_inject_thread, cpu);
return iit->should_run;
}
/**
* idle_inject_register_full - initialize idle injection on a set of CPUs
* @cpumask: CPUs to be affected by idle injection
* @update: This callback is called just before waking up CPUs to inject
* idle
*
* This function creates an idle injection control device structure for the
* given set of CPUs and initializes the timer associated with it. This
* function also allows an update() callback to be registered.
* It does not start any injection cycles.
*
* Return: NULL if memory allocation fails, idle injection control device
* pointer on success.
*/
struct idle_inject_device *idle_inject_register_full(struct cpumask *cpumask,
bool (*update)(void))
{
struct idle_inject_device *ii_dev;
int cpu, cpu_rb;
ii_dev = kzalloc(sizeof(*ii_dev) + cpumask_size(), GFP_KERNEL);
if (!ii_dev)
return NULL;
cpumask_copy(to_cpumask(ii_dev->cpumask), cpumask);
hrtimer_init(&ii_dev->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ii_dev->timer.function = idle_inject_timer_fn;
ii_dev->latency_us = UINT_MAX;
ii_dev->update = update;
for_each_cpu(cpu, to_cpumask(ii_dev->cpumask)) {
if (per_cpu(idle_inject_device, cpu)) {
pr_err("cpu%d is already registered\n", cpu);
goto out_rollback;
}
per_cpu(idle_inject_device, cpu) = ii_dev;
}
return ii_dev;
out_rollback:
for_each_cpu(cpu_rb, to_cpumask(ii_dev->cpumask)) {
if (cpu == cpu_rb)
break;
per_cpu(idle_inject_device, cpu_rb) = NULL;
}
kfree(ii_dev);
return NULL;
}
EXPORT_SYMBOL_NS_GPL(idle_inject_register_full, IDLE_INJECT);
/**
* idle_inject_register - initialize idle injection on a set of CPUs
* @cpumask: CPUs to be affected by idle injection
*
* This function creates an idle injection control device structure for the
* given set of CPUs and initializes the timer associated with it. It does not
* start any injection cycles.
*
* Return: NULL if memory allocation fails, idle injection control device
* pointer on success.
*/
struct idle_inject_device *idle_inject_register(struct cpumask *cpumask)
{
return idle_inject_register_full(cpumask, NULL);
}
EXPORT_SYMBOL_NS_GPL(idle_inject_register, IDLE_INJECT);
/**
* idle_inject_unregister - unregister idle injection control device
* @ii_dev: idle injection control device to unregister
*
* The function stops idle injection for the given control device,
* unregisters its kthreads and frees memory allocated when that device was
* created.
*/
void idle_inject_unregister(struct idle_inject_device *ii_dev)
{
unsigned int cpu;
idle_inject_stop(ii_dev);
for_each_cpu(cpu, to_cpumask(ii_dev->cpumask))
per_cpu(idle_inject_device, cpu) = NULL;
kfree(ii_dev);
}
EXPORT_SYMBOL_NS_GPL(idle_inject_unregister, IDLE_INJECT);
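/*
 * Illustrative sketch (added for clarity, not part of the original file):
 * the call sequence a hypothetical user of this framework would follow.
 * All foo_* names and the guard macro are invented for the example; a real
 * module built outside this file would additionally need
 * MODULE_IMPORT_NS(IDLE_INJECT) to use the exports above.
 */
#ifdef IDLE_INJECT_EXAMPLE
static struct idle_inject_device *foo_ii_dev;

static int foo_start_idle_injection(void)
{
	cpumask_var_t mask;
	int ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(mask, cpu_possible_mask);

	/* The cpumask is copied by the framework, so it can be freed here. */
	foo_ii_dev = idle_inject_register(mask);
	free_cpumask_var(mask);
	if (!foo_ii_dev)
		return -ENOMEM;

	/* 40 ms of idle out of every 100 ms, any exit latency accepted. */
	idle_inject_set_duration(foo_ii_dev, 60000, 40000);
	idle_inject_set_latency(foo_ii_dev, UINT_MAX);

	ret = idle_inject_start(foo_ii_dev);
	if (ret)
		idle_inject_unregister(foo_ii_dev);
	return ret;
}

static void foo_stop_idle_injection(void)
{
	idle_inject_stop(foo_ii_dev);
	idle_inject_unregister(foo_ii_dev);
}
#endif /* IDLE_INJECT_EXAMPLE */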
static struct smp_hotplug_thread idle_inject_threads = {
.store = &idle_inject_thread.tsk,
.setup = idle_inject_setup,
.thread_fn = idle_inject_fn,
.thread_comm = "idle_inject/%u",
.thread_should_run = idle_inject_should_run,
};
static int __init idle_inject_init(void)
{
return smpboot_register_percpu_thread(&idle_inject_threads);
}
early_initcall(idle_inject_init);
| linux-master | drivers/powercap/idle_inject.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* intel_rapl_tpmi: Intel RAPL driver via TPMI interface
*
* Copyright (c) 2023, Intel Corporation.
* All Rights Reserved.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/auxiliary_bus.h>
#include <linux/io.h>
#include <linux/intel_tpmi.h>
#include <linux/intel_rapl.h>
#include <linux/module.h>
#include <linux/slab.h>
#define TPMI_RAPL_VERSION 1
/* 1 header + 10 registers + 5 reserved. 8 bytes for each. */
#define TPMI_RAPL_DOMAIN_SIZE 128
enum tpmi_rapl_domain_type {
TPMI_RAPL_DOMAIN_INVALID,
TPMI_RAPL_DOMAIN_SYSTEM,
TPMI_RAPL_DOMAIN_PACKAGE,
TPMI_RAPL_DOMAIN_RESERVED,
TPMI_RAPL_DOMAIN_MEMORY,
TPMI_RAPL_DOMAIN_MAX,
};
enum tpmi_rapl_register {
TPMI_RAPL_REG_HEADER,
TPMI_RAPL_REG_UNIT,
TPMI_RAPL_REG_PL1,
TPMI_RAPL_REG_PL2,
TPMI_RAPL_REG_PL3,
TPMI_RAPL_REG_PL4,
TPMI_RAPL_REG_RESERVED,
TPMI_RAPL_REG_ENERGY_STATUS,
TPMI_RAPL_REG_PERF_STATUS,
TPMI_RAPL_REG_POWER_INFO,
TPMI_RAPL_REG_INTERRUPT,
TPMI_RAPL_REG_MAX = 15,
};
struct tpmi_rapl_package {
struct rapl_if_priv priv;
struct intel_tpmi_plat_info *tpmi_info;
struct rapl_package *rp;
void __iomem *base;
struct list_head node;
};
static LIST_HEAD(tpmi_rapl_packages);
static DEFINE_MUTEX(tpmi_rapl_lock);
static struct powercap_control_type *tpmi_control_type;
static int tpmi_rapl_read_raw(int id, struct reg_action *ra)
{
if (!ra->reg.mmio)
return -EINVAL;
ra->value = readq(ra->reg.mmio);
ra->value &= ra->mask;
return 0;
}
static int tpmi_rapl_write_raw(int id, struct reg_action *ra)
{
u64 val;
if (!ra->reg.mmio)
return -EINVAL;
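	/*
	 * Read-modify-write: only the bits covered by ra->mask are updated;
	 * the caller is assumed to pass a value already positioned within
	 * that mask, as it is OR'd in unmodified.
	 */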
val = readq(ra->reg.mmio);
val &= ~ra->mask;
val |= ra->value;
writeq(val, ra->reg.mmio);
return 0;
}
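/*
 * All TPMI RAPL packages share a single "intel-rapl" powercap control type.
 * It is registered when the first package is allocated and unregistered when
 * the last one is released, with the tpmi_rapl_packages list acting as the
 * reference count.
 */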
static struct tpmi_rapl_package *trp_alloc(int pkg_id)
{
struct tpmi_rapl_package *trp;
int ret;
mutex_lock(&tpmi_rapl_lock);
if (list_empty(&tpmi_rapl_packages)) {
tpmi_control_type = powercap_register_control_type(NULL, "intel-rapl", NULL);
if (IS_ERR(tpmi_control_type)) {
ret = PTR_ERR(tpmi_control_type);
goto err_unlock;
}
}
trp = kzalloc(sizeof(*trp), GFP_KERNEL);
if (!trp) {
ret = -ENOMEM;
goto err_del_powercap;
}
list_add(&trp->node, &tpmi_rapl_packages);
mutex_unlock(&tpmi_rapl_lock);
return trp;
err_del_powercap:
if (list_empty(&tpmi_rapl_packages))
powercap_unregister_control_type(tpmi_control_type);
err_unlock:
mutex_unlock(&tpmi_rapl_lock);
return ERR_PTR(ret);
}
static void trp_release(struct tpmi_rapl_package *trp)
{
mutex_lock(&tpmi_rapl_lock);
list_del(&trp->node);
if (list_empty(&tpmi_rapl_packages))
powercap_unregister_control_type(tpmi_control_type);
kfree(trp);
mutex_unlock(&tpmi_rapl_lock);
}
static int parse_one_domain(struct tpmi_rapl_package *trp, u32 offset)
{
u8 tpmi_domain_version;
enum rapl_domain_type domain_type;
enum tpmi_rapl_domain_type tpmi_domain_type;
enum tpmi_rapl_register reg_index;
enum rapl_domain_reg_id reg_id;
int tpmi_domain_size, tpmi_domain_flags;
u64 tpmi_domain_header = readq(trp->base + offset);
/* Domain Parent bits are ignored for now */
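	/*
	 * Header layout, as decoded below: bits [7:0] version, [15:8] domain
	 * type, [23:16] domain size in 128-byte units and [47:32] a bitmap of
	 * the registers present in the domain (indexed by enum
	 * tpmi_rapl_register).
	 */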
tpmi_domain_version = tpmi_domain_header & 0xff;
tpmi_domain_type = tpmi_domain_header >> 8 & 0xff;
tpmi_domain_size = tpmi_domain_header >> 16 & 0xff;
tpmi_domain_flags = tpmi_domain_header >> 32 & 0xffff;
if (tpmi_domain_version != TPMI_RAPL_VERSION) {
pr_warn(FW_BUG "Unsupported version:%d\n", tpmi_domain_version);
return -ENODEV;
}
/* Domain size: in unit of 128 Bytes */
if (tpmi_domain_size != 1) {
pr_warn(FW_BUG "Invalid Domain size %d\n", tpmi_domain_size);
return -EINVAL;
}
/* Unit register and Energy Status register are mandatory for each domain */
if (!(tpmi_domain_flags & BIT(TPMI_RAPL_REG_UNIT)) ||
!(tpmi_domain_flags & BIT(TPMI_RAPL_REG_ENERGY_STATUS))) {
pr_warn(FW_BUG "Invalid Domain flag 0x%x\n", tpmi_domain_flags);
return -EINVAL;
}
switch (tpmi_domain_type) {
case TPMI_RAPL_DOMAIN_PACKAGE:
domain_type = RAPL_DOMAIN_PACKAGE;
break;
case TPMI_RAPL_DOMAIN_SYSTEM:
domain_type = RAPL_DOMAIN_PLATFORM;
break;
case TPMI_RAPL_DOMAIN_MEMORY:
domain_type = RAPL_DOMAIN_DRAM;
break;
default:
pr_warn(FW_BUG "Unsupported Domain type %d\n", tpmi_domain_type);
return -EINVAL;
}
if (trp->priv.regs[domain_type][RAPL_DOMAIN_REG_UNIT].mmio) {
pr_warn(FW_BUG "Duplicate Domain type %d\n", tpmi_domain_type);
return -EINVAL;
}
reg_index = TPMI_RAPL_REG_HEADER;
while (++reg_index != TPMI_RAPL_REG_MAX) {
if (!(tpmi_domain_flags & BIT(reg_index)))
continue;
switch (reg_index) {
case TPMI_RAPL_REG_UNIT:
reg_id = RAPL_DOMAIN_REG_UNIT;
break;
case TPMI_RAPL_REG_PL1:
reg_id = RAPL_DOMAIN_REG_LIMIT;
trp->priv.limits[domain_type] |= BIT(POWER_LIMIT1);
break;
case TPMI_RAPL_REG_PL2:
reg_id = RAPL_DOMAIN_REG_PL2;
trp->priv.limits[domain_type] |= BIT(POWER_LIMIT2);
break;
case TPMI_RAPL_REG_PL4:
reg_id = RAPL_DOMAIN_REG_PL4;
trp->priv.limits[domain_type] |= BIT(POWER_LIMIT4);
break;
case TPMI_RAPL_REG_ENERGY_STATUS:
reg_id = RAPL_DOMAIN_REG_STATUS;
break;
case TPMI_RAPL_REG_PERF_STATUS:
reg_id = RAPL_DOMAIN_REG_PERF;
break;
case TPMI_RAPL_REG_POWER_INFO:
reg_id = RAPL_DOMAIN_REG_INFO;
break;
default:
continue;
}
trp->priv.regs[domain_type][reg_id].mmio = trp->base + offset + reg_index * 8;
}
return 0;
}
static int intel_rapl_tpmi_probe(struct auxiliary_device *auxdev,
const struct auxiliary_device_id *id)
{
struct tpmi_rapl_package *trp;
struct intel_tpmi_plat_info *info;
struct resource *res;
u32 offset;
int ret;
info = tpmi_get_platform_data(auxdev);
if (!info)
return -ENODEV;
trp = trp_alloc(info->package_id);
if (IS_ERR(trp))
return PTR_ERR(trp);
if (tpmi_get_resource_count(auxdev) > 1) {
dev_err(&auxdev->dev, "does not support multiple resources\n");
ret = -EINVAL;
goto err;
}
res = tpmi_get_resource_at_index(auxdev, 0);
if (!res) {
dev_err(&auxdev->dev, "can't fetch device resource info\n");
ret = -EIO;
goto err;
}
trp->base = devm_ioremap_resource(&auxdev->dev, res);
if (IS_ERR(trp->base)) {
ret = PTR_ERR(trp->base);
goto err;
}
for (offset = 0; offset < resource_size(res); offset += TPMI_RAPL_DOMAIN_SIZE) {
ret = parse_one_domain(trp, offset);
if (ret)
goto err;
}
trp->tpmi_info = info;
trp->priv.type = RAPL_IF_TPMI;
trp->priv.read_raw = tpmi_rapl_read_raw;
trp->priv.write_raw = tpmi_rapl_write_raw;
trp->priv.control_type = tpmi_control_type;
/* RAPL TPMI I/F is per physical package */
trp->rp = rapl_find_package_domain(info->package_id, &trp->priv, false);
if (trp->rp) {
dev_err(&auxdev->dev, "Domain for Package%d already exists\n", info->package_id);
ret = -EEXIST;
goto err;
}
trp->rp = rapl_add_package(info->package_id, &trp->priv, false);
if (IS_ERR(trp->rp)) {
dev_err(&auxdev->dev, "Failed to add RAPL Domain for Package%d, %ld\n",
info->package_id, PTR_ERR(trp->rp));
ret = PTR_ERR(trp->rp);
goto err;
}
auxiliary_set_drvdata(auxdev, trp);
return 0;
err:
trp_release(trp);
return ret;
}
static void intel_rapl_tpmi_remove(struct auxiliary_device *auxdev)
{
struct tpmi_rapl_package *trp = auxiliary_get_drvdata(auxdev);
rapl_remove_package(trp->rp);
trp_release(trp);
}
static const struct auxiliary_device_id intel_rapl_tpmi_ids[] = {
{.name = "intel_vsec.tpmi-rapl" },
{ }
};
MODULE_DEVICE_TABLE(auxiliary, intel_rapl_tpmi_ids);
static struct auxiliary_driver intel_rapl_tpmi_driver = {
.probe = intel_rapl_tpmi_probe,
.remove = intel_rapl_tpmi_remove,
.id_table = intel_rapl_tpmi_ids,
};
module_auxiliary_driver(intel_rapl_tpmi_driver);
MODULE_IMPORT_NS(INTEL_TPMI);
MODULE_DESCRIPTION("Intel RAPL TPMI Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/powercap/intel_rapl_tpmi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2021 - Google LLC
* Author: David Brazdil <[email protected]>
*
* Driver for Open Profile for DICE.
*
* This driver takes ownership of a reserved memory region containing data
* generated by the Open Profile for DICE measured boot protocol. The memory
* contents are not interpreted by the kernel but can be mapped into a userspace
* process via a misc device. Userspace can also request a wipe of the memory.
*
* Userspace can access the data with (w/o error handling):
*
* fd = open("/dev/open-dice0", O_RDWR);
* read(fd, &size, sizeof(unsigned long));
* data = mmap(NULL, size, PROT_READ, MAP_PRIVATE, fd, 0);
* write(fd, NULL, 0); // wipe
* close(fd);
*/
#include <linux/io.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#define DRIVER_NAME "open-dice"
struct open_dice_drvdata {
struct mutex lock;
char name[16];
struct reserved_mem *rmem;
struct miscdevice misc;
};
static inline struct open_dice_drvdata *to_open_dice_drvdata(struct file *filp)
{
return container_of(filp->private_data, struct open_dice_drvdata, misc);
}
static int open_dice_wipe(struct open_dice_drvdata *drvdata)
{
void *kaddr;
mutex_lock(&drvdata->lock);
kaddr = devm_memremap(drvdata->misc.this_device, drvdata->rmem->base,
drvdata->rmem->size, MEMREMAP_WC);
if (IS_ERR(kaddr)) {
mutex_unlock(&drvdata->lock);
return PTR_ERR(kaddr);
}
memset(kaddr, 0, drvdata->rmem->size);
devm_memunmap(drvdata->misc.this_device, kaddr);
mutex_unlock(&drvdata->lock);
return 0;
}
/*
* Copies the size of the reserved memory region to the user-provided buffer.
*/
static ssize_t open_dice_read(struct file *filp, char __user *ptr, size_t len,
loff_t *off)
{
unsigned long val = to_open_dice_drvdata(filp)->rmem->size;
return simple_read_from_buffer(ptr, len, off, &val, sizeof(val));
}
/*
* Triggers a wipe of the reserved memory region. The user-provided pointer
* is never dereferenced.
*/
static ssize_t open_dice_write(struct file *filp, const char __user *ptr,
size_t len, loff_t *off)
{
if (open_dice_wipe(to_open_dice_drvdata(filp)))
return -EIO;
/* Consume the input buffer. */
return len;
}
/*
* Creates a mapping of the reserved memory region in user address space.
*/
static int open_dice_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct open_dice_drvdata *drvdata = to_open_dice_drvdata(filp);
if (vma->vm_flags & VM_MAYSHARE) {
/* Do not allow userspace to modify the underlying data. */
if (vma->vm_flags & VM_WRITE)
return -EPERM;
/* Ensure userspace cannot acquire VM_WRITE later. */
vm_flags_clear(vma, VM_MAYWRITE);
}
/* Create write-combine mapping so all clients observe a wipe. */
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vm_flags_set(vma, VM_DONTCOPY | VM_DONTDUMP);
return vm_iomap_memory(vma, drvdata->rmem->base, drvdata->rmem->size);
}
static const struct file_operations open_dice_fops = {
.owner = THIS_MODULE,
.read = open_dice_read,
.write = open_dice_write,
.mmap = open_dice_mmap,
};
static int __init open_dice_probe(struct platform_device *pdev)
{
static unsigned int dev_idx;
struct device *dev = &pdev->dev;
struct reserved_mem *rmem;
struct open_dice_drvdata *drvdata;
int ret;
rmem = of_reserved_mem_lookup(dev->of_node);
if (!rmem) {
dev_err(dev, "failed to lookup reserved memory\n");
return -EINVAL;
}
if (!rmem->size || (rmem->size > ULONG_MAX)) {
dev_err(dev, "invalid memory region size\n");
return -EINVAL;
}
if (!PAGE_ALIGNED(rmem->base) || !PAGE_ALIGNED(rmem->size)) {
dev_err(dev, "memory region must be page-aligned\n");
return -EINVAL;
}
drvdata = devm_kmalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
*drvdata = (struct open_dice_drvdata){
.lock = __MUTEX_INITIALIZER(drvdata->lock),
.rmem = rmem,
.misc = (struct miscdevice){
.parent = dev,
.name = drvdata->name,
.minor = MISC_DYNAMIC_MINOR,
.fops = &open_dice_fops,
.mode = 0600,
},
};
/* Index overflow check not needed, misc_register() will fail. */
snprintf(drvdata->name, sizeof(drvdata->name), DRIVER_NAME"%u", dev_idx++);
ret = misc_register(&drvdata->misc);
if (ret) {
dev_err(dev, "failed to register misc device '%s': %d\n",
drvdata->name, ret);
return ret;
}
platform_set_drvdata(pdev, drvdata);
return 0;
}
static int open_dice_remove(struct platform_device *pdev)
{
struct open_dice_drvdata *drvdata = platform_get_drvdata(pdev);
misc_deregister(&drvdata->misc);
return 0;
}
static const struct of_device_id open_dice_of_match[] = {
{ .compatible = "google,open-dice" },
{},
};
static struct platform_driver open_dice_driver = {
.remove = open_dice_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = open_dice_of_match,
},
};
static int __init open_dice_init(void)
{
int ret = platform_driver_probe(&open_dice_driver, open_dice_probe);
/* DICE regions are optional. Succeed even with zero instances. */
return (ret == -ENODEV) ? 0 : ret;
}
static void __exit open_dice_exit(void)
{
platform_driver_unregister(&open_dice_driver);
}
module_init(open_dice_init);
module_exit(open_dice_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("David Brazdil <[email protected]>");
| linux-master | drivers/misc/open-dice.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Xilinx SDFEC
*
* Copyright (C) 2019 Xilinx, Inc.
*
* Description:
* This driver is developed for SDFEC16 (Soft Decision FEC 16nm)
* IP. It exposes a char device which supports file operations
* like open(), close() and ioctl().
*/
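/*
 * Userspace usage sketch (editor's illustration, not part of the driver):
 * the ioctl request macros and struct xsdfec_stats are taken from
 * <uapi/misc/xilinx_sdfec.h>; the node name assumes the first probed
 * instance and error handling is omitted.
 *
 *	int fd = open("/dev/xsdfec0", O_RDWR);
 *	struct xsdfec_stats stats;
 *
 *	ioctl(fd, XSDFEC_START_DEV);
 *	...
 *	ioctl(fd, XSDFEC_GET_STATS, &stats);
 *	ioctl(fd, XSDFEC_STOP_DEV);
 *	close(fd);
 */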
#include <linux/miscdevice.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/compat.h>
#include <linux/highmem.h>
#include <uapi/misc/xilinx_sdfec.h>
#define DEV_NAME_LEN 12
static DEFINE_IDA(dev_nrs);
/* Xilinx SDFEC Register Map */
/* CODE_WRI_PROTECT Register */
#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4)
/* ACTIVE Register */
#define XSDFEC_ACTIVE_ADDR (0x8)
#define XSDFEC_IS_ACTIVITY_SET (0x1)
/* AXIS_WIDTH Register */
#define XSDFEC_AXIS_WIDTH_ADDR (0xC)
#define XSDFEC_AXIS_DOUT_WORDS_LSB (5)
#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3)
#define XSDFEC_AXIS_DIN_WORDS_LSB (2)
#define XSDFEC_AXIS_DIN_WIDTH_LSB (0)
/* AXIS_ENABLE Register */
#define XSDFEC_AXIS_ENABLE_ADDR (0x10)
#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38)
#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7)
#define XSDFEC_AXIS_ENABLE_MASK \
(XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK)
/* FEC_CODE Register */
#define XSDFEC_FEC_CODE_ADDR (0x14)
/* ORDER Register Map */
#define XSDFEC_ORDER_ADDR (0x18)
/* Interrupt Status Register */
#define XSDFEC_ISR_ADDR (0x1C)
/* Interrupt Status Register Bit Mask */
#define XSDFEC_ISR_MASK (0x3F)
/* Write Only - Interrupt Enable Register */
#define XSDFEC_IER_ADDR (0x20)
/* Write Only - Interrupt Disable Register */
#define XSDFEC_IDR_ADDR (0x24)
/* Read Only - Interrupt Mask Register */
#define XSDFEC_IMR_ADDR (0x28)
/* ECC Interrupt Status Register */
#define XSDFEC_ECC_ISR_ADDR (0x2C)
/* Single Bit Errors */
#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF)
/* PL Initialize Single Bit Errors */
#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000)
/* Multi Bit Errors */
#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800)
/* PL Initialize Multi Bit Errors */
#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000)
/* Multi Bit Error to Event Shift */
#define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11)
/* PL Initialize Multi Bit Error to Event Shift */
#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4)
/* ECC Interrupt Status Bit Mask */
#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK)
/* ECC Interrupt Status PL Initialize Bit Mask */
#define XSDFEC_PL_INIT_ECC_ISR_MASK \
(XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
/* ECC Interrupt Status All Bit Mask */
#define XSDFEC_ALL_ECC_ISR_MASK \
(XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK)
/* ECC Interrupt Status Single Bit Errors Mask */
#define XSDFEC_ALL_ECC_ISR_SBE_MASK \
(XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK)
/* ECC Interrupt Status Multi Bit Errors Mask */
#define XSDFEC_ALL_ECC_ISR_MBE_MASK \
(XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
/* Write Only - ECC Interrupt Enable Register */
#define XSDFEC_ECC_IER_ADDR (0x30)
/* Write Only - ECC Interrupt Disable Register */
#define XSDFEC_ECC_IDR_ADDR (0x34)
/* Read Only - ECC Interrupt Mask Register */
#define XSDFEC_ECC_IMR_ADDR (0x38)
/* BYPASS Register */
#define XSDFEC_BYPASS_ADDR (0x3C)
/* Turbo Code Register */
#define XSDFEC_TURBO_ADDR (0x100)
#define XSDFEC_TURBO_SCALE_MASK (0xFFF)
#define XSDFEC_TURBO_SCALE_BIT_POS (8)
#define XSDFEC_TURBO_SCALE_MAX (15)
/* REG0 Register */
#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000)
#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0)
#define XSDFEC_REG0_N_MIN (4)
#define XSDFEC_REG0_N_MAX (32768)
#define XSDFEC_REG0_N_MUL_P (256)
#define XSDFEC_REG0_N_LSB (0)
#define XSDFEC_REG0_K_MIN (2)
#define XSDFEC_REG0_K_MAX (32766)
#define XSDFEC_REG0_K_MUL_P (256)
#define XSDFEC_REG0_K_LSB (16)
/* REG1 Register */
#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004)
#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4)
#define XSDFEC_REG1_PSIZE_MIN (2)
#define XSDFEC_REG1_PSIZE_MAX (512)
#define XSDFEC_REG1_NO_PACKING_MASK (0x400)
#define XSDFEC_REG1_NO_PACKING_LSB (10)
#define XSDFEC_REG1_NM_MASK (0xFF800)
#define XSDFEC_REG1_NM_LSB (11)
#define XSDFEC_REG1_BYPASS_MASK (0x100000)
/* REG2 Register */
#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008)
#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8)
#define XSDFEC_REG2_NLAYERS_MIN (1)
#define XSDFEC_REG2_NLAYERS_MAX (256)
#define XSDFEC_REG2_NNMQC_MASK (0xFFE00)
#define XSDFEC_REG2_NMQC_LSB (9)
#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000)
#define XSDFEC_REG2_NORM_TYPE_LSB (20)
#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000)
#define XSDFEC_REG2_SPEICAL_QC_LSB (21)
#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000)
#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22)
#define XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000)
#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23)
/* REG3 Register */
#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C)
#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC)
#define XSDFEC_REG3_LA_OFF_LSB (8)
#define XSDFEC_REG3_QC_OFF_LSB (16)
#define XSDFEC_LDPC_REG_JUMP (0x10)
#define XSDFEC_REG_WIDTH_JUMP (4)
/* The maximum number of pinned pages */
#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1)
/**
* struct xsdfec_clks - For managing SD-FEC clocks
* @core_clk: Main processing clock for core
* @axi_clk: AXI4-Lite memory-mapped clock
* @din_words_clk: DIN Words AXI4-Stream Slave clock
* @din_clk: DIN AXI4-Stream Slave clock
 * @dout_clk: DOUT AXI4-Stream Slave clock
 * @dout_words_clk: DOUT Words AXI4-Stream Slave clock
* @ctrl_clk: Control AXI4-Stream Slave clock
* @status_clk: Status AXI4-Stream Slave clock
*/
struct xsdfec_clks {
struct clk *core_clk;
struct clk *axi_clk;
struct clk *din_words_clk;
struct clk *din_clk;
struct clk *dout_clk;
struct clk *dout_words_clk;
struct clk *ctrl_clk;
struct clk *status_clk;
};
/**
* struct xsdfec_dev - Driver data for SDFEC
* @miscdev: Misc device handle
* @clks: Clocks managed by the SDFEC driver
* @waitq: Driver wait queue
* @config: Configuration of the SDFEC device
* @dev_name: Device name
* @flags: spinlock flags
 * @regs: ioremapped base address of the device registers
* @dev: pointer to device struct
* @state: State of the SDFEC device
* @error_data_lock: Error counter and states spinlock
* @dev_id: Device ID
* @isr_err_count: Count of ISR errors
* @cecc_count: Count of Correctable ECC errors (SBE)
* @uecc_count: Count of Uncorrectable ECC errors (MBE)
* @irq: IRQ number
* @state_updated: indicates State updated by interrupt handler
* @stats_updated: indicates Stats updated by interrupt handler
* @intr_enabled: indicates IRQ enabled
*
* This structure contains necessary state for SDFEC driver to operate
*/
struct xsdfec_dev {
struct miscdevice miscdev;
struct xsdfec_clks clks;
wait_queue_head_t waitq;
struct xsdfec_config config;
char dev_name[DEV_NAME_LEN];
unsigned long flags;
void __iomem *regs;
struct device *dev;
enum xsdfec_state state;
/* Spinlock to protect state_updated and stats_updated */
spinlock_t error_data_lock;
int dev_id;
u32 isr_err_count;
u32 cecc_count;
u32 uecc_count;
int irq;
bool state_updated;
bool stats_updated;
bool intr_enabled;
};
static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr,
u32 value)
{
dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr);
iowrite32(value, xsdfec->regs + addr);
}
static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr)
{
u32 rval;
rval = ioread32(xsdfec->regs + addr);
dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr);
return rval;
}
static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec,
u32 reg_offset, u32 bit_num,
char *config_value)
{
u32 reg_val;
u32 bit_mask = 1 << bit_num;
reg_val = xsdfec_regread(xsdfec, reg_offset);
*config_value = (reg_val & bit_mask) > 0;
}
static void update_config_from_hw(struct xsdfec_dev *xsdfec)
{
u32 reg_value;
bool sdfec_started;
/* Update the Order */
reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR);
xsdfec->config.order = reg_value;
update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR,
0, /* Bit Number, maybe change to mask */
&xsdfec->config.bypass);
update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR,
0, /* Bit Number */
&xsdfec->config.code_wr_protect);
reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0;
reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
xsdfec->config.irq.enable_ecc_isr =
(reg_value & XSDFEC_ECC_ISR_MASK) > 0;
reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0;
if (sdfec_started)
xsdfec->state = XSDFEC_STARTED;
else
xsdfec->state = XSDFEC_STOPPED;
}
static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg)
{
struct xsdfec_status status;
int err;
memset(&status, 0, sizeof(status));
spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
status.state = xsdfec->state;
xsdfec->state_updated = false;
spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) &
XSDFEC_IS_ACTIVITY_SET);
err = copy_to_user(arg, &status, sizeof(status));
if (err)
err = -EFAULT;
return err;
}
static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg)
{
int err;
err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config));
if (err)
err = -EFAULT;
return err;
}
static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
u32 mask_read;
if (enable) {
/* Enable */
xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK);
mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
if (mask_read & XSDFEC_ISR_MASK) {
dev_dbg(xsdfec->dev,
"SDFEC enabling irq with IER failed");
return -EIO;
}
} else {
/* Disable */
xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK);
mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR);
if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) {
dev_dbg(xsdfec->dev,
"SDFEC disabling irq with IDR failed");
return -EIO;
}
}
return 0;
}
static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable)
{
u32 mask_read;
if (enable) {
/* Enable */
xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR,
XSDFEC_ALL_ECC_ISR_MASK);
mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) {
dev_dbg(xsdfec->dev,
"SDFEC enabling ECC irq with ECC IER failed");
return -EIO;
}
} else {
/* Disable */
xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR,
XSDFEC_ALL_ECC_ISR_MASK);
mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR);
if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
XSDFEC_ECC_ISR_MASK) ||
((mask_read & XSDFEC_ALL_ECC_ISR_MASK) ==
XSDFEC_PL_INIT_ECC_ISR_MASK))) {
dev_dbg(xsdfec->dev,
"SDFEC disable ECC irq with ECC IDR failed");
return -EIO;
}
}
return 0;
}
static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg)
{
struct xsdfec_irq irq;
int err;
int isr_err;
int ecc_err;
err = copy_from_user(&irq, arg, sizeof(irq));
if (err)
return -EFAULT;
/* Setup tlast related IRQ */
isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr);
if (!isr_err)
xsdfec->config.irq.enable_isr = irq.enable_isr;
/* Setup ECC related IRQ */
ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr);
if (!ecc_err)
xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr;
if (isr_err < 0 || ecc_err < 0)
err = -EIO;
return err;
}
static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
struct xsdfec_turbo turbo;
int err;
u32 turbo_write;
err = copy_from_user(&turbo, arg, sizeof(turbo));
if (err)
return -EFAULT;
if (turbo.alg >= XSDFEC_TURBO_ALG_MAX)
return -EINVAL;
if (turbo.scale > XSDFEC_TURBO_SCALE_MAX)
return -EINVAL;
/* Check to see what device tree says about the FEC codes */
if (xsdfec->config.code == XSDFEC_LDPC_CODE)
return -EIO;
turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK)
<< XSDFEC_TURBO_SCALE_BIT_POS) |
turbo.alg;
xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write);
return err;
}
static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg)
{
u32 reg_value;
struct xsdfec_turbo turbo_params;
int err;
if (xsdfec->config.code == XSDFEC_LDPC_CODE)
return -EIO;
memset(&turbo_params, 0, sizeof(turbo_params));
reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR);
turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >>
XSDFEC_TURBO_SCALE_BIT_POS;
turbo_params.alg = reg_value & 0x1;
err = copy_to_user(arg, &turbo_params, sizeof(turbo_params));
if (err)
err = -EFAULT;
return err;
}
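/*
 * REG0 packs the LDPC code word length N into bits [15:0] and the
 * information word length K into bits [31:16]; both must be multiples of
 * psize and satisfy the range checks below. Worked example (editor's
 * illustration): with psize = 125, n = 5000 and k = 4375,
 * wdata = (4375 << 16) | 5000.
 */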
static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize,
u32 offset)
{
u32 wdata;
if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 ||
(n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) {
dev_dbg(xsdfec->dev, "N value is not in range");
return -EINVAL;
}
n <<= XSDFEC_REG0_N_LSB;
if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX ||
(k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) {
dev_dbg(xsdfec->dev, "K value is not in range");
return -EINVAL;
}
k = k << XSDFEC_REG0_K_LSB;
wdata = k | n;
if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) {
dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x",
XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
(offset * XSDFEC_LDPC_REG_JUMP));
return -EINVAL;
}
xsdfec_regwrite(xsdfec,
XSDFEC_LDPC_CODE_REG0_ADDR_BASE +
(offset * XSDFEC_LDPC_REG_JUMP),
wdata);
return 0;
}
static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize,
u32 no_packing, u32 nm, u32 offset)
{
u32 wdata;
if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) {
dev_dbg(xsdfec->dev, "Psize is not in range");
return -EINVAL;
}
if (no_packing != 0 && no_packing != 1)
dev_dbg(xsdfec->dev, "No-packing bit register invalid");
no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) &
XSDFEC_REG1_NO_PACKING_MASK);
if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB))
dev_dbg(xsdfec->dev, "NM is beyond 10 bits");
nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK;
wdata = nm | no_packing | psize;
if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) {
dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x",
XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
(offset * XSDFEC_LDPC_REG_JUMP));
return -EINVAL;
}
xsdfec_regwrite(xsdfec,
XSDFEC_LDPC_CODE_REG1_ADDR_BASE +
(offset * XSDFEC_LDPC_REG_JUMP),
wdata);
return 0;
}
static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc,
u32 norm_type, u32 special_qc, u32 no_final_parity,
u32 max_schedule, u32 offset)
{
u32 wdata;
if (nlayers < XSDFEC_REG2_NLAYERS_MIN ||
nlayers > XSDFEC_REG2_NLAYERS_MAX) {
dev_dbg(xsdfec->dev, "Nlayers is not in range");
return -EINVAL;
}
if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB))
dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits");
nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK;
if (norm_type > 1)
dev_dbg(xsdfec->dev, "Norm type is invalid");
norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) &
XSDFEC_REG2_NORM_TYPE_MASK);
if (special_qc > 1)
dev_dbg(xsdfec->dev, "Special QC in invalid");
special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) &
XSDFEC_REG2_SPECIAL_QC_MASK);
if (no_final_parity > 1)
dev_dbg(xsdfec->dev, "No final parity check invalid");
no_final_parity =
((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) &
XSDFEC_REG2_NO_FINAL_PARITY_MASK);
if (max_schedule &
~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB))
dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits");
max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) &
XSDFEC_REG2_MAX_SCHEDULE_MASK);
wdata = (max_schedule | no_final_parity | special_qc | norm_type |
nmqc | nlayers);
if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) {
dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x",
XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
(offset * XSDFEC_LDPC_REG_JUMP));
return -EINVAL;
}
xsdfec_regwrite(xsdfec,
XSDFEC_LDPC_CODE_REG2_ADDR_BASE +
(offset * XSDFEC_LDPC_REG_JUMP),
wdata);
return 0;
}
static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off,
u16 qc_off, u32 offset)
{
u32 wdata;
wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) |
(la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off);
if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) >
XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) {
dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x",
XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
(offset * XSDFEC_LDPC_REG_JUMP));
return -EINVAL;
}
xsdfec_regwrite(xsdfec,
XSDFEC_LDPC_CODE_REG3_ADDR_BASE +
(offset * XSDFEC_LDPC_REG_JUMP),
wdata);
return 0;
}
static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset,
u32 *src_ptr, u32 len, const u32 base_addr,
const u32 depth)
{
u32 reg = 0;
int res, i, nr_pages;
u32 n;
u32 *addr = NULL;
struct page *pages[MAX_NUM_PAGES];
/*
* Writes that go beyond the length of
	 * the Shared Scale (SC) table should fail
*/
if (offset > depth / XSDFEC_REG_WIDTH_JUMP ||
len > depth / XSDFEC_REG_WIDTH_JUMP ||
offset + len > depth / XSDFEC_REG_WIDTH_JUMP) {
dev_dbg(xsdfec->dev, "Write exceeds SC table length");
return -EINVAL;
}
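	/* Number of pages needed to cover len 32-bit table words, rounded up. */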
n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE;
if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)
n += 1;
if (WARN_ON_ONCE(n > INT_MAX))
return -EINVAL;
nr_pages = n;
res = pin_user_pages_fast((unsigned long)src_ptr, nr_pages, 0, pages);
if (res < nr_pages) {
if (res > 0)
unpin_user_pages(pages, res);
return -EINVAL;
}
for (i = 0; i < nr_pages; i++) {
addr = kmap_local_page(pages[i]);
do {
xsdfec_regwrite(xsdfec,
base_addr + ((offset + reg) *
XSDFEC_REG_WIDTH_JUMP),
addr[reg]);
reg++;
} while ((reg < len) &&
((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE));
kunmap_local(addr);
unpin_user_page(pages[i]);
}
return 0;
}
static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg)
{
struct xsdfec_ldpc_params *ldpc;
int ret, n;
ldpc = memdup_user(arg, sizeof(*ldpc));
if (IS_ERR(ldpc))
return PTR_ERR(ldpc);
if (xsdfec->config.code == XSDFEC_TURBO_CODE) {
ret = -EIO;
goto err_out;
}
/* Verify Device has not started */
if (xsdfec->state == XSDFEC_STARTED) {
ret = -EIO;
goto err_out;
}
if (xsdfec->config.code_wr_protect) {
ret = -EIO;
goto err_out;
}
/* Write Reg 0 */
ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize,
ldpc->code_id);
if (ret)
goto err_out;
/* Write Reg 1 */
ret = xsdfec_reg1_write(xsdfec, ldpc->psize, ldpc->no_packing, ldpc->nm,
ldpc->code_id);
if (ret)
goto err_out;
/* Write Reg 2 */
ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc,
ldpc->norm_type, ldpc->special_qc,
ldpc->no_final_parity, ldpc->max_schedule,
ldpc->code_id);
if (ret)
goto err_out;
/* Write Reg 3 */
ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off,
ldpc->qc_off, ldpc->code_id);
if (ret)
goto err_out;
/* Write Shared Codes */
n = ldpc->nlayers / 4;
if (ldpc->nlayers % 4)
n++;
ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n,
XSDFEC_LDPC_SC_TABLE_ADDR_BASE,
XSDFEC_SC_TABLE_DEPTH);
if (ret < 0)
goto err_out;
ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table,
ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE,
XSDFEC_LA_TABLE_DEPTH);
if (ret < 0)
goto err_out;
ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table,
ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE,
XSDFEC_QC_TABLE_DEPTH);
err_out:
kfree(ldpc);
return ret;
}
static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
{
bool order_invalid;
enum xsdfec_order order;
int err;
err = get_user(order, (enum xsdfec_order __user *)arg);
if (err)
return -EFAULT;
order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
(order != XSDFEC_OUT_OF_ORDER);
if (order_invalid)
return -EINVAL;
/* Verify Device has not started */
if (xsdfec->state == XSDFEC_STARTED)
return -EIO;
xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);
xsdfec->config.order = order;
return 0;
}
static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
{
bool bypass;
int err;
err = get_user(bypass, arg);
if (err)
return -EFAULT;
/* Verify Device has not started */
if (xsdfec->state == XSDFEC_STARTED)
return -EIO;
if (bypass)
xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
else
xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);
xsdfec->config.bypass = bypass;
return 0;
}
static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
{
u32 reg_value;
bool is_active;
int err;
reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
/* using a double ! operator instead of casting */
is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
err = put_user(is_active, arg);
if (err)
return -EFAULT;
return err;
}
static u32
xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
{
u32 axis_width_field = 0;
switch (axis_width_cfg) {
case XSDFEC_1x128b:
axis_width_field = 0;
break;
case XSDFEC_2x128b:
axis_width_field = 1;
break;
case XSDFEC_4x128b:
axis_width_field = 2;
break;
}
return axis_width_field;
}
static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
axis_word_inc_cfg)
{
u32 axis_words_field = 0;
if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
axis_word_inc_cfg == XSDFEC_IN_BLOCK)
axis_words_field = 0;
else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
axis_words_field = 1;
return axis_words_field;
}
static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
{
u32 reg_value;
u32 dout_words_field;
u32 dout_width_field;
u32 din_words_field;
u32 din_width_field;
struct xsdfec_config *config = &xsdfec->config;
/* translate config info to register values */
dout_words_field =
xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
dout_width_field =
xsdfec_translate_axis_width_cfg_val(config->dout_width);
din_words_field =
xsdfec_translate_axis_words_cfg_val(config->din_word_include);
din_width_field =
xsdfec_translate_axis_width_cfg_val(config->din_width);
reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;
xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
return 0;
}
static int xsdfec_start(struct xsdfec_dev *xsdfec)
{
u32 regread;
regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
regread &= 0x1;
if (regread != xsdfec->config.code) {
dev_dbg(xsdfec->dev,
"%s SDFEC HW code does not match driver code, reg %d, code %d",
__func__, regread, xsdfec->config.code);
return -EINVAL;
}
/* Set AXIS enable */
xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
XSDFEC_AXIS_ENABLE_MASK);
/* Done */
xsdfec->state = XSDFEC_STARTED;
return 0;
}
static int xsdfec_stop(struct xsdfec_dev *xsdfec)
{
u32 regread;
if (xsdfec->state != XSDFEC_STARTED)
dev_dbg(xsdfec->dev, "Device not started correctly");
/* Disable AXIS_ENABLE Input interfaces only */
regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
/* Stop */
xsdfec->state = XSDFEC_STOPPED;
return 0;
}
static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
{
spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
xsdfec->isr_err_count = 0;
xsdfec->uecc_count = 0;
xsdfec->cecc_count = 0;
spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
return 0;
}
static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
{
int err;
struct xsdfec_stats user_stats;
spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
user_stats.isr_err_count = xsdfec->isr_err_count;
user_stats.cecc_count = xsdfec->cecc_count;
user_stats.uecc_count = xsdfec->uecc_count;
xsdfec->stats_updated = false;
spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
err = copy_to_user(arg, &user_stats, sizeof(user_stats));
if (err)
err = -EFAULT;
return err;
}
static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
{
/* Ensure registers are aligned with core configuration */
xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
xsdfec_cfg_axi_streams(xsdfec);
update_config_from_hw(xsdfec);
return 0;
}
static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
unsigned long data)
{
struct xsdfec_dev *xsdfec;
void __user *arg = (void __user *)data;
int rval;
xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);
/* In failed state allow only reset and get status IOCTLs */
if (xsdfec->state == XSDFEC_NEEDS_RESET &&
(cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
return -EPERM;
}
switch (cmd) {
case XSDFEC_START_DEV:
rval = xsdfec_start(xsdfec);
break;
case XSDFEC_STOP_DEV:
rval = xsdfec_stop(xsdfec);
break;
case XSDFEC_CLEAR_STATS:
rval = xsdfec_clear_stats(xsdfec);
break;
case XSDFEC_GET_STATS:
rval = xsdfec_get_stats(xsdfec, arg);
break;
case XSDFEC_GET_STATUS:
rval = xsdfec_get_status(xsdfec, arg);
break;
case XSDFEC_GET_CONFIG:
rval = xsdfec_get_config(xsdfec, arg);
break;
case XSDFEC_SET_DEFAULT_CONFIG:
rval = xsdfec_set_default_config(xsdfec);
break;
case XSDFEC_SET_IRQ:
rval = xsdfec_set_irq(xsdfec, arg);
break;
case XSDFEC_SET_TURBO:
rval = xsdfec_set_turbo(xsdfec, arg);
break;
case XSDFEC_GET_TURBO:
rval = xsdfec_get_turbo(xsdfec, arg);
break;
case XSDFEC_ADD_LDPC_CODE_PARAMS:
rval = xsdfec_add_ldpc(xsdfec, arg);
break;
case XSDFEC_SET_ORDER:
rval = xsdfec_set_order(xsdfec, arg);
break;
case XSDFEC_SET_BYPASS:
rval = xsdfec_set_bypass(xsdfec, arg);
break;
case XSDFEC_IS_ACTIVE:
rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
break;
default:
rval = -ENOTTY;
break;
}
return rval;
}
static __poll_t xsdfec_poll(struct file *file, poll_table *wait)
{
__poll_t mask = 0;
struct xsdfec_dev *xsdfec;
xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
poll_wait(file, &xsdfec->waitq, wait);
/* XSDFEC ISR detected an error */
spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
if (xsdfec->state_updated)
mask |= EPOLLIN | EPOLLPRI;
if (xsdfec->stats_updated)
mask |= EPOLLIN | EPOLLRDNORM;
spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
return mask;
}
static const struct file_operations xsdfec_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = xsdfec_dev_ioctl,
.poll = xsdfec_poll,
.compat_ioctl = compat_ptr_ioctl,
};
static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
{
struct device *dev = xsdfec->dev;
struct device_node *node = dev->of_node;
int rval;
const char *fec_code;
u32 din_width;
u32 din_word_include;
u32 dout_width;
u32 dout_word_include;
rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
if (rval < 0)
return rval;
if (!strcasecmp(fec_code, "ldpc"))
xsdfec->config.code = XSDFEC_LDPC_CODE;
else if (!strcasecmp(fec_code, "turbo"))
xsdfec->config.code = XSDFEC_TURBO_CODE;
else
return -EINVAL;
rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
&din_word_include);
if (rval < 0)
return rval;
if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
xsdfec->config.din_word_include = din_word_include;
else
return -EINVAL;
rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
if (rval < 0)
return rval;
switch (din_width) {
/* Fall through and set for valid values */
case XSDFEC_1x128b:
case XSDFEC_2x128b:
case XSDFEC_4x128b:
xsdfec->config.din_width = din_width;
break;
default:
return -EINVAL;
}
rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
&dout_word_include);
if (rval < 0)
return rval;
if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
xsdfec->config.dout_word_include = dout_word_include;
else
return -EINVAL;
rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
if (rval < 0)
return rval;
switch (dout_width) {
/* Fall through and set for valid values */
case XSDFEC_1x128b:
case XSDFEC_2x128b:
case XSDFEC_4x128b:
xsdfec->config.dout_width = dout_width;
break;
default:
return -EINVAL;
}
/* Write LDPC to CODE Register */
xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
xsdfec_cfg_axi_streams(xsdfec);
return 0;
}
static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
{
struct xsdfec_dev *xsdfec = dev_id;
irqreturn_t ret = IRQ_HANDLED;
u32 ecc_err;
u32 isr_err;
u32 uecc_count;
u32 cecc_count;
u32 isr_err_count;
u32 aecc_count;
u32 tmp;
WARN_ON(xsdfec->irq != irq);
/* Mask Interrupts */
xsdfec_isr_enable(xsdfec, false);
xsdfec_ecc_isr_enable(xsdfec, false);
/* Read ISR */
ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
/* Clear the interrupts */
xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);
tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
/* Count uncorrectable 2-bit errors */
uecc_count = hweight32(tmp);
/* Count all ECC errors */
aecc_count = hweight32(ecc_err);
	/* Number of correctable 1-bit ECC errors */
cecc_count = aecc_count - 2 * uecc_count;
/* Count ISR errors */
isr_err_count = hweight32(isr_err);
dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
uecc_count, aecc_count, cecc_count, isr_err_count);
dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
xsdfec->cecc_count, xsdfec->isr_err_count);
spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
	/* Accumulate uncorrectable (2-bit) ECC errors */
if (uecc_count)
xsdfec->uecc_count += uecc_count;
	/* Accumulate correctable (1-bit) ECC errors */
if (cecc_count)
xsdfec->cecc_count += cecc_count;
	/* Accumulate ISR errors */
if (isr_err_count)
xsdfec->isr_err_count += isr_err_count;
/* Update state/stats flag */
if (uecc_count) {
if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
xsdfec->state = XSDFEC_NEEDS_RESET;
else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
xsdfec->state = XSDFEC_PL_RECONFIGURE;
xsdfec->stats_updated = true;
xsdfec->state_updated = true;
}
if (cecc_count)
xsdfec->stats_updated = true;
if (isr_err_count) {
xsdfec->state = XSDFEC_NEEDS_RESET;
xsdfec->stats_updated = true;
xsdfec->state_updated = true;
}
spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
xsdfec->stats_updated);
	/* Wake up poll() waiters if state or stats changed */
if (xsdfec->state_updated || xsdfec->stats_updated)
wake_up_interruptible(&xsdfec->waitq);
else
ret = IRQ_NONE;
/* Unmask Interrupts */
xsdfec_isr_enable(xsdfec, true);
xsdfec_ecc_isr_enable(xsdfec, true);
return ret;
}
static int xsdfec_clk_init(struct platform_device *pdev,
struct xsdfec_clks *clks)
{
int err;
clks->core_clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(clks->core_clk)) {
dev_err(&pdev->dev, "failed to get core_clk");
return PTR_ERR(clks->core_clk);
}
clks->axi_clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
if (IS_ERR(clks->axi_clk)) {
dev_err(&pdev->dev, "failed to get axi_clk");
return PTR_ERR(clks->axi_clk);
}
clks->din_words_clk = devm_clk_get(&pdev->dev, "s_axis_din_words_aclk");
if (IS_ERR(clks->din_words_clk)) {
if (PTR_ERR(clks->din_words_clk) != -ENOENT) {
err = PTR_ERR(clks->din_words_clk);
return err;
}
clks->din_words_clk = NULL;
}
clks->din_clk = devm_clk_get(&pdev->dev, "s_axis_din_aclk");
if (IS_ERR(clks->din_clk)) {
if (PTR_ERR(clks->din_clk) != -ENOENT) {
err = PTR_ERR(clks->din_clk);
return err;
}
clks->din_clk = NULL;
}
clks->dout_clk = devm_clk_get(&pdev->dev, "m_axis_dout_aclk");
if (IS_ERR(clks->dout_clk)) {
if (PTR_ERR(clks->dout_clk) != -ENOENT) {
err = PTR_ERR(clks->dout_clk);
return err;
}
clks->dout_clk = NULL;
}
clks->dout_words_clk =
devm_clk_get(&pdev->dev, "s_axis_dout_words_aclk");
if (IS_ERR(clks->dout_words_clk)) {
if (PTR_ERR(clks->dout_words_clk) != -ENOENT) {
err = PTR_ERR(clks->dout_words_clk);
return err;
}
clks->dout_words_clk = NULL;
}
clks->ctrl_clk = devm_clk_get(&pdev->dev, "s_axis_ctrl_aclk");
if (IS_ERR(clks->ctrl_clk)) {
if (PTR_ERR(clks->ctrl_clk) != -ENOENT) {
err = PTR_ERR(clks->ctrl_clk);
return err;
}
clks->ctrl_clk = NULL;
}
clks->status_clk = devm_clk_get(&pdev->dev, "m_axis_status_aclk");
if (IS_ERR(clks->status_clk)) {
if (PTR_ERR(clks->status_clk) != -ENOENT) {
err = PTR_ERR(clks->status_clk);
return err;
}
clks->status_clk = NULL;
}
err = clk_prepare_enable(clks->core_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable core_clk (%d)", err);
return err;
}
err = clk_prepare_enable(clks->axi_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable axi_clk (%d)", err);
goto err_disable_core_clk;
}
err = clk_prepare_enable(clks->din_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable din_clk (%d)", err);
goto err_disable_axi_clk;
}
err = clk_prepare_enable(clks->din_words_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable din_words_clk (%d)", err);
goto err_disable_din_clk;
}
err = clk_prepare_enable(clks->dout_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable dout_clk (%d)", err);
goto err_disable_din_words_clk;
}
err = clk_prepare_enable(clks->dout_words_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable dout_words_clk (%d)",
err);
goto err_disable_dout_clk;
}
err = clk_prepare_enable(clks->ctrl_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable ctrl_clk (%d)", err);
goto err_disable_dout_words_clk;
}
err = clk_prepare_enable(clks->status_clk);
if (err) {
dev_err(&pdev->dev, "failed to enable status_clk (%d)\n", err);
goto err_disable_ctrl_clk;
}
return err;
err_disable_ctrl_clk:
clk_disable_unprepare(clks->ctrl_clk);
err_disable_dout_words_clk:
clk_disable_unprepare(clks->dout_words_clk);
err_disable_dout_clk:
clk_disable_unprepare(clks->dout_clk);
err_disable_din_words_clk:
clk_disable_unprepare(clks->din_words_clk);
err_disable_din_clk:
clk_disable_unprepare(clks->din_clk);
err_disable_axi_clk:
clk_disable_unprepare(clks->axi_clk);
err_disable_core_clk:
clk_disable_unprepare(clks->core_clk);
return err;
}
static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
{
clk_disable_unprepare(clks->status_clk);
clk_disable_unprepare(clks->ctrl_clk);
clk_disable_unprepare(clks->dout_words_clk);
clk_disable_unprepare(clks->dout_clk);
clk_disable_unprepare(clks->din_words_clk);
clk_disable_unprepare(clks->din_clk);
clk_disable_unprepare(clks->core_clk);
clk_disable_unprepare(clks->axi_clk);
}
static int xsdfec_probe(struct platform_device *pdev)
{
struct xsdfec_dev *xsdfec;
struct device *dev;
int err;
bool irq_enabled = true;
xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
if (!xsdfec)
return -ENOMEM;
xsdfec->dev = &pdev->dev;
spin_lock_init(&xsdfec->error_data_lock);
err = xsdfec_clk_init(pdev, &xsdfec->clks);
if (err)
return err;
dev = xsdfec->dev;
xsdfec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xsdfec->regs)) {
err = PTR_ERR(xsdfec->regs);
goto err_xsdfec_dev;
}
xsdfec->irq = platform_get_irq(pdev, 0);
if (xsdfec->irq < 0) {
dev_dbg(dev, "platform_get_irq failed");
irq_enabled = false;
}
err = xsdfec_parse_of(xsdfec);
if (err < 0)
goto err_xsdfec_dev;
update_config_from_hw(xsdfec);
/* Save driver private data */
platform_set_drvdata(pdev, xsdfec);
if (irq_enabled) {
init_waitqueue_head(&xsdfec->waitq);
/* Register IRQ thread */
err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
xsdfec_irq_thread, IRQF_ONESHOT,
"xilinx-sdfec16", xsdfec);
if (err < 0) {
dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
goto err_xsdfec_dev;
}
}
err = ida_alloc(&dev_nrs, GFP_KERNEL);
if (err < 0)
goto err_xsdfec_dev;
xsdfec->dev_id = err;
snprintf(xsdfec->dev_name, DEV_NAME_LEN, "xsdfec%d", xsdfec->dev_id);
xsdfec->miscdev.minor = MISC_DYNAMIC_MINOR;
xsdfec->miscdev.name = xsdfec->dev_name;
xsdfec->miscdev.fops = &xsdfec_fops;
xsdfec->miscdev.parent = dev;
err = misc_register(&xsdfec->miscdev);
if (err) {
dev_err(dev, "error:%d. Unable to register device", err);
goto err_xsdfec_ida;
}
return 0;
err_xsdfec_ida:
ida_free(&dev_nrs, xsdfec->dev_id);
err_xsdfec_dev:
xsdfec_disable_all_clks(&xsdfec->clks);
return err;
}
static int xsdfec_remove(struct platform_device *pdev)
{
struct xsdfec_dev *xsdfec;
xsdfec = platform_get_drvdata(pdev);
misc_deregister(&xsdfec->miscdev);
ida_free(&dev_nrs, xsdfec->dev_id);
xsdfec_disable_all_clks(&xsdfec->clks);
return 0;
}
static const struct of_device_id xsdfec_of_match[] = {
{
.compatible = "xlnx,sd-fec-1.1",
},
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, xsdfec_of_match);
static struct platform_driver xsdfec_driver = {
.driver = {
.name = "xilinx-sdfec",
.of_match_table = xsdfec_of_match,
},
.probe = xsdfec_probe,
.remove = xsdfec_remove,
};
module_platform_driver(xsdfec_driver);
MODULE_AUTHOR("Xilinx, Inc");
MODULE_DESCRIPTION("Xilinx SD-FEC16 Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/xilinx_sdfec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic on-chip SRAM allocation driver
*
* Copyright (C) 2012 Philipp Zabel, Pengutronix
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/list_sort.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <soc/at91/atmel-secumod.h>
#include "sram.h"
#define SRAM_GRANULARITY 32
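/*
 * Consumer-side sketch (editor's illustration, assuming the generic gen_pool
 * helpers from <linux/genalloc.h>): a driver referencing an exported "pool"
 * partition through a phandle property (named "sram" here purely as an
 * example) could allocate from it roughly as follows.
 *
 *	struct gen_pool *pool = of_gen_pool_get(dev->of_node, "sram", 0);
 *	dma_addr_t dma;
 *	void *va;
 *
 *	if (!pool)
 *		return -EPROBE_DEFER;
 *	va = gen_pool_dma_alloc(pool, size, &dma);
 */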
static ssize_t sram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
{
struct sram_partition *part;
part = container_of(attr, struct sram_partition, battr);
mutex_lock(&part->lock);
memcpy_fromio(buf, part->base + pos, count);
mutex_unlock(&part->lock);
return count;
}
static ssize_t sram_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
{
struct sram_partition *part;
part = container_of(attr, struct sram_partition, battr);
mutex_lock(&part->lock);
memcpy_toio(part->base + pos, buf, count);
mutex_unlock(&part->lock);
return count;
}
static int sram_add_pool(struct sram_dev *sram, struct sram_reserve *block,
phys_addr_t start, struct sram_partition *part)
{
int ret;
part->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
NUMA_NO_NODE, block->label);
if (IS_ERR(part->pool))
return PTR_ERR(part->pool);
ret = gen_pool_add_virt(part->pool, (unsigned long)part->base, start,
block->size, NUMA_NO_NODE);
if (ret < 0) {
dev_err(sram->dev, "failed to register subpool: %d\n", ret);
return ret;
}
return 0;
}
static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
phys_addr_t start, struct sram_partition *part)
{
sysfs_bin_attr_init(&part->battr);
part->battr.attr.name = devm_kasprintf(sram->dev, GFP_KERNEL,
"%llx.sram",
(unsigned long long)start);
if (!part->battr.attr.name)
return -ENOMEM;
part->battr.attr.mode = S_IRUSR | S_IWUSR;
part->battr.read = sram_read;
part->battr.write = sram_write;
part->battr.size = block->size;
return device_create_bin_file(sram->dev, &part->battr);
}
static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
phys_addr_t start)
{
int ret;
struct sram_partition *part = &sram->partition[sram->partitions];
mutex_init(&part->lock);
if (sram->config && sram->config->map_only_reserved) {
void __iomem *virt_base;
if (sram->no_memory_wc)
virt_base = devm_ioremap_resource(sram->dev, &block->res);
else
virt_base = devm_ioremap_resource_wc(sram->dev, &block->res);
if (IS_ERR(virt_base)) {
dev_err(sram->dev, "could not map SRAM at %pr\n", &block->res);
return PTR_ERR(virt_base);
}
part->base = virt_base;
} else {
part->base = sram->virt_base + block->start;
}
if (block->pool) {
ret = sram_add_pool(sram, block, start, part);
if (ret)
return ret;
}
if (block->export) {
ret = sram_add_export(sram, block, start, part);
if (ret)
return ret;
}
if (block->protect_exec) {
ret = sram_check_protect_exec(sram, block, part);
if (ret)
return ret;
ret = sram_add_pool(sram, block, start, part);
if (ret)
return ret;
sram_add_protect_exec(part);
}
sram->partitions++;
return 0;
}
static void sram_free_partitions(struct sram_dev *sram)
{
struct sram_partition *part;
if (!sram->partitions)
return;
part = &sram->partition[sram->partitions - 1];
for (; sram->partitions; sram->partitions--, part--) {
if (part->battr.size)
device_remove_bin_file(sram->dev, &part->battr);
if (part->pool &&
gen_pool_avail(part->pool) < gen_pool_size(part->pool))
dev_err(sram->dev, "removed pool while SRAM allocated\n");
}
}
static int sram_reserve_cmp(void *priv, const struct list_head *a,
const struct list_head *b)
{
struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);
return ra->start - rb->start;
}
static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
{
struct device_node *np = sram->dev->of_node, *child;
unsigned long size, cur_start, cur_size;
struct sram_reserve *rblocks, *block;
struct list_head reserve_list;
unsigned int nblocks, exports = 0;
const char *label;
int ret = 0;
INIT_LIST_HEAD(&reserve_list);
size = resource_size(res);
/*
* We need an additional block to mark the end of the memory region
* after the reserved blocks from the dt are processed.
*/
nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
rblocks = kcalloc(nblocks, sizeof(*rblocks), GFP_KERNEL);
if (!rblocks)
return -ENOMEM;
block = &rblocks[0];
for_each_available_child_of_node(np, child) {
struct resource child_res;
ret = of_address_to_resource(child, 0, &child_res);
if (ret < 0) {
dev_err(sram->dev,
"could not get address for node %pOF\n",
child);
goto err_chunks;
}
if (child_res.start < res->start || child_res.end > res->end) {
dev_err(sram->dev,
"reserved block %pOF outside the sram area\n",
child);
ret = -EINVAL;
goto err_chunks;
}
block->start = child_res.start - res->start;
block->size = resource_size(&child_res);
block->res = child_res;
list_add_tail(&block->list, &reserve_list);
block->export = of_property_read_bool(child, "export");
block->pool = of_property_read_bool(child, "pool");
block->protect_exec = of_property_read_bool(child, "protect-exec");
if ((block->export || block->pool || block->protect_exec) &&
block->size) {
exports++;
label = NULL;
ret = of_property_read_string(child, "label", &label);
if (ret && ret != -EINVAL) {
dev_err(sram->dev,
"%pOF has invalid label name\n",
child);
goto err_chunks;
}
if (!label)
block->label = devm_kasprintf(sram->dev, GFP_KERNEL,
"%s", of_node_full_name(child));
else
block->label = devm_kstrdup(sram->dev,
label, GFP_KERNEL);
if (!block->label) {
ret = -ENOMEM;
goto err_chunks;
}
dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n",
block->export ? "exported " : "", block->label,
block->start, block->start + block->size);
} else {
dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
block->start, block->start + block->size);
}
block++;
}
child = NULL;
/* the last chunk marks the end of the region */
rblocks[nblocks - 1].start = size;
rblocks[nblocks - 1].size = 0;
list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);
list_sort(NULL, &reserve_list, sram_reserve_cmp);
if (exports) {
sram->partition = devm_kcalloc(sram->dev,
exports, sizeof(*sram->partition),
GFP_KERNEL);
if (!sram->partition) {
ret = -ENOMEM;
goto err_chunks;
}
}
cur_start = 0;
list_for_each_entry(block, &reserve_list, list) {
/* can only happen if sections overlap */
if (block->start < cur_start) {
dev_err(sram->dev,
"block at 0x%x starts after current offset 0x%lx\n",
block->start, cur_start);
ret = -EINVAL;
sram_free_partitions(sram);
goto err_chunks;
}
if ((block->export || block->pool || block->protect_exec) &&
block->size) {
ret = sram_add_partition(sram, block,
res->start + block->start);
if (ret) {
sram_free_partitions(sram);
goto err_chunks;
}
}
/* current start is in a reserved block, so continue after it */
if (block->start == cur_start) {
cur_start = block->start + block->size;
continue;
}
/*
* allocate the space between the current starting
* address and the following reserved block, or the
* end of the region.
*/
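		/*
		 * For example, a 64 KiB region with a single non-exported
		 * reserved child at offsets 0x4000-0x5fff contributes two
		 * chunks to the default pool: 0x0000-0x3fff and
		 * 0x6000-0xffff.
		 */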
cur_size = block->start - cur_start;
if (sram->pool) {
dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
cur_start, cur_start + cur_size);
ret = gen_pool_add_virt(sram->pool,
(unsigned long)sram->virt_base + cur_start,
res->start + cur_start, cur_size, -1);
if (ret < 0) {
sram_free_partitions(sram);
goto err_chunks;
}
}
/* next allocation after this reserved block */
cur_start = block->start + block->size;
}
err_chunks:
of_node_put(child);
kfree(rblocks);
return ret;
}
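/*
* Illustrative sketch, not part of the upstream driver: the loop above walks
* the sorted reserve list and hands only the gaps between reserved blocks to
* the gen_pool, using the zero-sized sentinel entry to close the region.  The
* hypothetical helper below shows the same gap arithmetic on a plain sorted
* array of (start, size) pairs; all names here are invented for illustration.
*/
struct sketch_reserve {
	unsigned int start;
	unsigned int size;
};
static unsigned int sketch_count_pool_bytes(const struct sketch_reserve *rv,
					    unsigned int nr)
{
	unsigned int cur_start = 0, total = 0, i;
	for (i = 0; i < nr; i++) {
		if (rv[i].start > cur_start)
			total += rv[i].start - cur_start; /* exposed gap */
		cur_start = rv[i].start + rv[i].size; /* skip reserved block */
	}
	return total;
}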
static int atmel_securam_wait(void)
{
struct regmap *regmap;
u32 val;
regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod");
if (IS_ERR(regmap))
return -ENODEV;
return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val,
val & AT91_SECUMOD_RAMRDY_READY,
10000, 500000);
}
static const struct sram_config atmel_securam_config = {
.init = atmel_securam_wait,
};
/*
* SYSRAM contains areas that are not accessible by the
* kernel, such as the first 256K that is reserved for TZ.
* Accesses to those areas (including speculative accesses)
* trigger SErrors. As such we must map only the areas of
* SYSRAM specified in the device tree.
*/
static const struct sram_config tegra_sysram_config = {
.map_only_reserved = true,
};
static const struct of_device_id sram_dt_ids[] = {
{ .compatible = "mmio-sram" },
{ .compatible = "atmel,sama5d2-securam", .data = &atmel_securam_config },
{ .compatible = "nvidia,tegra186-sysram", .data = &tegra_sysram_config },
{ .compatible = "nvidia,tegra194-sysram", .data = &tegra_sysram_config },
{ .compatible = "nvidia,tegra234-sysram", .data = &tegra_sysram_config },
{}
};
static int sram_probe(struct platform_device *pdev)
{
const struct sram_config *config;
struct sram_dev *sram;
int ret;
struct resource *res;
struct clk *clk;
config = of_device_get_match_data(&pdev->dev);
sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
if (!sram)
return -ENOMEM;
sram->dev = &pdev->dev;
sram->no_memory_wc = of_property_read_bool(pdev->dev.of_node, "no-memory-wc");
sram->config = config;
if (!config || !config->map_only_reserved) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (sram->no_memory_wc)
sram->virt_base = devm_ioremap_resource(&pdev->dev, res);
else
sram->virt_base = devm_ioremap_resource_wc(&pdev->dev, res);
if (IS_ERR(sram->virt_base)) {
dev_err(&pdev->dev, "could not map SRAM registers\n");
return PTR_ERR(sram->virt_base);
}
sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
NUMA_NO_NODE, NULL);
if (IS_ERR(sram->pool))
return PTR_ERR(sram->pool);
}
clk = devm_clk_get_optional_enabled(sram->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
ret = sram_reserve_regions(sram,
platform_get_resource(pdev, IORESOURCE_MEM, 0));
if (ret)
return ret;
platform_set_drvdata(pdev, sram);
if (config && config->init) {
ret = config->init();
if (ret)
goto err_free_partitions;
}
if (sram->pool)
dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
gen_pool_size(sram->pool) / 1024, sram->virt_base);
return 0;
err_free_partitions:
sram_free_partitions(sram);
return ret;
}
static int sram_remove(struct platform_device *pdev)
{
struct sram_dev *sram = platform_get_drvdata(pdev);
sram_free_partitions(sram);
if (sram->pool && gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
dev_err(sram->dev, "removed while SRAM allocated\n");
return 0;
}
static struct platform_driver sram_driver = {
.driver = {
.name = "sram",
.of_match_table = sram_dt_ids,
},
.probe = sram_probe,
.remove = sram_remove,
};
static int __init sram_init(void)
{
return platform_driver_register(&sram_driver);
}
postcore_initcall(sram_init);
| linux-master | drivers/misc/sram.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* datasheet: https://www.nxp.com/docs/en/data-sheet/K20P144M120SF3.pdf
*
* Copyright (C) 2018-2021 Collabora
* Copyright (C) 2018-2021 GE Healthcare
*/
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spi/spi.h>
#define ACHC_MAX_FREQ_HZ 300000
#define ACHC_FAST_READ_FREQ_HZ 1000000
struct achc_data {
struct spi_device *main;
struct spi_device *ezport;
struct gpio_desc *reset;
struct mutex device_lock; /* avoid concurrent device access */
};
#define EZPORT_RESET_DELAY_MS 100
#define EZPORT_STARTUP_DELAY_MS 200
#define EZPORT_WRITE_WAIT_MS 10
#define EZPORT_TRANSFER_SIZE 2048
#define EZPORT_CMD_SP 0x02 /* flash section program */
#define EZPORT_CMD_RDSR 0x05 /* read status register */
#define EZPORT_CMD_WREN 0x06 /* write enable */
#define EZPORT_CMD_FAST_READ 0x0b /* flash read data at high speed */
#define EZPORT_CMD_RESET 0xb9 /* reset chip */
#define EZPORT_CMD_BE 0xc7 /* bulk erase */
#define EZPORT_CMD_SE 0xd8 /* sector erase */
#define EZPORT_SECTOR_SIZE 4096
#define EZPORT_SECTOR_MASK (EZPORT_SECTOR_SIZE - 1)
#define EZPORT_STATUS_WIP BIT(0) /* write in progress */
#define EZPORT_STATUS_WEN BIT(1) /* write enable */
#define EZPORT_STATUS_BEDIS BIT(2) /* bulk erase disable */
#define EZPORT_STATUS_FLEXRAM BIT(3) /* FlexRAM mode */
#define EZPORT_STATUS_WEF BIT(6) /* write error flag */
#define EZPORT_STATUS_FS BIT(7) /* flash security */
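/*
* Illustrative sketch, not part of the upstream driver: the RDSR status byte
* is used below as a plain bit mask.  A hypothetical helper showing how the
* bits that matter to this driver are tested:
*/
static inline bool sketch_write_done_ok(int status)
{
	/* finished when write-in-progress cleared and no write error latched */
	return !(status & EZPORT_STATUS_WIP) && !(status & EZPORT_STATUS_WEF);
}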
static void ezport_reset(struct gpio_desc *reset)
{
gpiod_set_value(reset, 1);
msleep(EZPORT_RESET_DELAY_MS);
gpiod_set_value(reset, 0);
msleep(EZPORT_STARTUP_DELAY_MS);
}
static int ezport_start_programming(struct spi_device *spi, struct gpio_desc *reset)
{
struct spi_message msg;
struct spi_transfer assert_cs = {
.cs_change = 1,
};
struct spi_transfer release_cs = { };
int ret;
spi_bus_lock(spi->master);
/* assert chip select */
spi_message_init(&msg);
spi_message_add_tail(&assert_cs, &msg);
ret = spi_sync_locked(spi, &msg);
if (ret)
goto fail;
msleep(EZPORT_STARTUP_DELAY_MS);
/* reset with asserted chip select to switch into programming mode */
ezport_reset(reset);
/* release chip select */
spi_message_init(&msg);
spi_message_add_tail(&release_cs, &msg);
ret = spi_sync_locked(spi, &msg);
fail:
spi_bus_unlock(spi->master);
return ret;
}
static void ezport_stop_programming(struct spi_device *spi, struct gpio_desc *reset)
{
/* reset without asserted chip select to return into normal mode */
spi_bus_lock(spi->master);
ezport_reset(reset);
spi_bus_unlock(spi->master);
}
static int ezport_get_status_register(struct spi_device *spi)
{
int ret;
ret = spi_w8r8(spi, EZPORT_CMD_RDSR);
if (ret < 0)
return ret;
if (ret == 0xff) {
dev_err(&spi->dev, "Invalid EzPort status, EzPort is not functional!\n");
return -EINVAL;
}
return ret;
}
static int ezport_soft_reset(struct spi_device *spi)
{
u8 cmd = EZPORT_CMD_RESET;
int ret;
ret = spi_write(spi, &cmd, 1);
if (ret < 0)
return ret;
msleep(EZPORT_STARTUP_DELAY_MS);
return 0;
}
static int ezport_send_simple(struct spi_device *spi, u8 cmd)
{
int ret;
ret = spi_write(spi, &cmd, 1);
if (ret < 0)
return ret;
return ezport_get_status_register(spi);
}
static int ezport_wait_write(struct spi_device *spi, u32 retries)
{
int ret;
u32 i;
for (i = 0; i < retries; i++) {
ret = ezport_get_status_register(spi);
if (ret >= 0 && !(ret & EZPORT_STATUS_WIP))
break;
msleep(EZPORT_WRITE_WAIT_MS);
}
return ret;
}
static int ezport_write_enable(struct spi_device *spi)
{
int ret = 0, retries = 3;
for (retries = 0; retries < 3; retries++) {
ret = ezport_send_simple(spi, EZPORT_CMD_WREN);
if (ret > 0 && ret & EZPORT_STATUS_WEN)
break;
}
if (!(ret & EZPORT_STATUS_WEN)) {
dev_err(&spi->dev, "EzPort write enable timed out\n");
return -ETIMEDOUT;
}
return 0;
}
static int ezport_bulk_erase(struct spi_device *spi)
{
int ret;
static const u8 cmd = EZPORT_CMD_BE;
dev_dbg(&spi->dev, "EzPort bulk erase...\n");
ret = ezport_write_enable(spi);
if (ret < 0)
return ret;
ret = spi_write(spi, &cmd, 1);
if (ret < 0)
return ret;
ret = ezport_wait_write(spi, 1000);
if (ret < 0)
return ret;
return 0;
}
static int ezport_section_erase(struct spi_device *spi, u32 address)
{
u8 query[] = {EZPORT_CMD_SE, (address >> 16) & 0xff, (address >> 8) & 0xff, address & 0xff};
int ret;
dev_dbg(&spi->dev, "Ezport section erase @ 0x%06x...\n", address);
if (address & EZPORT_SECTOR_MASK)
return -EINVAL;
ret = ezport_write_enable(spi);
if (ret < 0)
return ret;
ret = spi_write(spi, query, sizeof(query));
if (ret < 0)
return ret;
return ezport_wait_write(spi, 200);
}
static int ezport_flash_transfer(struct spi_device *spi, u32 address,
const u8 *payload, size_t payload_size)
{
struct spi_transfer xfers[2] = {};
u8 *command;
int ret;
dev_dbg(&spi->dev, "EzPort write %zu bytes @ 0x%06x...\n", payload_size, address);
ret = ezport_write_enable(spi);
if (ret < 0)
return ret;
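/*
* Note, not from the upstream sources: the 4-byte command header is
* heap-allocated rather than kept on the stack because SPI transfer buffers
* must be DMA-safe; GFP_DMA additionally keeps it in the low DMA zone for
* controllers that require it.
*/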
command = kmalloc(4, GFP_KERNEL | GFP_DMA);
if (!command)
return -ENOMEM;
command[0] = EZPORT_CMD_SP;
command[1] = address >> 16;
command[2] = address >> 8;
command[3] = address >> 0;
xfers[0].tx_buf = command;
xfers[0].len = 4;
xfers[1].tx_buf = payload;
xfers[1].len = payload_size;
ret = spi_sync_transfer(spi, xfers, 2);
kfree(command);
if (ret < 0)
return ret;
return ezport_wait_write(spi, 40);
}
static int ezport_flash_compare(struct spi_device *spi, u32 address,
const u8 *payload, size_t payload_size)
{
struct spi_transfer xfers[2] = {};
u8 *buffer;
int ret;
buffer = kmalloc(payload_size + 5, GFP_KERNEL | GFP_DMA);
if (!buffer)
return -ENOMEM;
buffer[0] = EZPORT_CMD_FAST_READ;
buffer[1] = address >> 16;
buffer[2] = address >> 8;
buffer[3] = address >> 0;
xfers[0].tx_buf = buffer;
xfers[0].len = 4;
xfers[0].speed_hz = ACHC_FAST_READ_FREQ_HZ;
xfers[1].rx_buf = buffer + 4;
xfers[1].len = payload_size + 1;
xfers[1].speed_hz = ACHC_FAST_READ_FREQ_HZ;
ret = spi_sync_transfer(spi, xfers, 2);
if (ret)
goto err;
/* FAST_READ receives one dummy byte before the real data */
ret = memcmp(payload, buffer + 4 + 1, payload_size);
if (ret) {
ret = -EBADMSG;
dev_dbg(&spi->dev, "Verification failure @ %06x", address);
print_hex_dump_bytes("fw: ", DUMP_PREFIX_OFFSET, payload, payload_size);
print_hex_dump_bytes("dev: ", DUMP_PREFIX_OFFSET, buffer + 4, payload_size);
}
err:
kfree(buffer);
return ret;
}
static int ezport_firmware_compare_data(struct spi_device *spi,
const u8 *data, size_t size)
{
int ret;
size_t address = 0;
size_t transfer_size;
dev_dbg(&spi->dev, "EzPort compare data with %zu bytes...\n", size);
ret = ezport_get_status_register(spi);
if (ret < 0)
return ret;
if (ret & EZPORT_STATUS_FS) {
dev_info(&spi->dev, "Device is in secure mode (status=0x%02x)!\n", ret);
dev_info(&spi->dev, "FW verification is not possible\n");
return -EACCES;
}
while (size - address > 0) {
transfer_size = min((size_t) EZPORT_TRANSFER_SIZE, size - address);
ret = ezport_flash_compare(spi, address, data+address, transfer_size);
if (ret)
return ret;
address += transfer_size;
}
return 0;
}
static int ezport_firmware_flash_data(struct spi_device *spi,
const u8 *data, size_t size)
{
int ret;
size_t address = 0;
size_t transfer_size;
dev_dbg(&spi->dev, "EzPort flash data with %zu bytes...\n", size);
ret = ezport_get_status_register(spi);
if (ret < 0)
return ret;
if (ret & EZPORT_STATUS_FS) {
ret = ezport_bulk_erase(spi);
if (ret < 0)
return ret;
if (ret & EZPORT_STATUS_FS)
return -EINVAL;
}
while (size - address > 0) {
if (!(address & EZPORT_SECTOR_MASK)) {
ret = ezport_section_erase(spi, address);
if (ret < 0)
return ret;
if (ret & EZPORT_STATUS_WIP || ret & EZPORT_STATUS_WEF)
return -EIO;
}
transfer_size = min((size_t) EZPORT_TRANSFER_SIZE, size - address);
ret = ezport_flash_transfer(spi, address,
data+address, transfer_size);
if (ret < 0)
return ret;
else if (ret & EZPORT_STATUS_WIP)
return -ETIMEDOUT;
else if (ret & EZPORT_STATUS_WEF)
return -EIO;
address += transfer_size;
}
dev_dbg(&spi->dev, "EzPort verify flashed data...\n");
ret = ezport_firmware_compare_data(spi, data, size);
/* allow missing FW verification in secure mode */
if (ret == -EACCES)
ret = 0;
if (ret < 0)
dev_err(&spi->dev, "Failed to verify flashed data: %d\n", ret);
ret = ezport_soft_reset(spi);
if (ret < 0)
dev_warn(&spi->dev, "EzPort reset failed!\n");
return ret;
}
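/*
* Illustrative sketch, not part of the upstream driver: with 4096-byte
* sectors and 2048-byte transfers, the loop above issues a sector erase on
* every second transfer (whenever the address is sector aligned).  The
* hypothetical helper below counts the resulting operations for an image of
* a given size.
*/
static void sketch_count_flash_ops(size_t image_size, unsigned int *n_erases,
				   unsigned int *n_writes)
{
	size_t address = 0;
	*n_erases = 0;
	*n_writes = 0;
	while (address < image_size) {
		if (!(address & EZPORT_SECTOR_MASK))
			(*n_erases)++;
		(*n_writes)++;
		address += EZPORT_TRANSFER_SIZE;
	}
}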
static int ezport_firmware_load(struct spi_device *spi, const char *fwname)
{
const struct firmware *fw;
int ret;
ret = request_firmware(&fw, fwname, &spi->dev);
if (ret) {
dev_err(&spi->dev, "Could not get firmware: %d\n", ret);
return ret;
}
ret = ezport_firmware_flash_data(spi, fw->data, fw->size);
release_firmware(fw);
return ret;
}
/**
* ezport_flash - flash device firmware
* @spi: SPI device for NXP EzPort interface
* @reset: the gpio connected to the device reset pin
* @fwname: filename of the firmware that should be flashed
*
* Context: can sleep
*
* Return: 0 on success; negative errno on failure
*/
static int ezport_flash(struct spi_device *spi, struct gpio_desc *reset, const char *fwname)
{
int ret;
ret = ezport_start_programming(spi, reset);
if (ret)
return ret;
ret = ezport_firmware_load(spi, fwname);
ezport_stop_programming(spi, reset);
if (ret)
dev_err(&spi->dev, "Failed to flash firmware: %d\n", ret);
else
dev_dbg(&spi->dev, "Finished FW flashing!\n");
return ret;
}
static ssize_t update_firmware_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct achc_data *achc = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret < 0 || value != 1)
return -EINVAL;
mutex_lock(&achc->device_lock);
ret = ezport_flash(achc->ezport, achc->reset, "achc.bin");
mutex_unlock(&achc->device_lock);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR_WO(update_firmware);
static ssize_t reset_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct achc_data *achc = dev_get_drvdata(dev);
int ret;
mutex_lock(&achc->device_lock);
ret = gpiod_get_value(achc->reset);
mutex_unlock(&achc->device_lock);
if (ret < 0)
return ret;
return sysfs_emit(buf, "%d\n", ret);
}
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct achc_data *achc = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret < 0 || value > 1)
return -EINVAL;
mutex_lock(&achc->device_lock);
gpiod_set_value(achc->reset, value);
mutex_unlock(&achc->device_lock);
return count;
}
static DEVICE_ATTR_RW(reset);
static struct attribute *gehc_achc_attrs[] = {
&dev_attr_update_firmware.attr,
&dev_attr_reset.attr,
NULL,
};
ATTRIBUTE_GROUPS(gehc_achc);
static void unregister_ezport(void *data)
{
struct spi_device *ezport = data;
spi_unregister_device(ezport);
}
static int gehc_achc_probe(struct spi_device *spi)
{
struct achc_data *achc;
int ezport_reg, ret;
spi->max_speed_hz = ACHC_MAX_FREQ_HZ;
spi->bits_per_word = 8;
spi->mode = SPI_MODE_0;
achc = devm_kzalloc(&spi->dev, sizeof(*achc), GFP_KERNEL);
if (!achc)
return -ENOMEM;
spi_set_drvdata(spi, achc);
achc->main = spi;
mutex_init(&achc->device_lock);
ret = of_property_read_u32_index(spi->dev.of_node, "reg", 1, &ezport_reg);
if (ret)
return dev_err_probe(&spi->dev, ret, "missing second reg entry!\n");
achc->ezport = spi_new_ancillary_device(spi, ezport_reg);
if (IS_ERR(achc->ezport))
return PTR_ERR(achc->ezport);
ret = devm_add_action_or_reset(&spi->dev, unregister_ezport, achc->ezport);
if (ret)
return ret;
achc->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(achc->reset))
return dev_err_probe(&spi->dev, PTR_ERR(achc->reset), "Could not get reset gpio\n");
return 0;
}
static const struct spi_device_id gehc_achc_id[] = {
{ "ge,achc", 0 },
{ "achc", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, gehc_achc_id);
static const struct of_device_id gehc_achc_of_match[] = {
{ .compatible = "ge,achc" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, gehc_achc_of_match);
static struct spi_driver gehc_achc_spi_driver = {
.driver = {
.name = "gehc-achc",
.of_match_table = gehc_achc_of_match,
.dev_groups = gehc_achc_groups,
},
.probe = gehc_achc_probe,
.id_table = gehc_achc_id,
};
module_spi_driver(gehc_achc_spi_driver);
MODULE_DESCRIPTION("GEHC ACHC driver");
MODULE_AUTHOR("Sebastian Reichel <[email protected]>");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/gehc-achc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Support for USB functionality of Hikey series boards
* based on the Hisilicon Kirin SoC.
*
* Copyright (C) 2017-2018 Hisilicon Electronics Co., Ltd.
* http://www.huawei.com
*
* Authors: Yu Chen <[email protected]>
*/
#include <linux/gpio/consumer.h>
#include <linux/kernel.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/usb/role.h>
#define DEVICE_DRIVER_NAME "hisi_hikey_usb"
#define HUB_VBUS_POWER_ON 1
#define HUB_VBUS_POWER_OFF 0
#define USB_SWITCH_TO_HUB 1
#define USB_SWITCH_TO_TYPEC 0
#define TYPEC_VBUS_POWER_ON 1
#define TYPEC_VBUS_POWER_OFF 0
struct hisi_hikey_usb {
struct device *dev;
struct gpio_desc *otg_switch;
struct gpio_desc *typec_vbus;
struct gpio_desc *reset;
struct regulator *regulator;
struct usb_role_switch *hub_role_sw;
struct usb_role_switch *dev_role_sw;
enum usb_role role;
struct mutex lock;
struct work_struct work;
struct notifier_block nb;
};
static void hub_power_ctrl(struct hisi_hikey_usb *hisi_hikey_usb, int value)
{
int ret, status;
if (!hisi_hikey_usb->regulator)
return;
status = regulator_is_enabled(hisi_hikey_usb->regulator);
if (status == !!value)
return;
if (value)
ret = regulator_enable(hisi_hikey_usb->regulator);
else
ret = regulator_disable(hisi_hikey_usb->regulator);
if (ret)
dev_err(hisi_hikey_usb->dev,
"Can't switch regulator state to %s\n",
value ? "enabled" : "disabled");
}
static void usb_switch_ctrl(struct hisi_hikey_usb *hisi_hikey_usb,
int switch_to)
{
if (!hisi_hikey_usb->otg_switch)
return;
gpiod_set_value_cansleep(hisi_hikey_usb->otg_switch, switch_to);
}
static void usb_typec_power_ctrl(struct hisi_hikey_usb *hisi_hikey_usb,
int value)
{
if (!hisi_hikey_usb->typec_vbus)
return;
gpiod_set_value_cansleep(hisi_hikey_usb->typec_vbus, value);
}
static void relay_set_role_switch(struct work_struct *work)
{
struct hisi_hikey_usb *hisi_hikey_usb = container_of(work,
struct hisi_hikey_usb,
work);
struct usb_role_switch *sw;
enum usb_role role;
if (!hisi_hikey_usb || !hisi_hikey_usb->dev_role_sw)
return;
mutex_lock(&hisi_hikey_usb->lock);
switch (hisi_hikey_usb->role) {
case USB_ROLE_NONE:
usb_typec_power_ctrl(hisi_hikey_usb, TYPEC_VBUS_POWER_OFF);
usb_switch_ctrl(hisi_hikey_usb, USB_SWITCH_TO_HUB);
hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_ON);
break;
case USB_ROLE_HOST:
hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF);
usb_switch_ctrl(hisi_hikey_usb, USB_SWITCH_TO_TYPEC);
usb_typec_power_ctrl(hisi_hikey_usb, TYPEC_VBUS_POWER_ON);
break;
case USB_ROLE_DEVICE:
hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF);
usb_typec_power_ctrl(hisi_hikey_usb, TYPEC_VBUS_POWER_OFF);
usb_switch_ctrl(hisi_hikey_usb, USB_SWITCH_TO_TYPEC);
break;
default:
break;
}
sw = hisi_hikey_usb->dev_role_sw;
role = hisi_hikey_usb->role;
mutex_unlock(&hisi_hikey_usb->lock);
usb_role_switch_set_role(sw, role);
}
static int hub_usb_role_switch_set(struct usb_role_switch *sw, enum usb_role role)
{
struct hisi_hikey_usb *hisi_hikey_usb = usb_role_switch_get_drvdata(sw);
if (!hisi_hikey_usb || !hisi_hikey_usb->dev_role_sw)
return -EINVAL;
mutex_lock(&hisi_hikey_usb->lock);
hisi_hikey_usb->role = role;
mutex_unlock(&hisi_hikey_usb->lock);
schedule_work(&hisi_hikey_usb->work);
return 0;
}
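/*
* Note, not from the upstream sources: the callback above only records the
* requested role under the lock and schedules the work item; the GPIO,
* regulator and downstream role-switch updates run later in
* relay_set_role_switch(), presumably because those operations may sleep and
* therefore cannot be done directly in every context the callback might be
* invoked from.
*/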
static int hisi_hikey_usb_of_role_switch(struct platform_device *pdev,
struct hisi_hikey_usb *hisi_hikey_usb)
{
struct device *dev = &pdev->dev;
struct usb_role_switch_desc hub_role_switch = {NULL};
if (!device_property_read_bool(dev, "usb-role-switch"))
return 0;
hisi_hikey_usb->otg_switch = devm_gpiod_get(dev, "otg-switch",
GPIOD_OUT_HIGH);
if (IS_ERR(hisi_hikey_usb->otg_switch)) {
dev_err(dev, "get otg-switch failed with error %ld\n",
PTR_ERR(hisi_hikey_usb->otg_switch));
return PTR_ERR(hisi_hikey_usb->otg_switch);
}
hisi_hikey_usb->typec_vbus = devm_gpiod_get(dev, "typec-vbus",
GPIOD_OUT_LOW);
if (IS_ERR(hisi_hikey_usb->typec_vbus)) {
dev_err(dev, "get typec-vbus failed with error %ld\n",
PTR_ERR(hisi_hikey_usb->typec_vbus));
return PTR_ERR(hisi_hikey_usb->typec_vbus);
}
hisi_hikey_usb->reset = devm_gpiod_get_optional(dev,
"hub-reset-en",
GPIOD_OUT_HIGH);
if (IS_ERR(hisi_hikey_usb->reset)) {
dev_err(dev, "get hub-reset-en failed with error %ld\n",
PTR_ERR(hisi_hikey_usb->reset));
return PTR_ERR(hisi_hikey_usb->reset);
}
hisi_hikey_usb->dev_role_sw = usb_role_switch_get(dev);
if (!hisi_hikey_usb->dev_role_sw)
return -EPROBE_DEFER;
if (IS_ERR(hisi_hikey_usb->dev_role_sw)) {
dev_err(dev, "get device role switch failed with error %ld\n",
PTR_ERR(hisi_hikey_usb->dev_role_sw));
return PTR_ERR(hisi_hikey_usb->dev_role_sw);
}
INIT_WORK(&hisi_hikey_usb->work, relay_set_role_switch);
hub_role_switch.fwnode = dev_fwnode(dev);
hub_role_switch.set = hub_usb_role_switch_set;
hub_role_switch.driver_data = hisi_hikey_usb;
hisi_hikey_usb->hub_role_sw = usb_role_switch_register(dev,
&hub_role_switch);
if (IS_ERR(hisi_hikey_usb->hub_role_sw)) {
dev_err(dev,
"failed to register hub role with error %ld\n",
PTR_ERR(hisi_hikey_usb->hub_role_sw));
usb_role_switch_put(hisi_hikey_usb->dev_role_sw);
return PTR_ERR(hisi_hikey_usb->hub_role_sw);
}
return 0;
}
static int hisi_hikey_usb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct hisi_hikey_usb *hisi_hikey_usb;
int ret;
hisi_hikey_usb = devm_kzalloc(dev, sizeof(*hisi_hikey_usb), GFP_KERNEL);
if (!hisi_hikey_usb)
return -ENOMEM;
hisi_hikey_usb->dev = &pdev->dev;
mutex_init(&hisi_hikey_usb->lock);
hisi_hikey_usb->regulator = devm_regulator_get(dev, "hub-vdd");
if (IS_ERR(hisi_hikey_usb->regulator)) {
if (PTR_ERR(hisi_hikey_usb->regulator) == -EPROBE_DEFER) {
dev_info(dev, "waiting for hub-vdd-supply\n");
return PTR_ERR(hisi_hikey_usb->regulator);
}
dev_err(dev, "get hub-vdd-supply failed with error %ld\n",
PTR_ERR(hisi_hikey_usb->regulator));
return PTR_ERR(hisi_hikey_usb->regulator);
}
ret = hisi_hikey_usb_of_role_switch(pdev, hisi_hikey_usb);
if (ret)
return ret;
platform_set_drvdata(pdev, hisi_hikey_usb);
return 0;
}
static int hisi_hikey_usb_remove(struct platform_device *pdev)
{
struct hisi_hikey_usb *hisi_hikey_usb = platform_get_drvdata(pdev);
if (hisi_hikey_usb->hub_role_sw) {
usb_role_switch_unregister(hisi_hikey_usb->hub_role_sw);
if (hisi_hikey_usb->dev_role_sw)
usb_role_switch_put(hisi_hikey_usb->dev_role_sw);
} else {
hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF);
}
return 0;
}
static const struct of_device_id id_table_hisi_hikey_usb[] = {
{ .compatible = "hisilicon,usbhub" },
{}
};
MODULE_DEVICE_TABLE(of, id_table_hisi_hikey_usb);
static struct platform_driver hisi_hikey_usb_driver = {
.probe = hisi_hikey_usb_probe,
.remove = hisi_hikey_usb_remove,
.driver = {
.name = DEVICE_DRIVER_NAME,
.of_match_table = id_table_hisi_hikey_usb,
},
};
module_platform_driver(hisi_hikey_usb_driver);
MODULE_AUTHOR("Yu Chen <[email protected]>");
MODULE_DESCRIPTION("Driver Support for USB functionality of Hikey");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/hisi_hikey_usb.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Atmel SSC driver
*
* Copyright (C) 2007 Atmel Corporation
*/
#include <linux/platform_device.h>
#include <linux/list.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/atmel-ssc.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of.h>
#include "../../sound/soc/atmel/atmel_ssc_dai.h"
/* Serialize access to ssc_list and user count */
static DEFINE_MUTEX(user_lock);
static LIST_HEAD(ssc_list);
struct ssc_device *ssc_request(unsigned int ssc_num)
{
int ssc_valid = 0;
struct ssc_device *ssc;
mutex_lock(&user_lock);
list_for_each_entry(ssc, &ssc_list, list) {
if (ssc->pdev->dev.of_node) {
if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc")
== ssc_num) {
ssc->pdev->id = ssc_num;
ssc_valid = 1;
break;
}
} else if (ssc->pdev->id == ssc_num) {
ssc_valid = 1;
break;
}
}
if (!ssc_valid) {
mutex_unlock(&user_lock);
pr_err("ssc: ssc%d platform device is missing\n", ssc_num);
return ERR_PTR(-ENODEV);
}
if (ssc->user) {
mutex_unlock(&user_lock);
dev_dbg(&ssc->pdev->dev, "module busy\n");
return ERR_PTR(-EBUSY);
}
ssc->user++;
mutex_unlock(&user_lock);
clk_prepare(ssc->clk);
return ssc;
}
EXPORT_SYMBOL(ssc_request);
void ssc_free(struct ssc_device *ssc)
{
bool disable_clk = true;
mutex_lock(&user_lock);
if (ssc->user)
ssc->user--;
else {
disable_clk = false;
dev_dbg(&ssc->pdev->dev, "device already free\n");
}
mutex_unlock(&user_lock);
if (disable_clk)
clk_unprepare(ssc->clk);
}
EXPORT_SYMBOL(ssc_free);
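/*
* Illustrative sketch, not part of this file: a client of the two exported
* helpers above would typically claim an SSC instance by index, program it
* through ssc->regs / ssc->irq, and release it again.  The calling driver and
* error handling shown here are hypothetical.
*/
static int sketch_ssc_client_attach(unsigned int ssc_num)
{
	struct ssc_device *ssc;
	ssc = ssc_request(ssc_num);
	if (IS_ERR(ssc))
		return PTR_ERR(ssc);
	/* ... program ssc->regs, request ssc->irq, start streaming ... */
	ssc_free(ssc);
	return 0;
}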
static struct atmel_ssc_platform_data at91rm9200_config = {
.use_dma = 0,
.has_fslen_ext = 0,
};
static struct atmel_ssc_platform_data at91sam9rl_config = {
.use_dma = 0,
.has_fslen_ext = 1,
};
static struct atmel_ssc_platform_data at91sam9g45_config = {
.use_dma = 1,
.has_fslen_ext = 1,
};
static const struct platform_device_id atmel_ssc_devtypes[] = {
{
.name = "at91rm9200_ssc",
.driver_data = (unsigned long) &at91rm9200_config,
}, {
.name = "at91sam9rl_ssc",
.driver_data = (unsigned long) &at91sam9rl_config,
}, {
.name = "at91sam9g45_ssc",
.driver_data = (unsigned long) &at91sam9g45_config,
}, {
/* sentinel */
}
};
#ifdef CONFIG_OF
static const struct of_device_id atmel_ssc_dt_ids[] = {
{
.compatible = "atmel,at91rm9200-ssc",
.data = &at91rm9200_config,
}, {
.compatible = "atmel,at91sam9rl-ssc",
.data = &at91sam9rl_config,
}, {
.compatible = "atmel,at91sam9g45-ssc",
.data = &at91sam9g45_config,
}, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids);
#endif
static inline const struct atmel_ssc_platform_data *
atmel_ssc_get_driver_data(struct platform_device *pdev)
{
if (pdev->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(atmel_ssc_dt_ids, pdev->dev.of_node);
if (match == NULL)
return NULL;
return match->data;
}
return (struct atmel_ssc_platform_data *)
platform_get_device_id(pdev)->driver_data;
}
#ifdef CONFIG_SND_ATMEL_SOC_SSC
static int ssc_sound_dai_probe(struct ssc_device *ssc)
{
struct device_node *np = ssc->pdev->dev.of_node;
int ret;
int id;
ssc->sound_dai = false;
if (!of_property_read_bool(np, "#sound-dai-cells"))
return 0;
id = of_alias_get_id(np, "ssc");
if (id < 0)
return id;
ret = atmel_ssc_set_audio(id);
ssc->sound_dai = !ret;
return ret;
}
static void ssc_sound_dai_remove(struct ssc_device *ssc)
{
if (!ssc->sound_dai)
return;
atmel_ssc_put_audio(of_alias_get_id(ssc->pdev->dev.of_node, "ssc"));
}
#else
static inline int ssc_sound_dai_probe(struct ssc_device *ssc)
{
if (of_property_read_bool(ssc->pdev->dev.of_node, "#sound-dai-cells"))
return -ENOTSUPP;
return 0;
}
static inline void ssc_sound_dai_remove(struct ssc_device *ssc)
{
}
#endif
static int ssc_probe(struct platform_device *pdev)
{
struct resource *regs;
struct ssc_device *ssc;
const struct atmel_ssc_platform_data *plat_dat;
ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
if (!ssc) {
dev_dbg(&pdev->dev, "out of memory\n");
return -ENOMEM;
}
ssc->pdev = pdev;
plat_dat = atmel_ssc_get_driver_data(pdev);
if (!plat_dat)
return -ENODEV;
ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat;
if (pdev->dev.of_node) {
struct device_node *np = pdev->dev.of_node;
ssc->clk_from_rk_pin =
of_property_read_bool(np, "atmel,clk-from-rk-pin");
}
ssc->regs = devm_platform_get_and_ioremap_resource(pdev, 0, ®s);
if (IS_ERR(ssc->regs))
return PTR_ERR(ssc->regs);
ssc->phybase = regs->start;
ssc->clk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(ssc->clk)) {
dev_dbg(&pdev->dev, "no pclk clock defined\n");
return -ENXIO;
}
/* disable all interrupts */
clk_prepare_enable(ssc->clk);
ssc_writel(ssc->regs, IDR, -1);
ssc_readl(ssc->regs, SR);
clk_disable_unprepare(ssc->clk);
ssc->irq = platform_get_irq(pdev, 0);
if (ssc->irq < 0) {
dev_dbg(&pdev->dev, "could not get irq\n");
return ssc->irq;
}
mutex_lock(&user_lock);
list_add_tail(&ssc->list, &ssc_list);
mutex_unlock(&user_lock);
platform_set_drvdata(pdev, ssc);
dev_info(&pdev->dev, "Atmel SSC device at 0x%p (irq %d)\n",
ssc->regs, ssc->irq);
if (ssc_sound_dai_probe(ssc))
dev_err(&pdev->dev, "failed to auto-setup ssc for audio\n");
return 0;
}
static int ssc_remove(struct platform_device *pdev)
{
struct ssc_device *ssc = platform_get_drvdata(pdev);
ssc_sound_dai_remove(ssc);
mutex_lock(&user_lock);
list_del(&ssc->list);
mutex_unlock(&user_lock);
return 0;
}
static struct platform_driver ssc_driver = {
.driver = {
.name = "ssc",
.of_match_table = of_match_ptr(atmel_ssc_dt_ids),
},
.id_table = atmel_ssc_devtypes,
.probe = ssc_probe,
.remove = ssc_remove,
};
module_platform_driver(ssc_driver);
MODULE_AUTHOR("Hans-Christian Noren Egtvedt <[email protected]>");
MODULE_DESCRIPTION("SSC driver for Atmel AT91");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ssc");
| linux-master | drivers/misc/atmel-ssc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* VMware Balloon driver.
*
* Copyright (C) 2000-2018, VMware, Inc. All Rights Reserved.
*
* This is VMware physical memory management driver for Linux. The driver
* acts like a "balloon" that can be inflated to reclaim physical pages by
* reserving them in the guest and invalidating them in the monitor,
* freeing up the underlying machine pages so they can be allocated to
* other guests. The balloon can also be deflated to allow the guest to
* use more physical memory. Higher level policies can control the sizes
* of balloons in VMs in order to manage physical memory resources.
*/
//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/balloon_compaction.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <asm/hypervisor.h>
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");
static bool __read_mostly vmwballoon_shrinker_enable;
module_param(vmwballoon_shrinker_enable, bool, 0444);
MODULE_PARM_DESC(vmwballoon_shrinker_enable,
"Enable non-cooperative out-of-memory protection. Disabled by default as it may degrade performance.");
/* Delay in seconds after shrink before inflation. */
#define VMBALLOON_SHRINK_DELAY (5)
/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED 16
/* Magic number for the balloon mount-point */
#define BALLOON_VMW_MAGIC 0x0ba11007
/*
* Hypervisor communication port definitions.
*/
#define VMW_BALLOON_HV_PORT 0x5670
#define VMW_BALLOON_HV_MAGIC 0x456c6d6f
#define VMW_BALLOON_GUEST_ID 1 /* Linux */
enum vmwballoon_capabilities {
/*
* Bit 0 is reserved and not associated to any capability.
*/
VMW_BALLOON_BASIC_CMDS = (1 << 1),
VMW_BALLOON_BATCHED_CMDS = (1 << 2),
VMW_BALLOON_BATCHED_2M_CMDS = (1 << 3),
VMW_BALLOON_SIGNALLED_WAKEUP_CMD = (1 << 4),
VMW_BALLOON_64_BIT_TARGET = (1 << 5)
};
#define VMW_BALLOON_CAPABILITIES_COMMON (VMW_BALLOON_BASIC_CMDS \
| VMW_BALLOON_BATCHED_CMDS \
| VMW_BALLOON_BATCHED_2M_CMDS \
| VMW_BALLOON_SIGNALLED_WAKEUP_CMD)
#define VMW_BALLOON_2M_ORDER (PMD_SHIFT - PAGE_SHIFT)
/*
* 64-bit targets are only supported in 64-bit
*/
#ifdef CONFIG_64BIT
#define VMW_BALLOON_CAPABILITIES (VMW_BALLOON_CAPABILITIES_COMMON \
| VMW_BALLOON_64_BIT_TARGET)
#else
#define VMW_BALLOON_CAPABILITIES VMW_BALLOON_CAPABILITIES_COMMON
#endif
enum vmballoon_page_size_type {
VMW_BALLOON_4K_PAGE,
VMW_BALLOON_2M_PAGE,
VMW_BALLOON_LAST_SIZE = VMW_BALLOON_2M_PAGE
};
#define VMW_BALLOON_NUM_PAGE_SIZES (VMW_BALLOON_LAST_SIZE + 1)
static const char * const vmballoon_page_size_names[] = {
[VMW_BALLOON_4K_PAGE] = "4k",
[VMW_BALLOON_2M_PAGE] = "2M"
};
enum vmballoon_op {
VMW_BALLOON_INFLATE,
VMW_BALLOON_DEFLATE
};
enum vmballoon_op_stat_type {
VMW_BALLOON_OP_STAT,
VMW_BALLOON_OP_FAIL_STAT
};
#define VMW_BALLOON_OP_STAT_TYPES (VMW_BALLOON_OP_FAIL_STAT + 1)
/**
* enum vmballoon_cmd_type - backdoor commands.
*
* Availability of the commands is as followed:
*
* %VMW_BALLOON_CMD_START, %VMW_BALLOON_CMD_GET_TARGET and
* %VMW_BALLOON_CMD_GUEST_ID are always available.
*
* If the host reports %VMW_BALLOON_BASIC_CMDS are supported then
* %VMW_BALLOON_CMD_LOCK and %VMW_BALLOON_CMD_UNLOCK commands are available.
*
* If the host reports %VMW_BALLOON_BATCHED_CMDS are supported then
* %VMW_BALLOON_CMD_BATCHED_LOCK and VMW_BALLOON_CMD_BATCHED_UNLOCK commands
* are available.
*
* If the host reports %VMW_BALLOON_BATCHED_2M_CMDS are supported then
* %VMW_BALLOON_CMD_BATCHED_2M_LOCK and %VMW_BALLOON_CMD_BATCHED_2M_UNLOCK
* are supported.
*
* If the host reports VMW_BALLOON_SIGNALLED_WAKEUP_CMD is supported then
* VMW_BALLOON_CMD_VMCI_DOORBELL_SET command is supported.
*
* @VMW_BALLOON_CMD_START: Communicating supported version with the hypervisor.
* @VMW_BALLOON_CMD_GET_TARGET: Gets the balloon target size.
* @VMW_BALLOON_CMD_LOCK: Informs the hypervisor about a ballooned page.
* @VMW_BALLOON_CMD_UNLOCK: Informs the hypervisor about a page that is about
* to be deflated from the balloon.
* @VMW_BALLOON_CMD_GUEST_ID: Informs the hypervisor about the type of OS that
* runs in the VM.
* @VMW_BALLOON_CMD_BATCHED_LOCK: Inform the hypervisor about a batch of
* ballooned pages (up to 512).
* @VMW_BALLOON_CMD_BATCHED_UNLOCK: Inform the hypervisor about a batch of
* pages that are about to be deflated from the
* balloon (up to 512).
* @VMW_BALLOON_CMD_BATCHED_2M_LOCK: Similar to @VMW_BALLOON_CMD_BATCHED_LOCK
* for 2MB pages.
* @VMW_BALLOON_CMD_BATCHED_2M_UNLOCK: Similar to
* @VMW_BALLOON_CMD_BATCHED_UNLOCK for 2MB
* pages.
* @VMW_BALLOON_CMD_VMCI_DOORBELL_SET: A command to set doorbell notification
* that would be invoked when the balloon
* size changes.
* @VMW_BALLOON_CMD_LAST: Value of the last command.
*/
enum vmballoon_cmd_type {
VMW_BALLOON_CMD_START,
VMW_BALLOON_CMD_GET_TARGET,
VMW_BALLOON_CMD_LOCK,
VMW_BALLOON_CMD_UNLOCK,
VMW_BALLOON_CMD_GUEST_ID,
/* No command 5 */
VMW_BALLOON_CMD_BATCHED_LOCK = 6,
VMW_BALLOON_CMD_BATCHED_UNLOCK,
VMW_BALLOON_CMD_BATCHED_2M_LOCK,
VMW_BALLOON_CMD_BATCHED_2M_UNLOCK,
VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
VMW_BALLOON_CMD_LAST = VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
};
#define VMW_BALLOON_CMD_NUM (VMW_BALLOON_CMD_LAST + 1)
enum vmballoon_error_codes {
VMW_BALLOON_SUCCESS,
VMW_BALLOON_ERROR_CMD_INVALID,
VMW_BALLOON_ERROR_PPN_INVALID,
VMW_BALLOON_ERROR_PPN_LOCKED,
VMW_BALLOON_ERROR_PPN_UNLOCKED,
VMW_BALLOON_ERROR_PPN_PINNED,
VMW_BALLOON_ERROR_PPN_NOTNEEDED,
VMW_BALLOON_ERROR_RESET,
VMW_BALLOON_ERROR_BUSY
};
#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES (0x03000000)
#define VMW_BALLOON_CMD_WITH_TARGET_MASK \
((1UL << VMW_BALLOON_CMD_GET_TARGET) | \
(1UL << VMW_BALLOON_CMD_LOCK) | \
(1UL << VMW_BALLOON_CMD_UNLOCK) | \
(1UL << VMW_BALLOON_CMD_BATCHED_LOCK) | \
(1UL << VMW_BALLOON_CMD_BATCHED_UNLOCK) | \
(1UL << VMW_BALLOON_CMD_BATCHED_2M_LOCK) | \
(1UL << VMW_BALLOON_CMD_BATCHED_2M_UNLOCK))
static const char * const vmballoon_cmd_names[] = {
[VMW_BALLOON_CMD_START] = "start",
[VMW_BALLOON_CMD_GET_TARGET] = "target",
[VMW_BALLOON_CMD_LOCK] = "lock",
[VMW_BALLOON_CMD_UNLOCK] = "unlock",
[VMW_BALLOON_CMD_GUEST_ID] = "guestType",
[VMW_BALLOON_CMD_BATCHED_LOCK] = "batchLock",
[VMW_BALLOON_CMD_BATCHED_UNLOCK] = "batchUnlock",
[VMW_BALLOON_CMD_BATCHED_2M_LOCK] = "2m-lock",
[VMW_BALLOON_CMD_BATCHED_2M_UNLOCK] = "2m-unlock",
[VMW_BALLOON_CMD_VMCI_DOORBELL_SET] = "doorbellSet"
};
enum vmballoon_stat_page {
VMW_BALLOON_PAGE_STAT_ALLOC,
VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
VMW_BALLOON_PAGE_STAT_FREE,
VMW_BALLOON_PAGE_STAT_LAST = VMW_BALLOON_PAGE_STAT_FREE
};
#define VMW_BALLOON_PAGE_STAT_NUM (VMW_BALLOON_PAGE_STAT_LAST + 1)
enum vmballoon_stat_general {
VMW_BALLOON_STAT_TIMER,
VMW_BALLOON_STAT_DOORBELL,
VMW_BALLOON_STAT_RESET,
VMW_BALLOON_STAT_SHRINK,
VMW_BALLOON_STAT_SHRINK_FREE,
VMW_BALLOON_STAT_LAST = VMW_BALLOON_STAT_SHRINK_FREE
};
#define VMW_BALLOON_STAT_NUM (VMW_BALLOON_STAT_LAST + 1)
static DEFINE_STATIC_KEY_TRUE(vmw_balloon_batching);
static DEFINE_STATIC_KEY_FALSE(balloon_stat_enabled);
struct vmballoon_ctl {
struct list_head pages;
struct list_head refused_pages;
struct list_head prealloc_pages;
unsigned int n_refused_pages;
unsigned int n_pages;
enum vmballoon_page_size_type page_size;
enum vmballoon_op op;
};
/**
* struct vmballoon_batch_entry - a batch entry for lock or unlock.
*
* @status: the status of the operation, which is written by the hypervisor.
* @reserved: reserved for future use. Must be set to zero.
* @pfn: the physical frame number of the page to be locked or unlocked.
*/
struct vmballoon_batch_entry {
u64 status : 5;
u64 reserved : PAGE_SHIFT - 5;
u64 pfn : 52;
} __packed;
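/*
* Illustrative sketch, not part of the upstream driver: each batch entry is a
* single 64-bit word with the status in the low 5 bits and the PFN in the top
* 52 bits; the reserved field pads the low part up to PAGE_SHIFT bits (7 bits
* when PAGE_SHIFT == 12, which is assumed below).  Packing the same layout
* with plain shifts would look like this (hypothetical helpers):
*/
static inline u64 sketch_pack_batch_entry(u64 pfn, u64 status)
{
	return (pfn << 12) | (status & 0x1f);
}
static inline u64 sketch_unpack_batch_pfn(u64 entry)
{
	return entry >> 12;
}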
struct vmballoon {
/**
* @max_page_size: maximum supported page size for ballooning.
*
* Protected by @conf_sem
*/
enum vmballoon_page_size_type max_page_size;
/**
* @size: balloon actual size in basic page size (frames).
*
* While we currently do not support sizes bigger than 32 bits, use
* 64 bits in preparation for future support.
*/
atomic64_t size;
/**
* @target: balloon target size in basic page size (frames).
*
* We do not protect the target under the assumption that setting the
* value is always done through a single write. If this assumption ever
* breaks, we would have to use X_ONCE for accesses, and suffer the less
* optimized code. Although we may read stale target value if multiple
* accesses happen at once, the performance impact should be minor.
*/
unsigned long target;
/**
* @reset_required: reset flag
*
* Setting this flag may introduce races, but the code is expected to
* handle them gracefully. In the worst case, another operation will
* fail as reset did not take place. Clearing the flag is done while
* holding @conf_sem for write.
*/
bool reset_required;
/**
* @capabilities: hypervisor balloon capabilities.
*
* Protected by @conf_sem.
*/
unsigned long capabilities;
/**
* @batch_page: pointer to communication batch page.
*
* When batching is used, batch_page points to a page, which holds up to
* %VMW_BALLOON_BATCH_MAX_PAGES entries for locking or unlocking.
*/
struct vmballoon_batch_entry *batch_page;
/**
* @batch_max_pages: maximum pages that can be locked/unlocked.
*
* Indicates the number of pages that the hypervisor can lock or unlock
* at once, according to whether batching is enabled. If batching is
* disabled, only a single page can be locked/unlocked on each operation.
*
* Protected by @conf_sem.
*/
unsigned int batch_max_pages;
/**
* @page: page to be locked/unlocked by the hypervisor
*
* @page is only used when batching is disabled and a single page is
* reclaimed on each iteration.
*
* Protected by @comm_lock.
*/
struct page *page;
/**
* @shrink_timeout: timeout until the next inflation.
*
* After a shrink event, indicates the time in jiffies after which
* inflation is allowed again. Can be written concurrently with reads,
* so must use READ_ONCE/WRITE_ONCE when accessing.
*/
unsigned long shrink_timeout;
/* statistics */
struct vmballoon_stats *stats;
/**
* @b_dev_info: balloon device information descriptor.
*/
struct balloon_dev_info b_dev_info;
struct delayed_work dwork;
/**
* @huge_pages - list of the inflated 2MB pages.
*
* Protected by @b_dev_info.pages_lock .
*/
struct list_head huge_pages;
/**
* @vmci_doorbell.
*
* Protected by @conf_sem.
*/
struct vmci_handle vmci_doorbell;
/**
* @conf_sem: semaphore to protect the configuration and the statistics.
*/
struct rw_semaphore conf_sem;
/**
* @comm_lock: lock to protect the communication with the host.
*
* Lock ordering: @conf_sem -> @comm_lock .
*/
spinlock_t comm_lock;
/**
* @shrinker: shrinker interface that is used to avoid over-inflation.
*/
struct shrinker shrinker;
/**
* @shrinker_registered: whether the shrinker was registered.
*
* The shrinker interface does not handle gracefully the removal of
* shrinker that was not registered before. This indication allows to
* simplify the unregistration process.
*/
bool shrinker_registered;
};
static struct vmballoon balloon;
struct vmballoon_stats {
/* timer / doorbell operations */
atomic64_t general_stat[VMW_BALLOON_STAT_NUM];
/* allocation statistics for huge and small pages */
atomic64_t
page_stat[VMW_BALLOON_PAGE_STAT_NUM][VMW_BALLOON_NUM_PAGE_SIZES];
/* Monitor operations: total operations, and failures */
atomic64_t ops[VMW_BALLOON_CMD_NUM][VMW_BALLOON_OP_STAT_TYPES];
};
static inline bool is_vmballoon_stats_on(void)
{
return IS_ENABLED(CONFIG_DEBUG_FS) &&
static_branch_unlikely(&balloon_stat_enabled);
}
static inline void vmballoon_stats_op_inc(struct vmballoon *b, unsigned int op,
enum vmballoon_op_stat_type type)
{
if (is_vmballoon_stats_on())
atomic64_inc(&b->stats->ops[op][type]);
}
static inline void vmballoon_stats_gen_inc(struct vmballoon *b,
enum vmballoon_stat_general stat)
{
if (is_vmballoon_stats_on())
atomic64_inc(&b->stats->general_stat[stat]);
}
static inline void vmballoon_stats_gen_add(struct vmballoon *b,
enum vmballoon_stat_general stat,
unsigned int val)
{
if (is_vmballoon_stats_on())
atomic64_add(val, &b->stats->general_stat[stat]);
}
static inline void vmballoon_stats_page_inc(struct vmballoon *b,
enum vmballoon_stat_page stat,
enum vmballoon_page_size_type size)
{
if (is_vmballoon_stats_on())
atomic64_inc(&b->stats->page_stat[stat][size]);
}
static inline void vmballoon_stats_page_add(struct vmballoon *b,
enum vmballoon_stat_page stat,
enum vmballoon_page_size_type size,
unsigned int val)
{
if (is_vmballoon_stats_on())
atomic64_add(val, &b->stats->page_stat[stat][size]);
}
static inline unsigned long
__vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
unsigned long arg2, unsigned long *result)
{
unsigned long status, dummy1, dummy2, dummy3, local_result;
vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_STAT);
asm volatile ("inl %%dx" :
"=a"(status),
"=c"(dummy1),
"=d"(dummy2),
"=b"(local_result),
"=S"(dummy3) :
"0"(VMW_BALLOON_HV_MAGIC),
"1"(cmd),
"2"(VMW_BALLOON_HV_PORT),
"3"(arg1),
"4"(arg2) :
"memory");
/* update the result if needed */
if (result)
*result = (cmd == VMW_BALLOON_CMD_START) ? dummy1 :
local_result;
/* update target when applicable */
if (status == VMW_BALLOON_SUCCESS &&
((1ul << cmd) & VMW_BALLOON_CMD_WITH_TARGET_MASK))
WRITE_ONCE(b->target, local_result);
if (status != VMW_BALLOON_SUCCESS &&
status != VMW_BALLOON_SUCCESS_WITH_CAPABILITIES) {
vmballoon_stats_op_inc(b, cmd, VMW_BALLOON_OP_FAIL_STAT);
pr_debug("%s: %s [0x%lx,0x%lx) failed, returned %ld\n",
__func__, vmballoon_cmd_names[cmd], arg1, arg2,
status);
}
/* mark reset required accordingly */
if (status == VMW_BALLOON_ERROR_RESET)
b->reset_required = true;
return status;
}
static __always_inline unsigned long
vmballoon_cmd(struct vmballoon *b, unsigned long cmd, unsigned long arg1,
unsigned long arg2)
{
unsigned long dummy;
return __vmballoon_cmd(b, cmd, arg1, arg2, &dummy);
}
/*
* Send "start" command to the host, communicating supported version
* of the protocol.
*/
static int vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
unsigned long status, capabilities;
status = __vmballoon_cmd(b, VMW_BALLOON_CMD_START, req_caps, 0,
&capabilities);
switch (status) {
case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
b->capabilities = capabilities;
break;
case VMW_BALLOON_SUCCESS:
b->capabilities = VMW_BALLOON_BASIC_CMDS;
break;
default:
return -EIO;
}
/*
* 2MB pages are only supported with batching. If batching is for some
* reason disabled, do not use 2MB pages, since otherwise the legacy
* mechanism is used with 2MB pages, causing a failure.
*/
b->max_page_size = VMW_BALLOON_4K_PAGE;
if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
(b->capabilities & VMW_BALLOON_BATCHED_CMDS))
b->max_page_size = VMW_BALLOON_2M_PAGE;
return 0;
}
/**
* vmballoon_send_guest_id - communicate guest type to the host.
*
* @b: pointer to the balloon.
*
* Communicate guest type to the host so that it can adjust ballooning
* algorithm to the one most appropriate for the guest. This command
* is normally issued after sending "start" command and is part of
* standard reset sequence.
*
* Return: zero on success or appropriate error code.
*/
static int vmballoon_send_guest_id(struct vmballoon *b)
{
unsigned long status;
status = vmballoon_cmd(b, VMW_BALLOON_CMD_GUEST_ID,
VMW_BALLOON_GUEST_ID, 0);
return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
/**
* vmballoon_page_order() - return the order of the page
* @page_size: the size of the page.
*
* Return: the allocation order.
*/
static inline
unsigned int vmballoon_page_order(enum vmballoon_page_size_type page_size)
{
return page_size == VMW_BALLOON_2M_PAGE ? VMW_BALLOON_2M_ORDER : 0;
}
/**
* vmballoon_page_in_frames() - returns the number of frames in a page.
* @page_size: the size of the page.
*
* Return: the number of 4k frames.
*/
static inline unsigned int
vmballoon_page_in_frames(enum vmballoon_page_size_type page_size)
{
return 1 << vmballoon_page_order(page_size);
}
/**
* vmballoon_mark_page_offline() - mark a page as offline
* @page: pointer for the page.
* @page_size: the size of the page.
*/
static void
vmballoon_mark_page_offline(struct page *page,
enum vmballoon_page_size_type page_size)
{
int i;
for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
__SetPageOffline(page + i);
}
/**
* vmballoon_mark_page_online() - mark a page as online
* @page: pointer for the page.
* @page_size: the size of the page.
*/
static void
vmballoon_mark_page_online(struct page *page,
enum vmballoon_page_size_type page_size)
{
int i;
for (i = 0; i < vmballoon_page_in_frames(page_size); i++)
__ClearPageOffline(page + i);
}
/**
* vmballoon_send_get_target() - Retrieve desired balloon size from the host.
*
* @b: pointer to the balloon.
*
* Return: zero on success, -EINVAL if the limit does not fit in 32 bits (as
* required by the host-guest protocol), or -EIO if an error occurred while
* communicating with the host.
*/
static int vmballoon_send_get_target(struct vmballoon *b)
{
unsigned long status;
unsigned long limit;
limit = totalram_pages();
/* Ensure limit fits in 32-bits if 64-bit targets are not supported */
if (!(b->capabilities & VMW_BALLOON_64_BIT_TARGET) &&
limit != (u32)limit)
return -EINVAL;
status = vmballoon_cmd(b, VMW_BALLOON_CMD_GET_TARGET, limit, 0);
return status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
/**
* vmballoon_alloc_page_list - allocates a list of pages.
*
* @b: pointer to the balloon.
* @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
* @req_n_pages: the number of requested pages.
*
* Tries to allocate @req_n_pages. Adds them to the list of balloon pages in
* @ctl.pages and updates @ctl.n_pages to reflect the number of pages.
*
* Return: zero on success or error code otherwise.
*/
static int vmballoon_alloc_page_list(struct vmballoon *b,
struct vmballoon_ctl *ctl,
unsigned int req_n_pages)
{
struct page *page;
unsigned int i;
for (i = 0; i < req_n_pages; i++) {
/*
* First check if we happen to have pages that were allocated
* before. This happens when a 2MB page was rejected during inflation
* by the hypervisor and then split into 4KB pages.
*/
if (!list_empty(&ctl->prealloc_pages)) {
page = list_first_entry(&ctl->prealloc_pages,
struct page, lru);
list_del(&page->lru);
} else {
if (ctl->page_size == VMW_BALLOON_2M_PAGE)
page = alloc_pages(__GFP_HIGHMEM|__GFP_NOWARN|
__GFP_NOMEMALLOC, VMW_BALLOON_2M_ORDER);
else
page = balloon_page_alloc();
vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC,
ctl->page_size);
}
if (page) {
/* Success. Add the page to the list and continue. */
list_add(&page->lru, &ctl->pages);
continue;
}
/* Allocation failed. Update statistics and stop. */
vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_ALLOC_FAIL,
ctl->page_size);
break;
}
ctl->n_pages = i;
return req_n_pages == ctl->n_pages ? 0 : -ENOMEM;
}
/**
* vmballoon_handle_one_result - Handle lock/unlock result for a single page.
*
* @b: pointer for %struct vmballoon.
* @page: pointer for the page whose result should be handled.
* @page_size: size of the page.
* @status: status of the operation as provided by the hypervisor.
*/
static int vmballoon_handle_one_result(struct vmballoon *b, struct page *page,
enum vmballoon_page_size_type page_size,
unsigned long status)
{
/* On success do nothing. The page is already on the balloon list. */
if (likely(status == VMW_BALLOON_SUCCESS))
return 0;
pr_debug("%s: failed comm pfn %lx status %lu page_size %s\n", __func__,
page_to_pfn(page), status,
vmballoon_page_size_names[page_size]);
/* Error occurred */
vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC,
page_size);
return -EIO;
}
/**
* vmballoon_status_page - returns the status of (un)lock operation
*
* @b: pointer to the balloon.
* @idx: index for the page for which the operation is performed.
* @p: pointer to where the page struct is returned.
*
* Following a lock or unlock operation, returns the status of the operation for
* an individual page. Provides the page that the operation was performed on via
* the @p argument.
*
* Returns: The status of a lock or unlock operation for an individual page.
*/
static unsigned long vmballoon_status_page(struct vmballoon *b, int idx,
struct page **p)
{
if (static_branch_likely(&vmw_balloon_batching)) {
/* batching mode */
*p = pfn_to_page(b->batch_page[idx].pfn);
return b->batch_page[idx].status;
}
/* non-batching mode */
*p = b->page;
/*
* If a failure occurs, the indication will be provided in the status
* of the entire operation, which is considered before the individual
* page status. So for non-batching mode, the indication is always of
* success.
*/
return VMW_BALLOON_SUCCESS;
}
/**
* vmballoon_lock_op - notifies the host about inflated/deflated pages.
* @b: pointer to the balloon.
* @num_pages: number of inflated/deflated pages.
* @page_size: size of the page.
* @op: the type of operation (lock or unlock).
*
* Notify the host about page(s) that were ballooned (or removed from the
* balloon) so that the host can use them without fear that the guest will need
* them (or, on unlock, so the host stops using them since the guest does). The
* host may reject some pages; we need to check the return value and maybe
* submit a different page. The pages that are inflated/deflated are pointed to
* by @b->page.
*
* Return: result as provided by the hypervisor.
*/
static unsigned long vmballoon_lock_op(struct vmballoon *b,
unsigned int num_pages,
enum vmballoon_page_size_type page_size,
enum vmballoon_op op)
{
unsigned long cmd, pfn;
lockdep_assert_held(&b->comm_lock);
if (static_branch_likely(&vmw_balloon_batching)) {
if (op == VMW_BALLOON_INFLATE)
cmd = page_size == VMW_BALLOON_2M_PAGE ?
VMW_BALLOON_CMD_BATCHED_2M_LOCK :
VMW_BALLOON_CMD_BATCHED_LOCK;
else
cmd = page_size == VMW_BALLOON_2M_PAGE ?
VMW_BALLOON_CMD_BATCHED_2M_UNLOCK :
VMW_BALLOON_CMD_BATCHED_UNLOCK;
pfn = PHYS_PFN(virt_to_phys(b->batch_page));
} else {
cmd = op == VMW_BALLOON_INFLATE ? VMW_BALLOON_CMD_LOCK :
VMW_BALLOON_CMD_UNLOCK;
pfn = page_to_pfn(b->page);
/* In non-batching mode, PFNs must fit in 32-bit */
if (unlikely(pfn != (u32)pfn))
return VMW_BALLOON_ERROR_PPN_INVALID;
}
return vmballoon_cmd(b, cmd, pfn, num_pages);
}
/**
* vmballoon_add_page - adds a page towards lock/unlock operation.
*
* @b: pointer to the balloon.
* @idx: index of the page to be ballooned in this batch.
* @p: pointer to the page that is about to be ballooned.
*
* Adds the page to be ballooned. Must be called while holding @comm_lock.
*/
static void vmballoon_add_page(struct vmballoon *b, unsigned int idx,
struct page *p)
{
lockdep_assert_held(&b->comm_lock);
if (static_branch_likely(&vmw_balloon_batching))
b->batch_page[idx] = (struct vmballoon_batch_entry)
{ .pfn = page_to_pfn(p) };
else
b->page = p;
}
/**
* vmballoon_lock - lock or unlock a batch of pages.
*
* @b: pointer to the balloon.
* @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
*
* Notifies the host about ballooned pages (after inflation or deflation,
* according to @ctl). If the host rejects a page, put it on the @ctl
* refused-pages list. These refused pages are then released when moving to the
* next page size.
*
* Note that we neither free any @page here nor put them back on the ballooned
* pages list. Instead we queue it for later processing. We do that for several
* reasons. First, we do not want to free the page under the lock. Second, it
* allows us to unify the handling of lock and unlock. In the inflate case, the
* caller will check if there are too many refused pages and release them.
* Although it is not identical to the past behavior, it should not affect
* performance.
*/
static int vmballoon_lock(struct vmballoon *b, struct vmballoon_ctl *ctl)
{
unsigned long batch_status;
struct page *page;
unsigned int i, num_pages;
num_pages = ctl->n_pages;
if (num_pages == 0)
return 0;
/* communication with the host is done under the communication lock */
spin_lock(&b->comm_lock);
i = 0;
list_for_each_entry(page, &ctl->pages, lru)
vmballoon_add_page(b, i++, page);
batch_status = vmballoon_lock_op(b, ctl->n_pages, ctl->page_size,
ctl->op);
/*
* Iterate over the pages in the provided list. Since we are changing
* @ctl->n_pages we are saving the original value in @num_pages and
* use this value to bound the loop.
*/
for (i = 0; i < num_pages; i++) {
unsigned long status;
status = vmballoon_status_page(b, i, &page);
/*
* Failure of the whole batch overrides a single operation
* results.
*/
if (batch_status != VMW_BALLOON_SUCCESS)
status = batch_status;
/* Continue if no error happened */
if (!vmballoon_handle_one_result(b, page, ctl->page_size,
status))
continue;
/*
* An error happened. Move the page to the refused list and update
* the page counts.
*/
list_move(&page->lru, &ctl->refused_pages);
ctl->n_pages--;
ctl->n_refused_pages++;
}
spin_unlock(&b->comm_lock);
return batch_status == VMW_BALLOON_SUCCESS ? 0 : -EIO;
}
/**
* vmballoon_release_page_list() - Releases a page list
*
* @page_list: list of pages to release.
* @n_pages: pointer to the number of pages.
* @page_size: whether the pages in the list are 2MB (or else 4KB).
*
* Releases the list of pages and zeros the number of pages.
*/
static void vmballoon_release_page_list(struct list_head *page_list,
int *n_pages,
enum vmballoon_page_size_type page_size)
{
struct page *page, *tmp;
list_for_each_entry_safe(page, tmp, page_list, lru) {
list_del(&page->lru);
__free_pages(page, vmballoon_page_order(page_size));
}
if (n_pages)
*n_pages = 0;
}
/*
* Release pages that were allocated while attempting to inflate the
* balloon but were refused by the host for one reason or another.
*/
static void vmballoon_release_refused_pages(struct vmballoon *b,
struct vmballoon_ctl *ctl)
{
vmballoon_stats_page_inc(b, VMW_BALLOON_PAGE_STAT_REFUSED_FREE,
ctl->page_size);
vmballoon_release_page_list(&ctl->refused_pages, &ctl->n_refused_pages,
ctl->page_size);
}
/**
* vmballoon_change - retrieve the required balloon change
*
* @b: pointer for the balloon.
*
* Return: the required change for the balloon size. A positive number
* indicates inflation, a negative number indicates a deflation.
*/
static int64_t vmballoon_change(struct vmballoon *b)
{
int64_t size, target;
size = atomic64_read(&b->size);
target = READ_ONCE(b->target);
/*
* We must cast first because of int sizes;
* otherwise we might get huge positives instead of negatives.
*/
if (b->reset_required)
return 0;
/* consider a 2MB slack on deflate, unless the balloon is emptied */
if (target < size && target != 0 &&
size - target < vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE))
return 0;
/* If an out-of-memory recently occurred, inflation is disallowed. */
if (target > size && time_before(jiffies, READ_ONCE(b->shrink_timeout)))
return 0;
return target - size;
}
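/*
* Worked example, not from the upstream sources: with 4KB base pages a 2MB
* page is 512 frames, so vmballoon_change() above ignores a deflation request
* smaller than 512 frames unless the target is zero.  For instance
* (hypothetical numbers):
*   size = 10240, target = 10000 -> change = 0 (within the 2MB slack)
*   size = 10240, target =  9000 -> change = -1240 (deflate)
*   size = 10240, target =     0 -> change = -10240 (empty the balloon)
*/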
/**
* vmballoon_enqueue_page_list() - Enqueues list of pages after inflation.
*
* @b: pointer to balloon.
* @pages: list of pages to enqueue.
* @n_pages: pointer to number of pages in list. The value is zeroed.
* @page_size: whether the pages are 2MB or 4KB pages.
*
* Enqueues the provided list of pages on the ballooned page list, clears the
* list and zeroes the number of pages that was provided.
*/
static void vmballoon_enqueue_page_list(struct vmballoon *b,
struct list_head *pages,
unsigned int *n_pages,
enum vmballoon_page_size_type page_size)
{
unsigned long flags;
struct page *page;
if (page_size == VMW_BALLOON_4K_PAGE) {
balloon_page_list_enqueue(&b->b_dev_info, pages);
} else {
/*
* Keep the huge pages in a local list which is not available
* for the balloon compaction mechanism.
*/
spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
list_for_each_entry(page, pages, lru) {
vmballoon_mark_page_offline(page, VMW_BALLOON_2M_PAGE);
}
list_splice_init(pages, &b->huge_pages);
__count_vm_events(BALLOON_INFLATE, *n_pages *
vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
}
*n_pages = 0;
}
/**
* vmballoon_dequeue_page_list() - Dequeues page lists for deflation.
*
* @b: pointer to balloon.
* @pages: list to which the dequeued pages are added.
* @n_pages: pointer to the number of pages in the list. Set to the number of
* dequeued pages.
* @page_size: whether the pages are 2MB or 4KB pages.
* @n_req_pages: the number of requested pages.
*
* Dequeues the requested number of pages from the balloon for deflation. The
* number of dequeued pages may be lower if not enough pages of the requested
* size are available.
*/
static void vmballoon_dequeue_page_list(struct vmballoon *b,
struct list_head *pages,
unsigned int *n_pages,
enum vmballoon_page_size_type page_size,
unsigned int n_req_pages)
{
struct page *page, *tmp;
unsigned int i = 0;
unsigned long flags;
/* In the case of 4k pages, use the compaction infrastructure */
if (page_size == VMW_BALLOON_4K_PAGE) {
*n_pages = balloon_page_list_dequeue(&b->b_dev_info, pages,
n_req_pages);
return;
}
/* 2MB pages */
spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
list_for_each_entry_safe(page, tmp, &b->huge_pages, lru) {
vmballoon_mark_page_online(page, VMW_BALLOON_2M_PAGE);
list_move(&page->lru, pages);
if (++i == n_req_pages)
break;
}
__count_vm_events(BALLOON_DEFLATE,
i * vmballoon_page_in_frames(VMW_BALLOON_2M_PAGE));
spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
*n_pages = i;
}
/**
* vmballoon_split_refused_pages() - Split the 2MB refused pages to 4k.
*
* If inflation of 2MB pages was denied by the hypervisor, it is likely to be
* due to one or a few 4KB pages. These 2MB pages may keep being allocated and
* then refused. To prevent this case, this function splits the refused
* pages into 4KB pages and adds them to the @prealloc_pages list.
*
* @ctl: pointer for the %struct vmballoon_ctl, which defines the operation.
*/
static void vmballoon_split_refused_pages(struct vmballoon_ctl *ctl)
{
struct page *page, *tmp;
unsigned int i, order;
order = vmballoon_page_order(ctl->page_size);
list_for_each_entry_safe(page, tmp, &ctl->refused_pages, lru) {
list_del(&page->lru);
split_page(page, order);
for (i = 0; i < (1 << order); i++)
list_add(&page[i].lru, &ctl->prealloc_pages);
}
ctl->n_refused_pages = 0;
}
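/*
 * Worked example (illustrative only; assumes x86 with 4KB base pages, so
 * vmballoon_page_order() for a 2MB page is 9): split_page() above turns each
 * refused huge page into 1 << 9 = 512 independent 4KB pages, all of which
 * end up on ctl->prealloc_pages.
 */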
/**
* vmballoon_inflate() - Inflate the balloon towards its target size.
*
* @b: pointer to the balloon.
*/
static void vmballoon_inflate(struct vmballoon *b)
{
int64_t to_inflate_frames;
struct vmballoon_ctl ctl = {
.pages = LIST_HEAD_INIT(ctl.pages),
.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
.prealloc_pages = LIST_HEAD_INIT(ctl.prealloc_pages),
.page_size = b->max_page_size,
.op = VMW_BALLOON_INFLATE
};
while ((to_inflate_frames = vmballoon_change(b)) > 0) {
unsigned int to_inflate_pages, page_in_frames;
int alloc_error, lock_error = 0;
VM_BUG_ON(!list_empty(&ctl.pages));
VM_BUG_ON(ctl.n_pages != 0);
page_in_frames = vmballoon_page_in_frames(ctl.page_size);
to_inflate_pages = min_t(unsigned long, b->batch_max_pages,
DIV_ROUND_UP_ULL(to_inflate_frames,
page_in_frames));
/* Start by allocating */
alloc_error = vmballoon_alloc_page_list(b, &ctl,
to_inflate_pages);
/* Actually lock the pages by telling the hypervisor */
lock_error = vmballoon_lock(b, &ctl);
/*
* If an error indicates that something serious went wrong,
* stop the inflation.
*/
if (lock_error)
break;
/* Update the balloon size */
atomic64_add(ctl.n_pages * page_in_frames, &b->size);
vmballoon_enqueue_page_list(b, &ctl.pages, &ctl.n_pages,
ctl.page_size);
/*
* If allocation failed or the number of refused pages exceeds
* the maximum allowed, move to the next page size.
*/
if (alloc_error ||
ctl.n_refused_pages >= VMW_BALLOON_MAX_REFUSED) {
if (ctl.page_size == VMW_BALLOON_4K_PAGE)
break;
/*
* Split the refused pages to 4k. This will also empty
* the refused pages list.
*/
vmballoon_split_refused_pages(&ctl);
ctl.page_size--;
}
cond_resched();
}
/*
* Release pages that were allocated while attempting to inflate the
* balloon but were refused by the host for one reason or another,
* and update the statistics.
*/
if (ctl.n_refused_pages != 0)
vmballoon_release_refused_pages(b, &ctl);
vmballoon_release_page_list(&ctl.prealloc_pages, NULL, ctl.page_size);
}
/**
* vmballoon_deflate() - Decrease the size of the balloon.
*
* @b: pointer to the balloon
* @n_frames: the number of frames to deflate. If zero, automatically
* calculated according to the target size.
* @coordinated: whether to coordinate with the host
*
* Decrease the size of the balloon allowing guest to use more memory.
*
* Return: The number of deflated frames (i.e., basic page size units)
*/
static unsigned long vmballoon_deflate(struct vmballoon *b, uint64_t n_frames,
bool coordinated)
{
unsigned long deflated_frames = 0;
unsigned long tried_frames = 0;
struct vmballoon_ctl ctl = {
.pages = LIST_HEAD_INIT(ctl.pages),
.refused_pages = LIST_HEAD_INIT(ctl.refused_pages),
.page_size = VMW_BALLOON_4K_PAGE,
.op = VMW_BALLOON_DEFLATE
};
/* free pages to reach target */
while (true) {
unsigned int to_deflate_pages, n_unlocked_frames;
unsigned int page_in_frames;
int64_t to_deflate_frames;
bool deflated_all;
page_in_frames = vmballoon_page_in_frames(ctl.page_size);
VM_BUG_ON(!list_empty(&ctl.pages));
VM_BUG_ON(ctl.n_pages);
VM_BUG_ON(!list_empty(&ctl.refused_pages));
VM_BUG_ON(ctl.n_refused_pages);
/*
* If a specific number of frames was requested, we try to
* deflate that number of frames. Otherwise, deflation is
* performed according to the target and balloon size.
*/
to_deflate_frames = n_frames ? n_frames - tried_frames :
-vmballoon_change(b);
/* break if no work to do */
if (to_deflate_frames <= 0)
break;
/*
* Calculate the number of frames based on current page size,
* but limit the deflated frames to a single chunk
*/
to_deflate_pages = min_t(unsigned long, b->batch_max_pages,
DIV_ROUND_UP_ULL(to_deflate_frames,
page_in_frames));
/* First take the pages from the balloon pages. */
vmballoon_dequeue_page_list(b, &ctl.pages, &ctl.n_pages,
ctl.page_size, to_deflate_pages);
/*
* Before pages are moved to the refused list, count their
* frames as frames that we tried to deflate.
*/
tried_frames += ctl.n_pages * page_in_frames;
/*
* Unlock the pages by communicating with the hypervisor if the
* communication is coordinated (i.e., not a pop). We ignore the
* return code; instead we check whether we managed to unlock all
* the pages. If we failed, we will move to the next page size
* and eventually try again later.
*/
if (coordinated)
vmballoon_lock(b, &ctl);
/*
* Check if we deflated enough. We will move to the next page
* size if we did not manage to do so. This calculation takes
* place now, as once the pages are released, the number of
* pages is zeroed.
*/
deflated_all = (ctl.n_pages == to_deflate_pages);
/* Update local and global counters */
n_unlocked_frames = ctl.n_pages * page_in_frames;
atomic64_sub(n_unlocked_frames, &b->size);
deflated_frames += n_unlocked_frames;
vmballoon_stats_page_add(b, VMW_BALLOON_PAGE_STAT_FREE,
ctl.page_size, ctl.n_pages);
/* free the ballooned pages */
vmballoon_release_page_list(&ctl.pages, &ctl.n_pages,
ctl.page_size);
/* Return the refused pages to the ballooned list. */
vmballoon_enqueue_page_list(b, &ctl.refused_pages,
&ctl.n_refused_pages,
ctl.page_size);
/* If we failed to unlock all the pages, move to next size. */
if (!deflated_all) {
if (ctl.page_size == b->max_page_size)
break;
ctl.page_size++;
}
cond_resched();
}
return deflated_frames;
}
/**
* vmballoon_deinit_batching - disables batching mode.
*
* @b: pointer to &struct vmballoon.
*
* Disables batching, by deallocating the page for communication with the
* hypervisor and disabling the static key to indicate that batching is off.
*/
static void vmballoon_deinit_batching(struct vmballoon *b)
{
free_page((unsigned long)b->batch_page);
b->batch_page = NULL;
static_branch_disable(&vmw_balloon_batching);
b->batch_max_pages = 1;
}
/**
* vmballoon_init_batching - enable batching mode.
*
* @b: pointer to &struct vmballoon.
*
* Enables batching, by allocating a page for communication with the hypervisor
* and enabling the static_key to use batching.
*
* Return: zero on success or an appropriate error-code.
*/
static int vmballoon_init_batching(struct vmballoon *b)
{
struct page *page;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
return -ENOMEM;
b->batch_page = page_address(page);
b->batch_max_pages = PAGE_SIZE / sizeof(struct vmballoon_batch_entry);
static_branch_enable(&vmw_balloon_batching);
return 0;
}
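/*
 * Worked example (illustrative only; assumes a 4096-byte PAGE_SIZE and an
 * 8-byte struct vmballoon_batch_entry, which is defined elsewhere): the
 * computation above yields batch_max_pages = 4096 / 8 = 512, i.e. up to 512
 * pages can be described in a single batched hypervisor request.
 */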
/*
* Receive notification and resize balloon
*/
static void vmballoon_doorbell(void *client_data)
{
struct vmballoon *b = client_data;
vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_DOORBELL);
mod_delayed_work(system_freezable_wq, &b->dwork, 0);
}
/*
* Clean up vmci doorbell
*/
static void vmballoon_vmci_cleanup(struct vmballoon *b)
{
vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
VMCI_INVALID_ID, VMCI_INVALID_ID);
if (!vmci_handle_is_invalid(b->vmci_doorbell)) {
vmci_doorbell_destroy(b->vmci_doorbell);
b->vmci_doorbell = VMCI_INVALID_HANDLE;
}
}
/**
* vmballoon_vmci_init - Initialize vmci doorbell.
*
* @b: pointer to the balloon.
*
* Return: zero on success or when the wakeup command is not supported;
* an error code otherwise.
*
* Initialize vmci doorbell, to get notified as soon as balloon changes.
*/
static int vmballoon_vmci_init(struct vmballoon *b)
{
unsigned long error;
if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
return 0;
error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
VMCI_PRIVILEGE_FLAG_RESTRICTED,
vmballoon_doorbell, b);
if (error != VMCI_SUCCESS)
goto fail;
error = __vmballoon_cmd(b, VMW_BALLOON_CMD_VMCI_DOORBELL_SET,
b->vmci_doorbell.context,
b->vmci_doorbell.resource, NULL);
if (error != VMW_BALLOON_SUCCESS)
goto fail;
return 0;
fail:
vmballoon_vmci_cleanup(b);
return -EIO;
}
/**
* vmballoon_pop - Quickly release all pages allocated for the balloon.
*
* @b: pointer to the balloon.
*
* This function is called when the host decides to "reset" the balloon for one
* reason or another. Unlike a normal "deflate", we do not (and shall not)
* notify the host of the pages being released.
*/
static void vmballoon_pop(struct vmballoon *b)
{
unsigned long size;
while ((size = atomic64_read(&b->size)))
vmballoon_deflate(b, size, false);
}
/*
* Perform standard reset sequence by popping the balloon (in case it
* is not empty) and then restarting protocol. This operation normally
* happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
*/
static void vmballoon_reset(struct vmballoon *b)
{
int error;
down_write(&b->conf_sem);
vmballoon_vmci_cleanup(b);
/* free all pages, skipping monitor unlock */
vmballoon_pop(b);
if (vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
goto unlock;
if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
if (vmballoon_init_batching(b)) {
/*
* We failed to initialize batching, inform the monitor
* about it by sending a null capability.
*
* The guest will retry in one second.
*/
vmballoon_send_start(b, 0);
goto unlock;
}
} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
vmballoon_deinit_batching(b);
}
vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_RESET);
b->reset_required = false;
error = vmballoon_vmci_init(b);
if (error)
pr_err_once("failed to initialize vmci doorbell\n");
if (vmballoon_send_guest_id(b))
pr_err_once("failed to send guest ID to the host\n");
unlock:
up_write(&b->conf_sem);
}
/**
* vmballoon_work - periodic balloon worker for reset, inflation and deflation.
*
* @work: pointer to the &work_struct which is provided by the workqueue.
*
* Resets the protocol if needed, gets the new size and adjusts the balloon as
* needed. Repeats every second.
*/
static void vmballoon_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
int64_t change = 0;
if (b->reset_required)
vmballoon_reset(b);
down_read(&b->conf_sem);
/*
* Update the stats while holding the semaphore to ensure that
* @stats_enabled is consistent with whether the stats are actually
* enabled.
*/
vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_TIMER);
if (!vmballoon_send_get_target(b))
change = vmballoon_change(b);
if (change != 0) {
pr_debug("%s - size: %llu, target %lu\n", __func__,
atomic64_read(&b->size), READ_ONCE(b->target));
if (change > 0)
vmballoon_inflate(b);
else /* (change < 0) */
vmballoon_deflate(b, 0, true);
}
up_read(&b->conf_sem);
/*
* We are using a freezable workqueue so that balloon operations are
* stopped while the system transitions to/from sleep/hibernation.
*/
queue_delayed_work(system_freezable_wq,
dwork, round_jiffies_relative(HZ));
}
/**
* vmballoon_shrinker_scan() - deflate the balloon due to memory pressure.
* @shrinker: pointer to the balloon shrinker.
* @sc: page reclaim information.
*
* Returns: number of pages that were freed during deflation.
*/
static unsigned long vmballoon_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc)
{
struct vmballoon *b = &balloon;
unsigned long deflated_frames;
pr_debug("%s - size: %llu", __func__, atomic64_read(&b->size));
vmballoon_stats_gen_inc(b, VMW_BALLOON_STAT_SHRINK);
/*
* If the lock is also contended for read, we cannot easily reclaim and
* we bail out.
*/
if (!down_read_trylock(&b->conf_sem))
return 0;
deflated_frames = vmballoon_deflate(b, sc->nr_to_scan, true);
vmballoon_stats_gen_add(b, VMW_BALLOON_STAT_SHRINK_FREE,
deflated_frames);
/*
* Delay future inflation for some time to mitigate the situation in
* which the balloon continuously grows and shrinks. Use WRITE_ONCE() since
* the access is asynchronous.
*/
WRITE_ONCE(b->shrink_timeout, jiffies + HZ * VMBALLOON_SHRINK_DELAY);
up_read(&b->conf_sem);
return deflated_frames;
}
/**
* vmballoon_shrinker_count() - return the number of ballooned pages.
* @shrinker: pointer to the balloon shrinker.
* @sc: page reclaim information.
*
* Returns: number of 4k pages that are allocated for the balloon and can
* therefore be reclaimed under pressure.
*/
static unsigned long vmballoon_shrinker_count(struct shrinker *shrinker,
struct shrink_control *sc)
{
struct vmballoon *b = &balloon;
return atomic64_read(&b->size);
}
static void vmballoon_unregister_shrinker(struct vmballoon *b)
{
if (b->shrinker_registered)
unregister_shrinker(&b->shrinker);
b->shrinker_registered = false;
}
static int vmballoon_register_shrinker(struct vmballoon *b)
{
int r;
/* Do nothing if the shrinker is not enabled */
if (!vmwballoon_shrinker_enable)
return 0;
b->shrinker.scan_objects = vmballoon_shrinker_scan;
b->shrinker.count_objects = vmballoon_shrinker_count;
b->shrinker.seeks = DEFAULT_SEEKS;
r = register_shrinker(&b->shrinker, "vmw-balloon");
if (r == 0)
b->shrinker_registered = true;
return r;
}
/*
* DEBUGFS Interface
*/
#ifdef CONFIG_DEBUG_FS
static const char * const vmballoon_stat_page_names[] = {
[VMW_BALLOON_PAGE_STAT_ALLOC] = "alloc",
[VMW_BALLOON_PAGE_STAT_ALLOC_FAIL] = "allocFail",
[VMW_BALLOON_PAGE_STAT_REFUSED_ALLOC] = "errAlloc",
[VMW_BALLOON_PAGE_STAT_REFUSED_FREE] = "errFree",
[VMW_BALLOON_PAGE_STAT_FREE] = "free"
};
static const char * const vmballoon_stat_names[] = {
[VMW_BALLOON_STAT_TIMER] = "timer",
[VMW_BALLOON_STAT_DOORBELL] = "doorbell",
[VMW_BALLOON_STAT_RESET] = "reset",
[VMW_BALLOON_STAT_SHRINK] = "shrink",
[VMW_BALLOON_STAT_SHRINK_FREE] = "shrinkFree"
};
static int vmballoon_enable_stats(struct vmballoon *b)
{
int r = 0;
down_write(&b->conf_sem);
/* did we somehow race with another reader which enabled stats? */
if (b->stats)
goto out;
b->stats = kzalloc(sizeof(*b->stats), GFP_KERNEL);
if (!b->stats) {
/* allocation failed */
r = -ENOMEM;
goto out;
}
static_key_enable(&balloon_stat_enabled.key);
out:
up_write(&b->conf_sem);
return r;
}
/**
* vmballoon_debug_show - shows statistics of balloon operations.
* @f: pointer to the &struct seq_file.
* @offset: ignored.
*
* Provides the statistics that can be accessed through the vmmemctl file in
* debugfs. To avoid the overhead, mainly that of memory, of collecting the
* statistics, we only collect them after the first time the counters are read.
*
* Return: zero on success or an error code.
*/
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
struct vmballoon *b = f->private;
int i, j;
/* enables stats if they are disabled */
if (!b->stats) {
int r = vmballoon_enable_stats(b);
if (r)
return r;
}
/* format capabilities info */
seq_printf(f, "%-22s: %#16x\n", "balloon capabilities",
VMW_BALLOON_CAPABILITIES);
seq_printf(f, "%-22s: %#16lx\n", "used capabilities", b->capabilities);
seq_printf(f, "%-22s: %16s\n", "is resetting",
b->reset_required ? "y" : "n");
/* format size info */
seq_printf(f, "%-22s: %16lu\n", "target", READ_ONCE(b->target));
seq_printf(f, "%-22s: %16llu\n", "current", atomic64_read(&b->size));
for (i = 0; i < VMW_BALLOON_CMD_NUM; i++) {
if (vmballoon_cmd_names[i] == NULL)
continue;
seq_printf(f, "%-22s: %16llu (%llu failed)\n",
vmballoon_cmd_names[i],
atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_STAT]),
atomic64_read(&b->stats->ops[i][VMW_BALLOON_OP_FAIL_STAT]));
}
for (i = 0; i < VMW_BALLOON_STAT_NUM; i++)
seq_printf(f, "%-22s: %16llu\n",
vmballoon_stat_names[i],
atomic64_read(&b->stats->general_stat[i]));
for (i = 0; i < VMW_BALLOON_PAGE_STAT_NUM; i++) {
for (j = 0; j < VMW_BALLOON_NUM_PAGE_SIZES; j++)
seq_printf(f, "%-18s(%s): %16llu\n",
vmballoon_stat_page_names[i],
vmballoon_page_size_names[j],
atomic64_read(&b->stats->page_stat[i][j]));
}
return 0;
}
DEFINE_SHOW_ATTRIBUTE(vmballoon_debug);
static void __init vmballoon_debugfs_init(struct vmballoon *b)
{
debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
&vmballoon_debug_fops);
}
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
static_key_disable(&balloon_stat_enabled.key);
debugfs_lookup_and_remove("vmmemctl", NULL);
kfree(b->stats);
b->stats = NULL;
}
#else
static inline void vmballoon_debugfs_init(struct vmballoon *b)
{
}
static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}
#endif /* CONFIG_DEBUG_FS */
#ifdef CONFIG_BALLOON_COMPACTION
/**
* vmballoon_migratepage() - migrates a balloon page.
* @b_dev_info: balloon device information descriptor.
* @newpage: the page to which @page should be migrated.
* @page: a ballooned page that should be migrated.
* @mode: migration mode, ignored.
*
* This function is really open-coded, but that is according to the interface
* that balloon_compaction provides.
*
* Return: zero on success, -EAGAIN when migration cannot be performed
* momentarily, and -EBUSY if migration failed and should be retried
* with that specific page.
*/
static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
struct page *newpage, struct page *page,
enum migrate_mode mode)
{
unsigned long status, flags;
struct vmballoon *b;
int ret;
b = container_of(b_dev_info, struct vmballoon, b_dev_info);
/*
* If the semaphore is taken, there is ongoing configuration change
* (i.e., balloon reset), so try again.
*/
if (!down_read_trylock(&b->conf_sem))
return -EAGAIN;
spin_lock(&b->comm_lock);
/*
* We must start by deflating and not inflating, as otherwise the
* hypervisor may tell us that it has enough memory and the new page is
* not needed. Since the old page is isolated, we cannot use the list
* interface to unlock it, as the LRU field is used for isolation.
* Instead, we use the native interface directly.
*/
vmballoon_add_page(b, 0, page);
status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
VMW_BALLOON_DEFLATE);
if (status == VMW_BALLOON_SUCCESS)
status = vmballoon_status_page(b, 0, &page);
/*
* If a failure happened, let the migration mechanism know that it
* should not retry.
*/
if (status != VMW_BALLOON_SUCCESS) {
spin_unlock(&b->comm_lock);
ret = -EBUSY;
goto out_unlock;
}
/*
* The page is isolated, so it is safe to delete it without holding
* @pages_lock. We keep holding @comm_lock since we will need it in a
* second.
*/
balloon_page_delete(page);
put_page(page);
/* Inflate */
vmballoon_add_page(b, 0, newpage);
status = vmballoon_lock_op(b, 1, VMW_BALLOON_4K_PAGE,
VMW_BALLOON_INFLATE);
if (status == VMW_BALLOON_SUCCESS)
status = vmballoon_status_page(b, 0, &newpage);
spin_unlock(&b->comm_lock);
if (status != VMW_BALLOON_SUCCESS) {
/*
* A failure happened. While we can deflate the page we just
* inflated, this deflation can also encounter an error. Instead
* we will decrease the size of the balloon to reflect the
* change and report failure.
*/
atomic64_dec(&b->size);
ret = -EBUSY;
} else {
/*
* Success. Take a reference for the page, and we will add it to
* the list after acquiring the lock.
*/
get_page(newpage);
ret = MIGRATEPAGE_SUCCESS;
}
/* Update the balloon list under the @pages_lock */
spin_lock_irqsave(&b->b_dev_info.pages_lock, flags);
/*
* On inflation success, we already took a reference for the @newpage.
* If we succeed just insert it to the list and update the statistics
* under the lock.
*/
if (ret == MIGRATEPAGE_SUCCESS) {
balloon_page_insert(&b->b_dev_info, newpage);
__count_vm_event(BALLOON_MIGRATE);
}
/*
* We deflated successfully, so regardless of the inflation success, we
* need to reduce the number of isolated_pages.
*/
b->b_dev_info.isolated_pages--;
spin_unlock_irqrestore(&b->b_dev_info.pages_lock, flags);
out_unlock:
up_read(&b->conf_sem);
return ret;
}
/**
* vmballoon_compaction_init() - initializes compaction for the balloon.
*
* @b: pointer to the balloon.
*
* Sets up the compaction migration callback. The function currently cannot
* fail, so no cleanup is required by the caller.
*/
static __init void vmballoon_compaction_init(struct vmballoon *b)
{
b->b_dev_info.migratepage = vmballoon_migratepage;
}
#else /* CONFIG_BALLOON_COMPACTION */
static inline void vmballoon_compaction_init(struct vmballoon *b)
{
}
#endif /* CONFIG_BALLOON_COMPACTION */
static int __init vmballoon_init(void)
{
int error;
/*
* Check if we are running on VMware's hypervisor and bail out
* if we are not.
*/
if (x86_hyper_type != X86_HYPER_VMWARE)
return -ENODEV;
INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);
error = vmballoon_register_shrinker(&balloon);
if (error)
goto fail;
/*
* Initialization of compaction must be done after the call to
* balloon_devinfo_init().
*/
balloon_devinfo_init(&balloon.b_dev_info);
vmballoon_compaction_init(&balloon);
INIT_LIST_HEAD(&balloon.huge_pages);
spin_lock_init(&balloon.comm_lock);
init_rwsem(&balloon.conf_sem);
balloon.vmci_doorbell = VMCI_INVALID_HANDLE;
balloon.batch_page = NULL;
balloon.page = NULL;
balloon.reset_required = true;
queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);
vmballoon_debugfs_init(&balloon);
return 0;
fail:
vmballoon_unregister_shrinker(&balloon);
return error;
}
/*
* Using late_initcall() instead of module_init() allows the balloon to use the
* VMCI doorbell even when the balloon is built into the kernel. Otherwise the
* VMCI is probed only after the balloon is initialized. If the balloon is used
* as a module, late_initcall() is equivalent to module_init().
*/
late_initcall(vmballoon_init);
static void __exit vmballoon_exit(void)
{
vmballoon_unregister_shrinker(&balloon);
vmballoon_vmci_cleanup(&balloon);
cancel_delayed_work_sync(&balloon.dwork);
vmballoon_debugfs_exit(&balloon);
/*
* Deallocate all reserved memory, and reset connection with monitor.
* Reset connection before deallocating memory to avoid potential for
* additional spurious resets from guest touching deallocated pages.
*/
vmballoon_send_start(&balloon, 0);
vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);
| linux-master | drivers/misc/vmw_balloon.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ampere Computing SoC's SMpro Misc Driver
*
* Copyright (c) 2022, Ampere Computing LLC
*/
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
/* Boot Stage/Progress Registers */
#define BOOTSTAGE 0xB0
#define BOOTSTAGE_LO 0xB1
#define CUR_BOOTSTAGE 0xB2
#define BOOTSTAGE_HI 0xB3
/* SOC State Registers */
#define SOC_POWER_LIMIT 0xE5
struct smpro_misc {
struct regmap *regmap;
};
static ssize_t boot_progress_show(struct device *dev, struct device_attribute *da, char *buf)
{
struct smpro_misc *misc = dev_get_drvdata(dev);
u16 boot_progress[3] = { 0 };
u32 bootstage;
u8 boot_stage;
u8 cur_stage;
u32 reg_lo;
u32 reg;
int ret;
/* Read current boot stage */
ret = regmap_read(misc->regmap, CUR_BOOTSTAGE, &reg);
if (ret)
return ret;
cur_stage = reg & 0xff;
ret = regmap_read(misc->regmap, BOOTSTAGE, &bootstage);
if (ret)
return ret;
boot_stage = (bootstage >> 8) & 0xff;
if (boot_stage > cur_stage)
return -EINVAL;
ret = regmap_read(misc->regmap, BOOTSTAGE_LO, &reg_lo);
if (!ret)
ret = regmap_read(misc->regmap, BOOTSTAGE_HI, &reg);
if (ret)
return ret;
/* Ask the firmware to report a new boot stage next time */
if (boot_stage < cur_stage) {
ret = regmap_write(misc->regmap, BOOTSTAGE, ((bootstage & 0xff00) | 0x1));
if (ret)
return ret;
}
boot_progress[0] = bootstage;
boot_progress[1] = swab16(reg);
boot_progress[2] = swab16(reg_lo);
return sysfs_emit(buf, "%*phN\n", (int)sizeof(boot_progress), boot_progress);
}
static DEVICE_ATTR_RO(boot_progress);
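/*
 * Worked example (illustrative only; the values are hypothetical): "%*phN"
 * emits the buffer as a hex string with no separators. With
 * boot_progress = { 0x0201, 0xaabb, 0xccdd } on a little-endian machine,
 * reading this attribute would return "0102bbaaddcc\n".
 */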
static ssize_t soc_power_limit_show(struct device *dev, struct device_attribute *da, char *buf)
{
struct smpro_misc *misc = dev_get_drvdata(dev);
unsigned int value;
int ret;
ret = regmap_read(misc->regmap, SOC_POWER_LIMIT, &value);
if (ret)
return ret;
return sysfs_emit(buf, "%d\n", value);
}
static ssize_t soc_power_limit_store(struct device *dev, struct device_attribute *da,
const char *buf, size_t count)
{
struct smpro_misc *misc = dev_get_drvdata(dev);
unsigned long val;
s32 ret;
ret = kstrtoul(buf, 0, &val);
if (ret)
return ret;
ret = regmap_write(misc->regmap, SOC_POWER_LIMIT, (unsigned int)val);
if (ret)
return -EPROTO;
return count;
}
static DEVICE_ATTR_RW(soc_power_limit);
static struct attribute *smpro_misc_attrs[] = {
&dev_attr_boot_progress.attr,
&dev_attr_soc_power_limit.attr,
NULL
};
ATTRIBUTE_GROUPS(smpro_misc);
static int smpro_misc_probe(struct platform_device *pdev)
{
struct smpro_misc *misc;
misc = devm_kzalloc(&pdev->dev, sizeof(struct smpro_misc), GFP_KERNEL);
if (!misc)
return -ENOMEM;
platform_set_drvdata(pdev, misc);
misc->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!misc->regmap)
return -ENODEV;
return 0;
}
static struct platform_driver smpro_misc_driver = {
.probe = smpro_misc_probe,
.driver = {
.name = "smpro-misc",
.dev_groups = smpro_misc_groups,
},
};
module_platform_driver(smpro_misc_driver);
MODULE_AUTHOR("Tung Nguyen <[email protected]>");
MODULE_AUTHOR("Quan Nguyen <[email protected]>");
MODULE_DESCRIPTION("Ampere Altra SMpro Misc driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/smpro-misc.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the HP iLO management processor.
*
* Copyright (C) 2008 Hewlett-Packard Development Company, L.P.
* David Altobelli <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include "hpilo.h"
static const struct class ilo_class = {
.name = "iLO",
};
static unsigned int ilo_major;
static unsigned int max_ccb = 16;
static char ilo_hwdev[MAX_ILO_DEV];
static const struct pci_device_id ilo_blacklist[] = {
/* auxiliary iLO */
{PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP, 0x1979)},
/* CL */
{PCI_DEVICE_SUB(PCI_VENDOR_ID_HP, 0x3307, PCI_VENDOR_ID_HP_3PAR, 0x0289)},
{}
};
static inline int get_entry_id(int entry)
{
return (entry & ENTRY_MASK_DESCRIPTOR) >> ENTRY_BITPOS_DESCRIPTOR;
}
static inline int get_entry_len(int entry)
{
return ((entry & ENTRY_MASK_QWORDS) >> ENTRY_BITPOS_QWORDS) << 3;
}
static inline int mk_entry(int id, int len)
{
int qlen = len & 7 ? (len >> 3) + 1 : len >> 3;
return id << ENTRY_BITPOS_DESCRIPTOR | qlen << ENTRY_BITPOS_QWORDS;
}
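/*
 * Worked example (illustrative only): lengths are stored in qwords (8-byte
 * units), rounded up. For a 100-byte packet, qlen = (100 >> 3) + 1 = 13, and
 * get_entry_len() later recovers 13 << 3 = 104 bytes, the qword-padded length.
 */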
static inline int desc_mem_sz(int nr_entry)
{
return nr_entry << L2_QENTRY_SZ;
}
/*
* FIFO queues, shared with hardware.
*
* If a queue has empty slots, an entry is added to the queue tail,
* and that entry is marked as occupied.
* Entries can be dequeued from the head of the list, when the device
* has marked the entry as consumed.
*
* Returns true on successful queue/dequeue, false on failure.
*/
static int fifo_enqueue(struct ilo_hwinfo *hw, char *fifobar, int entry)
{
struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&hw->fifo_lock, flags);
if (!(fifo_q->fifobar[(fifo_q->tail + 1) & fifo_q->imask]
& ENTRY_MASK_O)) {
fifo_q->fifobar[fifo_q->tail & fifo_q->imask] |=
(entry & ENTRY_MASK_NOSTATE) | fifo_q->merge;
fifo_q->tail += 1;
ret = 1;
}
spin_unlock_irqrestore(&hw->fifo_lock, flags);
return ret;
}
static int fifo_dequeue(struct ilo_hwinfo *hw, char *fifobar, int *entry)
{
struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
unsigned long flags;
int ret = 0;
u64 c;
spin_lock_irqsave(&hw->fifo_lock, flags);
c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
if (c & ENTRY_MASK_C) {
if (entry)
*entry = c & ENTRY_MASK_NOSTATE;
fifo_q->fifobar[fifo_q->head & fifo_q->imask] =
(c | ENTRY_MASK) + 1;
fifo_q->head += 1;
ret = 1;
}
spin_unlock_irqrestore(&hw->fifo_lock, flags);
return ret;
}
static int fifo_check_recv(struct ilo_hwinfo *hw, char *fifobar)
{
struct fifo *fifo_q = FIFOBARTOHANDLE(fifobar);
unsigned long flags;
int ret = 0;
u64 c;
spin_lock_irqsave(&hw->fifo_lock, flags);
c = fifo_q->fifobar[fifo_q->head & fifo_q->imask];
if (c & ENTRY_MASK_C)
ret = 1;
spin_unlock_irqrestore(&hw->fifo_lock, flags);
return ret;
}
static int ilo_pkt_enqueue(struct ilo_hwinfo *hw, struct ccb *ccb,
int dir, int id, int len)
{
char *fifobar;
int entry;
if (dir == SENDQ)
fifobar = ccb->ccb_u1.send_fifobar;
else
fifobar = ccb->ccb_u3.recv_fifobar;
entry = mk_entry(id, len);
return fifo_enqueue(hw, fifobar, entry);
}
static int ilo_pkt_dequeue(struct ilo_hwinfo *hw, struct ccb *ccb,
int dir, int *id, int *len, void **pkt)
{
char *fifobar, *desc;
int entry = 0, pkt_id = 0;
int ret;
if (dir == SENDQ) {
fifobar = ccb->ccb_u1.send_fifobar;
desc = ccb->ccb_u2.send_desc;
} else {
fifobar = ccb->ccb_u3.recv_fifobar;
desc = ccb->ccb_u4.recv_desc;
}
ret = fifo_dequeue(hw, fifobar, &entry);
if (ret) {
pkt_id = get_entry_id(entry);
if (id)
*id = pkt_id;
if (len)
*len = get_entry_len(entry);
if (pkt)
*pkt = (void *)(desc + desc_mem_sz(pkt_id));
}
return ret;
}
static int ilo_pkt_recv(struct ilo_hwinfo *hw, struct ccb *ccb)
{
char *fifobar = ccb->ccb_u3.recv_fifobar;
return fifo_check_recv(hw, fifobar);
}
static inline void doorbell_set(struct ccb *ccb)
{
iowrite8(1, ccb->ccb_u5.db_base);
}
static inline void doorbell_clr(struct ccb *ccb)
{
iowrite8(2, ccb->ccb_u5.db_base);
}
static inline int ctrl_set(int l2sz, int idxmask, int desclim)
{
int active = 0, go = 1;
return l2sz << CTRL_BITPOS_L2SZ |
idxmask << CTRL_BITPOS_FIFOINDEXMASK |
desclim << CTRL_BITPOS_DESCLIMIT |
active << CTRL_BITPOS_A |
go << CTRL_BITPOS_G;
}
static void ctrl_setup(struct ccb *ccb, int nr_desc, int l2desc_sz)
{
/* for simplicity, use the same parameters for send and recv ctrls */
ccb->send_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
ccb->recv_ctrl = ctrl_set(l2desc_sz, nr_desc-1, nr_desc-1);
}
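/*
 * Illustrative note (assumes NR_QENTRY is a power of two, as the mask
 * arithmetic requires): ctrl_set() packs five fields into one control word:
 * the log2 descriptor size, the FIFO index mask, the descriptor limit, and
 * the A(ctive) and G(o) flags. Passing nr_desc - 1 as the index mask lets
 * FIFO indices wrap with a simple bitwise AND, which is how fifo_q->imask is
 * used elsewhere in this file.
 */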
static inline int fifo_sz(int nr_entry)
{
/* size of a fifo is determined by the number of entries it contains */
return nr_entry * sizeof(u64) + FIFOHANDLESIZE;
}
static void fifo_setup(void *base_addr, int nr_entry)
{
struct fifo *fifo_q = base_addr;
int i;
/* set up an empty fifo */
fifo_q->head = 0;
fifo_q->tail = 0;
fifo_q->reset = 0;
fifo_q->nrents = nr_entry;
fifo_q->imask = nr_entry - 1;
fifo_q->merge = ENTRY_MASK_O;
for (i = 0; i < nr_entry; i++)
fifo_q->fifobar[i] = 0;
}
static void ilo_ccb_close(struct pci_dev *pdev, struct ccb_data *data)
{
struct ccb *driver_ccb = &data->driver_ccb;
struct ccb __iomem *device_ccb = data->mapped_ccb;
int retries;
/* complicated dance to tell the hw we are stopping */
doorbell_clr(driver_ccb);
iowrite32(ioread32(&device_ccb->send_ctrl) & ~(1 << CTRL_BITPOS_G),
&device_ccb->send_ctrl);
iowrite32(ioread32(&device_ccb->recv_ctrl) & ~(1 << CTRL_BITPOS_G),
&device_ccb->recv_ctrl);
/* give iLO some time to process stop request */
for (retries = MAX_WAIT; retries > 0; retries--) {
doorbell_set(driver_ccb);
udelay(WAIT_TIME);
if (!(ioread32(&device_ccb->send_ctrl) & (1 << CTRL_BITPOS_A))
&&
!(ioread32(&device_ccb->recv_ctrl) & (1 << CTRL_BITPOS_A)))
break;
}
if (retries == 0)
dev_err(&pdev->dev, "Closing, but controller still active\n");
/* clear the hw ccb */
memset_io(device_ccb, 0, sizeof(struct ccb));
/* free resources used to back send/recv queues */
dma_free_coherent(&pdev->dev, data->dma_size, data->dma_va,
data->dma_pa);
}
static int ilo_ccb_setup(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
char *dma_va;
dma_addr_t dma_pa;
struct ccb *driver_ccb, *ilo_ccb;
driver_ccb = &data->driver_ccb;
ilo_ccb = &data->ilo_ccb;
data->dma_size = 2 * fifo_sz(NR_QENTRY) +
2 * desc_mem_sz(NR_QENTRY) +
ILO_START_ALIGN + ILO_CACHE_SZ;
data->dma_va = dma_alloc_coherent(&hw->ilo_dev->dev, data->dma_size,
&data->dma_pa, GFP_ATOMIC);
if (!data->dma_va)
return -ENOMEM;
dma_va = (char *)data->dma_va;
dma_pa = data->dma_pa;
dma_va = (char *)roundup((unsigned long)dma_va, ILO_START_ALIGN);
dma_pa = roundup(dma_pa, ILO_START_ALIGN);
/*
* Create two ccb's, one with virt addrs, one with phys addrs.
* Copy the phys addr ccb to device shared mem.
*/
ctrl_setup(driver_ccb, NR_QENTRY, L2_QENTRY_SZ);
ctrl_setup(ilo_ccb, NR_QENTRY, L2_QENTRY_SZ);
fifo_setup(dma_va, NR_QENTRY);
driver_ccb->ccb_u1.send_fifobar = dma_va + FIFOHANDLESIZE;
ilo_ccb->ccb_u1.send_fifobar_pa = dma_pa + FIFOHANDLESIZE;
dma_va += fifo_sz(NR_QENTRY);
dma_pa += fifo_sz(NR_QENTRY);
dma_va = (char *)roundup((unsigned long)dma_va, ILO_CACHE_SZ);
dma_pa = roundup(dma_pa, ILO_CACHE_SZ);
fifo_setup(dma_va, NR_QENTRY);
driver_ccb->ccb_u3.recv_fifobar = dma_va + FIFOHANDLESIZE;
ilo_ccb->ccb_u3.recv_fifobar_pa = dma_pa + FIFOHANDLESIZE;
dma_va += fifo_sz(NR_QENTRY);
dma_pa += fifo_sz(NR_QENTRY);
driver_ccb->ccb_u2.send_desc = dma_va;
ilo_ccb->ccb_u2.send_desc_pa = dma_pa;
dma_pa += desc_mem_sz(NR_QENTRY);
dma_va += desc_mem_sz(NR_QENTRY);
driver_ccb->ccb_u4.recv_desc = dma_va;
ilo_ccb->ccb_u4.recv_desc_pa = dma_pa;
driver_ccb->channel = slot;
ilo_ccb->channel = slot;
driver_ccb->ccb_u5.db_base = hw->db_vaddr + (slot << L2_DB_SIZE);
ilo_ccb->ccb_u5.db_base = NULL; /* hw ccb's doorbell is not used */
return 0;
}
static void ilo_ccb_open(struct ilo_hwinfo *hw, struct ccb_data *data, int slot)
{
int pkt_id, pkt_sz;
struct ccb *driver_ccb = &data->driver_ccb;
/* copy the ccb with physical addrs to device memory */
data->mapped_ccb = (struct ccb __iomem *)
(hw->ram_vaddr + (slot * ILOHW_CCB_SZ));
memcpy_toio(data->mapped_ccb, &data->ilo_ccb, sizeof(struct ccb));
/* put packets on the send and receive queues */
pkt_sz = 0;
for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++) {
ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, pkt_sz);
doorbell_set(driver_ccb);
}
pkt_sz = desc_mem_sz(1);
for (pkt_id = 0; pkt_id < NR_QENTRY; pkt_id++)
ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, pkt_sz);
/* the ccb is ready to use */
doorbell_clr(driver_ccb);
}
static int ilo_ccb_verify(struct ilo_hwinfo *hw, struct ccb_data *data)
{
int pkt_id, i;
struct ccb *driver_ccb = &data->driver_ccb;
/* make sure iLO is really handling requests */
for (i = MAX_WAIT; i > 0; i--) {
if (ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, NULL, NULL))
break;
udelay(WAIT_TIME);
}
if (i == 0) {
dev_err(&hw->ilo_dev->dev, "Open could not dequeue a packet\n");
return -EBUSY;
}
ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, 0);
doorbell_set(driver_ccb);
return 0;
}
static inline int is_channel_reset(struct ccb *ccb)
{
/* check for this particular channel needing a reset */
return FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset;
}
static inline void set_channel_reset(struct ccb *ccb)
{
/* set a flag indicating this channel needs a reset */
FIFOBARTOHANDLE(ccb->ccb_u1.send_fifobar)->reset = 1;
}
static inline int get_device_outbound(struct ilo_hwinfo *hw)
{
return ioread32(&hw->mmio_vaddr[DB_OUT]);
}
static inline int is_db_reset(int db_out)
{
return db_out & (1 << DB_RESET);
}
static inline void clear_pending_db(struct ilo_hwinfo *hw, int clr)
{
iowrite32(clr, &hw->mmio_vaddr[DB_OUT]);
}
static inline void clear_device(struct ilo_hwinfo *hw)
{
/* clear the device (reset bits, pending channel entries) */
clear_pending_db(hw, -1);
}
static inline void ilo_enable_interrupts(struct ilo_hwinfo *hw)
{
iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) | 1, &hw->mmio_vaddr[DB_IRQ]);
}
static inline void ilo_disable_interrupts(struct ilo_hwinfo *hw)
{
iowrite8(ioread8(&hw->mmio_vaddr[DB_IRQ]) & ~1,
&hw->mmio_vaddr[DB_IRQ]);
}
static void ilo_set_reset(struct ilo_hwinfo *hw)
{
int slot;
/*
* Mapped memory is zeroed on ilo reset, so set a per ccb flag
* to indicate that this ccb needs to be closed and reopened.
*/
for (slot = 0; slot < max_ccb; slot++) {
if (!hw->ccb_alloc[slot])
continue;
set_channel_reset(&hw->ccb_alloc[slot]->driver_ccb);
}
}
static ssize_t ilo_read(struct file *fp, char __user *buf,
size_t len, loff_t *off)
{
int err, found, cnt, pkt_id, pkt_len;
struct ccb_data *data = fp->private_data;
struct ccb *driver_ccb = &data->driver_ccb;
struct ilo_hwinfo *hw = data->ilo_hw;
void *pkt;
if (is_channel_reset(driver_ccb)) {
/*
* If the device has been reset, applications
* need to close and reopen all ccbs.
*/
return -ENODEV;
}
/*
* This function is to be called when data is expected
* in the channel, and will return an error if no packet is found
* during the loop below. The sleep/retry logic is to allow
* applications to call read() immediately post write(),
* and give iLO some time to process the sent packet.
*/
cnt = 20;
do {
/* look for a received packet */
found = ilo_pkt_dequeue(hw, driver_ccb, RECVQ, &pkt_id,
&pkt_len, &pkt);
if (found)
break;
cnt--;
msleep(100);
} while (!found && cnt);
if (!found)
return -EAGAIN;
/* only copy the length of the received packet */
if (pkt_len < len)
len = pkt_len;
err = copy_to_user(buf, pkt, len);
/* return the received packet to the queue */
ilo_pkt_enqueue(hw, driver_ccb, RECVQ, pkt_id, desc_mem_sz(1));
return err ? -EFAULT : len;
}
static ssize_t ilo_write(struct file *fp, const char __user *buf,
size_t len, loff_t *off)
{
int err, pkt_id, pkt_len;
struct ccb_data *data = fp->private_data;
struct ccb *driver_ccb = &data->driver_ccb;
struct ilo_hwinfo *hw = data->ilo_hw;
void *pkt;
if (is_channel_reset(driver_ccb))
return -ENODEV;
/* get a packet to send the user command */
if (!ilo_pkt_dequeue(hw, driver_ccb, SENDQ, &pkt_id, &pkt_len, &pkt))
return -EBUSY;
/* limit the length to the length of the packet */
if (pkt_len < len)
len = pkt_len;
/* on failure, set len to 0 to return an empty packet to the device */
err = copy_from_user(pkt, buf, len);
if (err)
len = 0;
/* send the packet */
ilo_pkt_enqueue(hw, driver_ccb, SENDQ, pkt_id, len);
doorbell_set(driver_ccb);
return err ? -EFAULT : len;
}
static __poll_t ilo_poll(struct file *fp, poll_table *wait)
{
struct ccb_data *data = fp->private_data;
struct ccb *driver_ccb = &data->driver_ccb;
poll_wait(fp, &data->ccb_waitq, wait);
if (is_channel_reset(driver_ccb))
return EPOLLERR;
else if (ilo_pkt_recv(data->ilo_hw, driver_ccb))
return EPOLLIN | EPOLLRDNORM;
return 0;
}
static int ilo_close(struct inode *ip, struct file *fp)
{
int slot;
struct ccb_data *data;
struct ilo_hwinfo *hw;
unsigned long flags;
slot = iminor(ip) % max_ccb;
hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);
spin_lock(&hw->open_lock);
if (hw->ccb_alloc[slot]->ccb_cnt == 1) {
data = fp->private_data;
spin_lock_irqsave(&hw->alloc_lock, flags);
hw->ccb_alloc[slot] = NULL;
spin_unlock_irqrestore(&hw->alloc_lock, flags);
ilo_ccb_close(hw->ilo_dev, data);
kfree(data);
} else
hw->ccb_alloc[slot]->ccb_cnt--;
spin_unlock(&hw->open_lock);
return 0;
}
static int ilo_open(struct inode *ip, struct file *fp)
{
int slot, error;
struct ccb_data *data;
struct ilo_hwinfo *hw;
unsigned long flags;
slot = iminor(ip) % max_ccb;
hw = container_of(ip->i_cdev, struct ilo_hwinfo, cdev);
/* new ccb allocation */
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
spin_lock(&hw->open_lock);
/* each fd private_data holds sw/hw view of ccb */
if (hw->ccb_alloc[slot] == NULL) {
/* create a channel control block for this minor */
error = ilo_ccb_setup(hw, data, slot);
if (error) {
kfree(data);
goto out;
}
data->ccb_cnt = 1;
data->ccb_excl = fp->f_flags & O_EXCL;
data->ilo_hw = hw;
init_waitqueue_head(&data->ccb_waitq);
/* write the ccb to hw */
spin_lock_irqsave(&hw->alloc_lock, flags);
ilo_ccb_open(hw, data, slot);
hw->ccb_alloc[slot] = data;
spin_unlock_irqrestore(&hw->alloc_lock, flags);
/* make sure the channel is functional */
error = ilo_ccb_verify(hw, data);
if (error) {
spin_lock_irqsave(&hw->alloc_lock, flags);
hw->ccb_alloc[slot] = NULL;
spin_unlock_irqrestore(&hw->alloc_lock, flags);
ilo_ccb_close(hw->ilo_dev, data);
kfree(data);
goto out;
}
} else {
kfree(data);
if (fp->f_flags & O_EXCL || hw->ccb_alloc[slot]->ccb_excl) {
/*
* The channel exists, and either this open
* or a previous open of this channel wants
* exclusive access.
*/
error = -EBUSY;
} else {
hw->ccb_alloc[slot]->ccb_cnt++;
error = 0;
}
}
out:
spin_unlock(&hw->open_lock);
if (!error)
fp->private_data = hw->ccb_alloc[slot];
return error;
}
static const struct file_operations ilo_fops = {
.owner = THIS_MODULE,
.read = ilo_read,
.write = ilo_write,
.poll = ilo_poll,
.open = ilo_open,
.release = ilo_close,
.llseek = noop_llseek,
};
static irqreturn_t ilo_isr(int irq, void *data)
{
struct ilo_hwinfo *hw = data;
int pending, i;
spin_lock(&hw->alloc_lock);
/* check for ccbs which have data */
pending = get_device_outbound(hw);
if (!pending) {
spin_unlock(&hw->alloc_lock);
return IRQ_NONE;
}
if (is_db_reset(pending)) {
/* wake up all ccbs if the device was reset */
pending = -1;
ilo_set_reset(hw);
}
for (i = 0; i < max_ccb; i++) {
if (!hw->ccb_alloc[i])
continue;
if (pending & (1 << i))
wake_up_interruptible(&hw->ccb_alloc[i]->ccb_waitq);
}
/* clear the device of the channels that have been handled */
clear_pending_db(hw, pending);
spin_unlock(&hw->alloc_lock);
return IRQ_HANDLED;
}
static void ilo_unmap_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
pci_iounmap(pdev, hw->db_vaddr);
pci_iounmap(pdev, hw->ram_vaddr);
pci_iounmap(pdev, hw->mmio_vaddr);
}
static int ilo_map_device(struct pci_dev *pdev, struct ilo_hwinfo *hw)
{
int bar;
unsigned long off;
u8 pci_rev_id;
int rc;
/* map the memory mapped i/o registers */
hw->mmio_vaddr = pci_iomap(pdev, 1, 0);
if (hw->mmio_vaddr == NULL) {
dev_err(&pdev->dev, "Error mapping mmio\n");
goto out;
}
/* map the adapter shared memory region */
rc = pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev_id);
if (rc != 0) {
dev_err(&pdev->dev, "Error reading PCI rev id: %d\n", rc);
goto out;
}
if (pci_rev_id >= PCI_REV_ID_NECHES) {
bar = 5;
/* Last 8k is reserved for CCBs */
off = pci_resource_len(pdev, bar) - 0x2000;
} else {
bar = 2;
off = 0;
}
hw->ram_vaddr = pci_iomap_range(pdev, bar, off, max_ccb * ILOHW_CCB_SZ);
if (hw->ram_vaddr == NULL) {
dev_err(&pdev->dev, "Error mapping shared mem\n");
goto mmio_free;
}
/* map the doorbell aperture */
hw->db_vaddr = pci_iomap(pdev, 3, max_ccb * ONE_DB_SIZE);
if (hw->db_vaddr == NULL) {
dev_err(&pdev->dev, "Error mapping doorbell\n");
goto ram_free;
}
return 0;
ram_free:
pci_iounmap(pdev, hw->ram_vaddr);
mmio_free:
pci_iounmap(pdev, hw->mmio_vaddr);
out:
return -ENOMEM;
}
static void ilo_remove(struct pci_dev *pdev)
{
int i, minor;
struct ilo_hwinfo *ilo_hw = pci_get_drvdata(pdev);
if (!ilo_hw)
return;
clear_device(ilo_hw);
minor = MINOR(ilo_hw->cdev.dev);
for (i = minor; i < minor + max_ccb; i++)
device_destroy(&ilo_class, MKDEV(ilo_major, i));
cdev_del(&ilo_hw->cdev);
ilo_disable_interrupts(ilo_hw);
free_irq(pdev->irq, ilo_hw);
ilo_unmap_device(pdev, ilo_hw);
pci_release_regions(pdev);
/*
* pci_disable_device(pdev) used to be here. But this PCI device has
* two functions with interrupt lines connected to a single pin. The
* other one is a USB host controller. So when we disable the PIN here
* e.g. by rmmod hpilo, the controller stops working. It is because
* the interrupt link is disabled in ACPI since it is not refcounted
* yet. See acpi_pci_link_free_irq called from acpi_pci_irq_disable.
*/
kfree(ilo_hw);
ilo_hwdev[(minor / max_ccb)] = 0;
}
static int ilo_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int devnum, minor, start, error = 0;
struct ilo_hwinfo *ilo_hw;
if (pci_match_id(ilo_blacklist, pdev)) {
dev_dbg(&pdev->dev, "Not supported on this device\n");
return -ENODEV;
}
if (max_ccb > MAX_CCB)
max_ccb = MAX_CCB;
else if (max_ccb < MIN_CCB)
max_ccb = MIN_CCB;
/* find a free range for device files */
for (devnum = 0; devnum < MAX_ILO_DEV; devnum++) {
if (ilo_hwdev[devnum] == 0) {
ilo_hwdev[devnum] = 1;
break;
}
}
if (devnum == MAX_ILO_DEV) {
dev_err(&pdev->dev, "Error finding free device\n");
return -ENODEV;
}
/* track global allocations for this device */
error = -ENOMEM;
ilo_hw = kzalloc(sizeof(*ilo_hw), GFP_KERNEL);
if (!ilo_hw)
goto out;
ilo_hw->ilo_dev = pdev;
spin_lock_init(&ilo_hw->alloc_lock);
spin_lock_init(&ilo_hw->fifo_lock);
spin_lock_init(&ilo_hw->open_lock);
error = pci_enable_device(pdev);
if (error)
goto free;
pci_set_master(pdev);
error = pci_request_regions(pdev, ILO_NAME);
if (error)
goto disable;
error = ilo_map_device(pdev, ilo_hw);
if (error)
goto free_regions;
pci_set_drvdata(pdev, ilo_hw);
clear_device(ilo_hw);
error = request_irq(pdev->irq, ilo_isr, IRQF_SHARED, "hpilo", ilo_hw);
if (error)
goto unmap;
ilo_enable_interrupts(ilo_hw);
cdev_init(&ilo_hw->cdev, &ilo_fops);
ilo_hw->cdev.owner = THIS_MODULE;
start = devnum * max_ccb;
error = cdev_add(&ilo_hw->cdev, MKDEV(ilo_major, start), max_ccb);
if (error) {
dev_err(&pdev->dev, "Could not add cdev\n");
goto remove_isr;
}
for (minor = 0 ; minor < max_ccb; minor++) {
struct device *dev;
dev = device_create(&ilo_class, &pdev->dev,
MKDEV(ilo_major, minor), NULL,
"hpilo!d%dccb%d", devnum, minor);
if (IS_ERR(dev))
dev_err(&pdev->dev, "Could not create files\n");
}
return 0;
remove_isr:
ilo_disable_interrupts(ilo_hw);
free_irq(pdev->irq, ilo_hw);
unmap:
ilo_unmap_device(pdev, ilo_hw);
free_regions:
pci_release_regions(pdev);
disable:
/* pci_disable_device(pdev); see comment in ilo_remove */
free:
kfree(ilo_hw);
out:
ilo_hwdev[devnum] = 0;
return error;
}
static const struct pci_device_id ilo_devices[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB204) },
{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3307) },
{ }
};
MODULE_DEVICE_TABLE(pci, ilo_devices);
static struct pci_driver ilo_driver = {
.name = ILO_NAME,
.id_table = ilo_devices,
.probe = ilo_probe,
.remove = ilo_remove,
};
static int __init ilo_init(void)
{
int error;
dev_t dev;
error = class_register(&ilo_class);
if (error)
goto out;
error = alloc_chrdev_region(&dev, 0, MAX_OPEN, ILO_NAME);
if (error)
goto class_destroy;
ilo_major = MAJOR(dev);
error = pci_register_driver(&ilo_driver);
if (error)
goto chr_remove;
return 0;
chr_remove:
unregister_chrdev_region(dev, MAX_OPEN);
class_destroy:
class_unregister(&ilo_class);
out:
return error;
}
static void __exit ilo_exit(void)
{
pci_unregister_driver(&ilo_driver);
unregister_chrdev_region(MKDEV(ilo_major, 0), MAX_OPEN);
class_unregister(&ilo_class);
}
MODULE_VERSION("1.5.0");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <[email protected]>");
MODULE_LICENSE("GPL v2");
module_param(max_ccb, uint, 0444);
MODULE_PARM_DESC(max_ccb, "Maximum number of HP iLO channels to attach (8-24)(default=16)");
module_init(ilo_init);
module_exit(ilo_exit);
| linux-master | drivers/misc/hpilo.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* ad525x_dpot: Driver for the Analog Devices digital potentiometers
* Copyright (c) 2009-2010 Analog Devices, Inc.
* Author: Michael Hennerich <[email protected]>
*
* DEVID #Wipers #Positions Resistor Options (kOhm)
* AD5258 1 64 1, 10, 50, 100
* AD5259 1 256 5, 10, 50, 100
* AD5251 2 64 1, 10, 50, 100
* AD5252 2 256 1, 10, 50, 100
* AD5255 3 512 25, 250
* AD5253 4 64 1, 10, 50, 100
* AD5254 4 256 1, 10, 50, 100
* AD5160 1 256 5, 10, 50, 100
* AD5161 1 256 5, 10, 50, 100
* AD5162 2 256 2.5, 10, 50, 100
* AD5165 1 256 100
* AD5200 1 256 10, 50
* AD5201 1 33 10, 50
* AD5203 4 64 10, 100
* AD5204 4 256 10, 50, 100
* AD5206 6 256 10, 50, 100
* AD5207 2 256 10, 50, 100
* AD5231 1 1024 10, 50, 100
* AD5232 2 256 10, 50, 100
* AD5233 4 64 10, 50, 100
* AD5235 2 1024 25, 250
* AD5260 1 256 20, 50, 200
* AD5262 2 256 20, 50, 200
* AD5263 4 256 20, 50, 200
* AD5290 1 256 10, 50, 100
* AD5291 1 256 20, 50, 100 (20-TP)
* AD5292 1 1024 20, 50, 100 (20-TP)
* AD5293 1 1024 20, 50, 100
* AD7376 1 128 10, 50, 100, 1M
* AD8400 1 256 1, 10, 50, 100
* AD8402 2 256 1, 10, 50, 100
* AD8403 4 256 1, 10, 50, 100
* ADN2850 3 512 25, 250
* AD5241 1 256 10, 100, 1M
* AD5246 1 128 5, 10, 50, 100
* AD5247 1 128 5, 10, 50, 100
* AD5245 1 256 5, 10, 50, 100
* AD5243 2 256 2.5, 10, 50, 100
* AD5248 2 256 2.5, 10, 50, 100
* AD5242 2 256 20, 50, 200
* AD5280 1 256 20, 50, 200
* AD5282 2 256 20, 50, 200
* ADN2860 3 512 25, 250
* AD5273 1 64 1, 10, 50, 100 (OTP)
* AD5171 1 64 5, 10, 50, 100 (OTP)
* AD5170 1 256 2.5, 10, 50, 100 (OTP)
* AD5172 2 256 2.5, 10, 50, 100 (OTP)
* AD5173 2 256 2.5, 10, 50, 100 (OTP)
* AD5270 1 1024 20, 50, 100 (50-TP)
* AD5271 1 256 20, 50, 100 (50-TP)
* AD5272 1 1024 20, 50, 100 (50-TP)
* AD5274 1 256 20, 50, 100 (50-TP)
*
* See Documentation/misc-devices/ad525x_dpot.rst for more info.
*
* derived from ad5258.c
* Copyright (c) 2009 Cyber Switching, Inc.
* Author: Chris Verges <[email protected]>
*
* derived from ad5252.c
* Copyright (c) 2006-2011 Michael Hennerich <[email protected]>
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "ad525x_dpot.h"
/*
* Client data (each client gets its own)
*/
struct dpot_data {
struct ad_dpot_bus_data bdata;
struct mutex update_lock;
unsigned int rdac_mask;
unsigned int max_pos;
unsigned long devid;
unsigned int uid;
unsigned int feat;
unsigned int wipers;
u16 rdac_cache[MAX_RDACS];
DECLARE_BITMAP(otp_en_mask, MAX_RDACS);
};
static inline int dpot_read_d8(struct dpot_data *dpot)
{
return dpot->bdata.bops->read_d8(dpot->bdata.client);
}
static inline int dpot_read_r8d8(struct dpot_data *dpot, u8 reg)
{
return dpot->bdata.bops->read_r8d8(dpot->bdata.client, reg);
}
static inline int dpot_read_r8d16(struct dpot_data *dpot, u8 reg)
{
return dpot->bdata.bops->read_r8d16(dpot->bdata.client, reg);
}
static inline int dpot_write_d8(struct dpot_data *dpot, u8 val)
{
return dpot->bdata.bops->write_d8(dpot->bdata.client, val);
}
static inline int dpot_write_r8d8(struct dpot_data *dpot, u8 reg, u16 val)
{
return dpot->bdata.bops->write_r8d8(dpot->bdata.client, reg, val);
}
static inline int dpot_write_r8d16(struct dpot_data *dpot, u8 reg, u16 val)
{
return dpot->bdata.bops->write_r8d16(dpot->bdata.client, reg, val);
}
static s32 dpot_read_spi(struct dpot_data *dpot, u8 reg)
{
unsigned int ctrl = 0;
int value;
if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD))) {
if (dpot->feat & F_RDACS_WONLY)
return dpot->rdac_cache[reg & DPOT_RDAC_MASK];
if (dpot->uid == DPOT_UID(AD5291_ID) ||
dpot->uid == DPOT_UID(AD5292_ID) ||
dpot->uid == DPOT_UID(AD5293_ID)) {
value = dpot_read_r8d8(dpot,
DPOT_AD5291_READ_RDAC << 2);
if (value < 0)
return value;
if (dpot->uid == DPOT_UID(AD5291_ID))
value = value >> 2;
return value;
} else if (dpot->uid == DPOT_UID(AD5270_ID) ||
dpot->uid == DPOT_UID(AD5271_ID)) {
value = dpot_read_r8d8(dpot,
DPOT_AD5270_1_2_4_READ_RDAC << 2);
if (value < 0)
return value;
if (dpot->uid == DPOT_UID(AD5271_ID))
value = value >> 2;
return value;
}
ctrl = DPOT_SPI_READ_RDAC;
} else if (reg & DPOT_ADDR_EEPROM) {
ctrl = DPOT_SPI_READ_EEPROM;
}
if (dpot->feat & F_SPI_16BIT)
return dpot_read_r8d8(dpot, ctrl);
else if (dpot->feat & F_SPI_24BIT)
return dpot_read_r8d16(dpot, ctrl);
return -EFAULT;
}
static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
{
int value;
unsigned int ctrl = 0;
switch (dpot->uid) {
case DPOT_UID(AD5246_ID):
case DPOT_UID(AD5247_ID):
return dpot_read_d8(dpot);
case DPOT_UID(AD5245_ID):
case DPOT_UID(AD5241_ID):
case DPOT_UID(AD5242_ID):
case DPOT_UID(AD5243_ID):
case DPOT_UID(AD5248_ID):
case DPOT_UID(AD5280_ID):
case DPOT_UID(AD5282_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
0 : DPOT_AD5282_RDAC_AB;
return dpot_read_r8d8(dpot, ctrl);
case DPOT_UID(AD5170_ID):
case DPOT_UID(AD5171_ID):
case DPOT_UID(AD5273_ID):
return dpot_read_d8(dpot);
case DPOT_UID(AD5172_ID):
case DPOT_UID(AD5173_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
0 : DPOT_AD5172_3_A0;
return dpot_read_r8d8(dpot, ctrl);
case DPOT_UID(AD5272_ID):
case DPOT_UID(AD5274_ID):
dpot_write_r8d8(dpot,
(DPOT_AD5270_1_2_4_READ_RDAC << 2), 0);
value = dpot_read_r8d16(dpot, DPOT_AD5270_1_2_4_RDAC << 2);
if (value < 0)
return value;
/*
* AD5272/AD5274 returns the high byte first, however the
* underlying smbus expects the low byte first.
*/
value = swab16(value);
if (dpot->uid == DPOT_UID(AD5274_ID))
value = value >> 2;
return value;
default:
if ((reg & DPOT_REG_TOL) || (dpot->max_pos > 256))
return dpot_read_r8d16(dpot, (reg & 0xF8) |
((reg & 0x7) << 1));
else
return dpot_read_r8d8(dpot, reg);
}
}
static s32 dpot_read(struct dpot_data *dpot, u8 reg)
{
if (dpot->feat & F_SPI)
return dpot_read_spi(dpot, reg);
else
return dpot_read_i2c(dpot, reg);
}
static s32 dpot_write_spi(struct dpot_data *dpot, u8 reg, u16 value)
{
unsigned int val = 0;
if (!(reg & (DPOT_ADDR_EEPROM | DPOT_ADDR_CMD | DPOT_ADDR_OTP))) {
if (dpot->feat & F_RDACS_WONLY)
dpot->rdac_cache[reg & DPOT_RDAC_MASK] = value;
if (dpot->feat & F_AD_APPDATA) {
if (dpot->feat & F_SPI_8BIT) {
val = ((reg & DPOT_RDAC_MASK) <<
DPOT_MAX_POS(dpot->devid)) |
value;
return dpot_write_d8(dpot, val);
} else if (dpot->feat & F_SPI_16BIT) {
val = ((reg & DPOT_RDAC_MASK) <<
DPOT_MAX_POS(dpot->devid)) |
value;
return dpot_write_r8d8(dpot, val >> 8,
val & 0xFF);
} else
BUG();
} else {
if (dpot->uid == DPOT_UID(AD5291_ID) ||
dpot->uid == DPOT_UID(AD5292_ID) ||
dpot->uid == DPOT_UID(AD5293_ID)) {
dpot_write_r8d8(dpot, DPOT_AD5291_CTRLREG << 2,
DPOT_AD5291_UNLOCK_CMD);
if (dpot->uid == DPOT_UID(AD5291_ID))
value = value << 2;
return dpot_write_r8d8(dpot,
(DPOT_AD5291_RDAC << 2) |
(value >> 8), value & 0xFF);
} else if (dpot->uid == DPOT_UID(AD5270_ID) ||
dpot->uid == DPOT_UID(AD5271_ID)) {
dpot_write_r8d8(dpot,
DPOT_AD5270_1_2_4_CTRLREG << 2,
DPOT_AD5270_1_2_4_UNLOCK_CMD);
if (dpot->uid == DPOT_UID(AD5271_ID))
value = value << 2;
return dpot_write_r8d8(dpot,
(DPOT_AD5270_1_2_4_RDAC << 2) |
(value >> 8), value & 0xFF);
}
val = DPOT_SPI_RDAC | (reg & DPOT_RDAC_MASK);
}
} else if (reg & DPOT_ADDR_EEPROM) {
val = DPOT_SPI_EEPROM | (reg & DPOT_RDAC_MASK);
} else if (reg & DPOT_ADDR_CMD) {
switch (reg) {
case DPOT_DEC_ALL_6DB:
val = DPOT_SPI_DEC_ALL_6DB;
break;
case DPOT_INC_ALL_6DB:
val = DPOT_SPI_INC_ALL_6DB;
break;
case DPOT_DEC_ALL:
val = DPOT_SPI_DEC_ALL;
break;
case DPOT_INC_ALL:
val = DPOT_SPI_INC_ALL;
break;
}
} else if (reg & DPOT_ADDR_OTP) {
if (dpot->uid == DPOT_UID(AD5291_ID) ||
dpot->uid == DPOT_UID(AD5292_ID)) {
return dpot_write_r8d8(dpot,
DPOT_AD5291_STORE_XTPM << 2, 0);
} else if (dpot->uid == DPOT_UID(AD5270_ID) ||
dpot->uid == DPOT_UID(AD5271_ID)) {
return dpot_write_r8d8(dpot,
DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
}
} else
BUG();
if (dpot->feat & F_SPI_16BIT)
return dpot_write_r8d8(dpot, val, value);
else if (dpot->feat & F_SPI_24BIT)
return dpot_write_r8d16(dpot, val, value);
return -EFAULT;
}
static s32 dpot_write_i2c(struct dpot_data *dpot, u8 reg, u16 value)
{
/* Only write the instruction byte for certain commands */
unsigned int tmp = 0, ctrl = 0;
switch (dpot->uid) {
case DPOT_UID(AD5246_ID):
case DPOT_UID(AD5247_ID):
return dpot_write_d8(dpot, value);
case DPOT_UID(AD5245_ID):
case DPOT_UID(AD5241_ID):
case DPOT_UID(AD5242_ID):
case DPOT_UID(AD5243_ID):
case DPOT_UID(AD5248_ID):
case DPOT_UID(AD5280_ID):
case DPOT_UID(AD5282_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
0 : DPOT_AD5282_RDAC_AB;
return dpot_write_r8d8(dpot, ctrl, value);
case DPOT_UID(AD5171_ID):
case DPOT_UID(AD5273_ID):
if (reg & DPOT_ADDR_OTP) {
tmp = dpot_read_d8(dpot);
if (tmp >> 6) /* Ready to Program? */
return -EFAULT;
ctrl = DPOT_AD5273_FUSE;
}
return dpot_write_r8d8(dpot, ctrl, value);
case DPOT_UID(AD5172_ID):
case DPOT_UID(AD5173_ID):
ctrl = ((reg & DPOT_RDAC_MASK) == DPOT_RDAC0) ?
0 : DPOT_AD5172_3_A0;
if (reg & DPOT_ADDR_OTP) {
tmp = dpot_read_r8d16(dpot, ctrl);
if (tmp >> 14) /* Ready to Program? */
return -EFAULT;
ctrl |= DPOT_AD5170_2_3_FUSE;
}
return dpot_write_r8d8(dpot, ctrl, value);
case DPOT_UID(AD5170_ID):
if (reg & DPOT_ADDR_OTP) {
tmp = dpot_read_r8d16(dpot, tmp);
if (tmp >> 14) /* Ready to Program? */
return -EFAULT;
ctrl = DPOT_AD5170_2_3_FUSE;
}
return dpot_write_r8d8(dpot, ctrl, value);
case DPOT_UID(AD5272_ID):
case DPOT_UID(AD5274_ID):
dpot_write_r8d8(dpot, DPOT_AD5270_1_2_4_CTRLREG << 2,
DPOT_AD5270_1_2_4_UNLOCK_CMD);
if (reg & DPOT_ADDR_OTP)
return dpot_write_r8d8(dpot,
DPOT_AD5270_1_2_4_STORE_XTPM << 2, 0);
if (dpot->uid == DPOT_UID(AD5274_ID))
value = value << 2;
return dpot_write_r8d8(dpot, (DPOT_AD5270_1_2_4_RDAC << 2) |
(value >> 8), value & 0xFF);
default:
if (reg & DPOT_ADDR_CMD)
return dpot_write_d8(dpot, reg);
if (dpot->max_pos > 256)
return dpot_write_r8d16(dpot, (reg & 0xF8) |
((reg & 0x7) << 1), value);
else
/* All other registers require instruction + data bytes */
return dpot_write_r8d8(dpot, reg, value);
}
}
static s32 dpot_write(struct dpot_data *dpot, u8 reg, u16 value)
{
if (dpot->feat & F_SPI)
return dpot_write_spi(dpot, reg, value);
else
return dpot_write_i2c(dpot, reg, value);
}
/* sysfs functions */
static ssize_t sysfs_show_reg(struct device *dev,
struct device_attribute *attr,
char *buf, u32 reg)
{
struct dpot_data *data = dev_get_drvdata(dev);
s32 value;
if (reg & DPOT_ADDR_OTP_EN)
return sprintf(buf, "%s\n",
test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ?
"enabled" : "disabled");
mutex_lock(&data->update_lock);
value = dpot_read(data, reg);
mutex_unlock(&data->update_lock);
if (value < 0)
return -EINVAL;
/*
* Let someone else deal with converting this ...
* the tolerance is a two-byte value where the MSB
* is a sign + integer value, and the LSB is a
* decimal value. See page 18 of the AD5258
* datasheet (Rev. A) for more details.
*/
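	/*
	 * For example, under that encoding a raw word of 0x0140 would mean
	 * +1 percent plus a fractional LSB of 0x40 (0.25, if the LSB counts
	 * 1/256 % steps); the decoding itself is left to userspace.
	 */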
if (reg & DPOT_REG_TOL)
return sprintf(buf, "0x%04x\n", value & 0xFFFF);
else
return sprintf(buf, "%u\n", value & data->rdac_mask);
}
static ssize_t sysfs_set_reg(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count, u32 reg)
{
struct dpot_data *data = dev_get_drvdata(dev);
unsigned long value;
int err;
if (reg & DPOT_ADDR_OTP_EN) {
if (sysfs_streq(buf, "enabled"))
set_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
else
clear_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask);
return count;
}
if ((reg & DPOT_ADDR_OTP) &&
!test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask))
return -EPERM;
err = kstrtoul(buf, 10, &value);
if (err)
return err;
if (value > data->rdac_mask)
value = data->rdac_mask;
mutex_lock(&data->update_lock);
dpot_write(data, reg, value);
if (reg & DPOT_ADDR_EEPROM)
msleep(26); /* Sleep while the EEPROM updates */
else if (reg & DPOT_ADDR_OTP)
msleep(400); /* Sleep while the OTP updates */
mutex_unlock(&data->update_lock);
return count;
}
static ssize_t sysfs_do_cmd(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count, u32 reg)
{
struct dpot_data *data = dev_get_drvdata(dev);
mutex_lock(&data->update_lock);
dpot_write(data, reg, 0);
mutex_unlock(&data->update_lock);
return count;
}
/* ------------------------------------------------------------------------- */
#define DPOT_DEVICE_SHOW(_name, _reg) static ssize_t \
show_##_name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
return sysfs_show_reg(dev, attr, buf, _reg); \
}
#define DPOT_DEVICE_SET(_name, _reg) static ssize_t \
set_##_name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
return sysfs_set_reg(dev, attr, buf, count, _reg); \
}
#define DPOT_DEVICE_SHOW_SET(name, reg) \
DPOT_DEVICE_SHOW(name, reg) \
DPOT_DEVICE_SET(name, reg) \
static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, set_##name)
#define DPOT_DEVICE_SHOW_ONLY(name, reg) \
DPOT_DEVICE_SHOW(name, reg) \
static DEVICE_ATTR(name, S_IWUSR | S_IRUGO, show_##name, NULL)
DPOT_DEVICE_SHOW_SET(rdac0, DPOT_ADDR_RDAC | DPOT_RDAC0);
DPOT_DEVICE_SHOW_SET(eeprom0, DPOT_ADDR_EEPROM | DPOT_RDAC0);
DPOT_DEVICE_SHOW_ONLY(tolerance0, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC0);
DPOT_DEVICE_SHOW_SET(otp0, DPOT_ADDR_OTP | DPOT_RDAC0);
DPOT_DEVICE_SHOW_SET(otp0en, DPOT_ADDR_OTP_EN | DPOT_RDAC0);
DPOT_DEVICE_SHOW_SET(rdac1, DPOT_ADDR_RDAC | DPOT_RDAC1);
DPOT_DEVICE_SHOW_SET(eeprom1, DPOT_ADDR_EEPROM | DPOT_RDAC1);
DPOT_DEVICE_SHOW_ONLY(tolerance1, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC1);
DPOT_DEVICE_SHOW_SET(otp1, DPOT_ADDR_OTP | DPOT_RDAC1);
DPOT_DEVICE_SHOW_SET(otp1en, DPOT_ADDR_OTP_EN | DPOT_RDAC1);
DPOT_DEVICE_SHOW_SET(rdac2, DPOT_ADDR_RDAC | DPOT_RDAC2);
DPOT_DEVICE_SHOW_SET(eeprom2, DPOT_ADDR_EEPROM | DPOT_RDAC2);
DPOT_DEVICE_SHOW_ONLY(tolerance2, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC2);
DPOT_DEVICE_SHOW_SET(otp2, DPOT_ADDR_OTP | DPOT_RDAC2);
DPOT_DEVICE_SHOW_SET(otp2en, DPOT_ADDR_OTP_EN | DPOT_RDAC2);
DPOT_DEVICE_SHOW_SET(rdac3, DPOT_ADDR_RDAC | DPOT_RDAC3);
DPOT_DEVICE_SHOW_SET(eeprom3, DPOT_ADDR_EEPROM | DPOT_RDAC3);
DPOT_DEVICE_SHOW_ONLY(tolerance3, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC3);
DPOT_DEVICE_SHOW_SET(otp3, DPOT_ADDR_OTP | DPOT_RDAC3);
DPOT_DEVICE_SHOW_SET(otp3en, DPOT_ADDR_OTP_EN | DPOT_RDAC3);
DPOT_DEVICE_SHOW_SET(rdac4, DPOT_ADDR_RDAC | DPOT_RDAC4);
DPOT_DEVICE_SHOW_SET(eeprom4, DPOT_ADDR_EEPROM | DPOT_RDAC4);
DPOT_DEVICE_SHOW_ONLY(tolerance4, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC4);
DPOT_DEVICE_SHOW_SET(otp4, DPOT_ADDR_OTP | DPOT_RDAC4);
DPOT_DEVICE_SHOW_SET(otp4en, DPOT_ADDR_OTP_EN | DPOT_RDAC4);
DPOT_DEVICE_SHOW_SET(rdac5, DPOT_ADDR_RDAC | DPOT_RDAC5);
DPOT_DEVICE_SHOW_SET(eeprom5, DPOT_ADDR_EEPROM | DPOT_RDAC5);
DPOT_DEVICE_SHOW_ONLY(tolerance5, DPOT_ADDR_EEPROM | DPOT_TOL_RDAC5);
DPOT_DEVICE_SHOW_SET(otp5, DPOT_ADDR_OTP | DPOT_RDAC5);
DPOT_DEVICE_SHOW_SET(otp5en, DPOT_ADDR_OTP_EN | DPOT_RDAC5);
static const struct attribute *dpot_attrib_wipers[] = {
&dev_attr_rdac0.attr,
&dev_attr_rdac1.attr,
&dev_attr_rdac2.attr,
&dev_attr_rdac3.attr,
&dev_attr_rdac4.attr,
&dev_attr_rdac5.attr,
NULL
};
static const struct attribute *dpot_attrib_eeprom[] = {
&dev_attr_eeprom0.attr,
&dev_attr_eeprom1.attr,
&dev_attr_eeprom2.attr,
&dev_attr_eeprom3.attr,
&dev_attr_eeprom4.attr,
&dev_attr_eeprom5.attr,
NULL
};
static const struct attribute *dpot_attrib_otp[] = {
&dev_attr_otp0.attr,
&dev_attr_otp1.attr,
&dev_attr_otp2.attr,
&dev_attr_otp3.attr,
&dev_attr_otp4.attr,
&dev_attr_otp5.attr,
NULL
};
static const struct attribute *dpot_attrib_otp_en[] = {
&dev_attr_otp0en.attr,
&dev_attr_otp1en.attr,
&dev_attr_otp2en.attr,
&dev_attr_otp3en.attr,
&dev_attr_otp4en.attr,
&dev_attr_otp5en.attr,
NULL
};
static const struct attribute *dpot_attrib_tolerance[] = {
&dev_attr_tolerance0.attr,
&dev_attr_tolerance1.attr,
&dev_attr_tolerance2.attr,
&dev_attr_tolerance3.attr,
&dev_attr_tolerance4.attr,
&dev_attr_tolerance5.attr,
NULL
};
/* ------------------------------------------------------------------------- */
#define DPOT_DEVICE_DO_CMD(_name, _cmd) static ssize_t \
set_##_name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
return sysfs_do_cmd(dev, attr, buf, count, _cmd); \
} \
static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, NULL, set_##_name)
DPOT_DEVICE_DO_CMD(inc_all, DPOT_INC_ALL);
DPOT_DEVICE_DO_CMD(dec_all, DPOT_DEC_ALL);
DPOT_DEVICE_DO_CMD(inc_all_6db, DPOT_INC_ALL_6DB);
DPOT_DEVICE_DO_CMD(dec_all_6db, DPOT_DEC_ALL_6DB);
static struct attribute *ad525x_attributes_commands[] = {
&dev_attr_inc_all.attr,
&dev_attr_dec_all.attr,
&dev_attr_inc_all_6db.attr,
&dev_attr_dec_all_6db.attr,
NULL
};
static const struct attribute_group ad525x_group_commands = {
.attrs = ad525x_attributes_commands,
};
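/*
 * Illustrative userspace usage (the sysfs path depends on the bus type and
 * device address and is shown here only as an example; the command
 * attributes exist only on parts that support them):
 *	echo 128 > /sys/bus/i2c/devices/0-002f/rdac0	(set wiper 0)
 *	cat /sys/bus/i2c/devices/0-002f/tolerance0	(raw tolerance word)
 *	echo 1 > /sys/bus/i2c/devices/0-002f/inc_all_6db
 */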
static int ad_dpot_add_files(struct device *dev,
unsigned int features, unsigned int rdac)
{
int err = sysfs_create_file(&dev->kobj,
dpot_attrib_wipers[rdac]);
if (features & F_CMD_EEP)
err |= sysfs_create_file(&dev->kobj,
dpot_attrib_eeprom[rdac]);
if (features & F_CMD_TOL)
err |= sysfs_create_file(&dev->kobj,
dpot_attrib_tolerance[rdac]);
if (features & F_CMD_OTP) {
err |= sysfs_create_file(&dev->kobj,
dpot_attrib_otp_en[rdac]);
err |= sysfs_create_file(&dev->kobj,
dpot_attrib_otp[rdac]);
}
if (err)
dev_err(dev, "failed to register sysfs hooks for RDAC%d\n",
rdac);
return err;
}
static inline void ad_dpot_remove_files(struct device *dev,
unsigned int features, unsigned int rdac)
{
sysfs_remove_file(&dev->kobj,
dpot_attrib_wipers[rdac]);
if (features & F_CMD_EEP)
sysfs_remove_file(&dev->kobj,
dpot_attrib_eeprom[rdac]);
if (features & F_CMD_TOL)
sysfs_remove_file(&dev->kobj,
dpot_attrib_tolerance[rdac]);
if (features & F_CMD_OTP) {
sysfs_remove_file(&dev->kobj,
dpot_attrib_otp_en[rdac]);
sysfs_remove_file(&dev->kobj,
dpot_attrib_otp[rdac]);
}
}
int ad_dpot_probe(struct device *dev,
struct ad_dpot_bus_data *bdata, unsigned long devid,
const char *name)
{
struct dpot_data *data;
int i, err = 0;
data = kzalloc(sizeof(struct dpot_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
dev_set_drvdata(dev, data);
mutex_init(&data->update_lock);
data->bdata = *bdata;
data->devid = devid;
data->max_pos = 1 << DPOT_MAX_POS(devid);
data->rdac_mask = data->max_pos - 1;
data->feat = DPOT_FEAT(devid);
data->uid = DPOT_UID(devid);
data->wipers = DPOT_WIPERS(devid);
for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
if (data->wipers & (1 << i)) {
err = ad_dpot_add_files(dev, data->feat, i);
if (err)
goto exit_remove_files;
/* power-up midscale */
if (data->feat & F_RDACS_WONLY)
data->rdac_cache[i] = data->max_pos / 2;
}
if (data->feat & F_CMD_INC)
err = sysfs_create_group(&dev->kobj, &ad525x_group_commands);
if (err) {
dev_err(dev, "failed to register sysfs hooks\n");
		goto exit_remove_files;
}
dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
name, data->max_pos);
return 0;
exit_remove_files:
for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
if (data->wipers & (1 << i))
ad_dpot_remove_files(dev, data->feat, i);
exit_free:
kfree(data);
dev_set_drvdata(dev, NULL);
exit:
dev_err(dev, "failed to create client for %s ID 0x%lX\n",
name, devid);
return err;
}
EXPORT_SYMBOL(ad_dpot_probe);
void ad_dpot_remove(struct device *dev)
{
struct dpot_data *data = dev_get_drvdata(dev);
int i;
for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
if (data->wipers & (1 << i))
ad_dpot_remove_files(dev, data->feat, i);
kfree(data);
}
EXPORT_SYMBOL(ad_dpot_remove);
MODULE_AUTHOR("Chris Verges <[email protected]>, "
"Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("Digital potentiometer driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/ad525x_dpot.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tifm_core.c - TI FlashMedia driver
*
* Copyright (C) 2006 Alex Dubov <[email protected]>
*/
#include <linux/tifm.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/module.h>
#define DRIVER_NAME "tifm_core"
#define DRIVER_VERSION "0.8"
static struct workqueue_struct *workqueue;
static DEFINE_IDR(tifm_adapter_idr);
static DEFINE_SPINLOCK(tifm_adapter_lock);
static const char *tifm_media_type_name(unsigned char type, unsigned char nt)
{
const char *card_type_name[3][3] = {
{ "SmartMedia/xD", "MemoryStick", "MMC/SD" },
{ "XD", "MS", "SD"},
{ "xd", "ms", "sd"}
};
if (nt > 2 || type < 1 || type > 3)
return NULL;
return card_type_name[nt][type - 1];
}
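/*
 * For example, a MemoryStick socket (type 2) yields "MemoryStick" for
 * nt = 0 (log messages), "MS" for nt = 1 (uevents) and "ms" for nt = 2,
 * which is used when building device names such as "tifm_ms0:1".
 */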
static int tifm_dev_match(struct tifm_dev *sock, struct tifm_device_id *id)
{
if (sock->type == id->type)
return 1;
return 0;
}
static int tifm_bus_match(struct device *dev, struct device_driver *drv)
{
struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
struct tifm_driver *fm_drv = container_of(drv, struct tifm_driver,
driver);
struct tifm_device_id *ids = fm_drv->id_table;
if (ids) {
while (ids->type) {
if (tifm_dev_match(sock, ids))
return 1;
++ids;
}
}
return 0;
}
static int tifm_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
const struct tifm_dev *sock = container_of_const(dev, struct tifm_dev, dev);
if (add_uevent_var(env, "TIFM_CARD_TYPE=%s", tifm_media_type_name(sock->type, 1)))
return -ENOMEM;
return 0;
}
static int tifm_device_probe(struct device *dev)
{
struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver,
driver);
int rc = -ENODEV;
get_device(dev);
if (dev->driver && drv->probe) {
rc = drv->probe(sock);
if (!rc)
return 0;
}
put_device(dev);
return rc;
}
static void tifm_dummy_event(struct tifm_dev *sock)
{
return;
}
static void tifm_device_remove(struct device *dev)
{
struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver,
driver);
if (dev->driver && drv->remove) {
sock->card_event = tifm_dummy_event;
sock->data_event = tifm_dummy_event;
drv->remove(sock);
sock->dev.driver = NULL;
}
put_device(dev);
}
#ifdef CONFIG_PM
static int tifm_device_suspend(struct device *dev, pm_message_t state)
{
struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver,
driver);
if (dev->driver && drv->suspend)
return drv->suspend(sock, state);
return 0;
}
static int tifm_device_resume(struct device *dev)
{
struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver,
driver);
if (dev->driver && drv->resume)
return drv->resume(sock);
return 0;
}
#else
#define tifm_device_suspend NULL
#define tifm_device_resume NULL
#endif /* CONFIG_PM */
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
return sprintf(buf, "%x", sock->type);
}
static DEVICE_ATTR_RO(type);
static struct attribute *tifm_dev_attrs[] = {
&dev_attr_type.attr,
NULL,
};
ATTRIBUTE_GROUPS(tifm_dev);
static struct bus_type tifm_bus_type = {
.name = "tifm",
.dev_groups = tifm_dev_groups,
.match = tifm_bus_match,
.uevent = tifm_uevent,
.probe = tifm_device_probe,
.remove = tifm_device_remove,
.suspend = tifm_device_suspend,
.resume = tifm_device_resume
};
static void tifm_free(struct device *dev)
{
struct tifm_adapter *fm = container_of(dev, struct tifm_adapter, dev);
kfree(fm);
}
static struct class tifm_adapter_class = {
.name = "tifm_adapter",
.dev_release = tifm_free
};
struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets,
struct device *dev)
{
struct tifm_adapter *fm;
fm = kzalloc(struct_size(fm, sockets, num_sockets), GFP_KERNEL);
if (fm) {
fm->dev.class = &tifm_adapter_class;
fm->dev.parent = dev;
device_initialize(&fm->dev);
spin_lock_init(&fm->lock);
fm->num_sockets = num_sockets;
}
return fm;
}
EXPORT_SYMBOL(tifm_alloc_adapter);
int tifm_add_adapter(struct tifm_adapter *fm)
{
int rc;
idr_preload(GFP_KERNEL);
spin_lock(&tifm_adapter_lock);
rc = idr_alloc(&tifm_adapter_idr, fm, 0, 0, GFP_NOWAIT);
if (rc >= 0)
fm->id = rc;
spin_unlock(&tifm_adapter_lock);
idr_preload_end();
if (rc < 0)
return rc;
dev_set_name(&fm->dev, "tifm%u", fm->id);
rc = device_add(&fm->dev);
if (rc) {
spin_lock(&tifm_adapter_lock);
idr_remove(&tifm_adapter_idr, fm->id);
spin_unlock(&tifm_adapter_lock);
}
return rc;
}
EXPORT_SYMBOL(tifm_add_adapter);
void tifm_remove_adapter(struct tifm_adapter *fm)
{
unsigned int cnt;
flush_workqueue(workqueue);
for (cnt = 0; cnt < fm->num_sockets; ++cnt) {
if (fm->sockets[cnt])
device_unregister(&fm->sockets[cnt]->dev);
}
spin_lock(&tifm_adapter_lock);
idr_remove(&tifm_adapter_idr, fm->id);
spin_unlock(&tifm_adapter_lock);
device_del(&fm->dev);
}
EXPORT_SYMBOL(tifm_remove_adapter);
void tifm_free_adapter(struct tifm_adapter *fm)
{
put_device(&fm->dev);
}
EXPORT_SYMBOL(tifm_free_adapter);
void tifm_free_device(struct device *dev)
{
struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
kfree(sock);
}
EXPORT_SYMBOL(tifm_free_device);
struct tifm_dev *tifm_alloc_device(struct tifm_adapter *fm, unsigned int id,
unsigned char type)
{
struct tifm_dev *sock = NULL;
if (!tifm_media_type_name(type, 0))
return sock;
sock = kzalloc(sizeof(struct tifm_dev), GFP_KERNEL);
if (sock) {
spin_lock_init(&sock->lock);
sock->type = type;
sock->socket_id = id;
sock->card_event = tifm_dummy_event;
sock->data_event = tifm_dummy_event;
sock->dev.parent = fm->dev.parent;
sock->dev.bus = &tifm_bus_type;
sock->dev.dma_mask = fm->dev.parent->dma_mask;
sock->dev.release = tifm_free_device;
dev_set_name(&sock->dev, "tifm_%s%u:%u",
tifm_media_type_name(type, 2), fm->id, id);
printk(KERN_INFO DRIVER_NAME
": %s card detected in socket %u:%u\n",
tifm_media_type_name(type, 0), fm->id, id);
}
return sock;
}
EXPORT_SYMBOL(tifm_alloc_device);
void tifm_eject(struct tifm_dev *sock)
{
struct tifm_adapter *fm = dev_get_drvdata(sock->dev.parent);
fm->eject(fm, sock);
}
EXPORT_SYMBOL(tifm_eject);
int tifm_has_ms_pif(struct tifm_dev *sock)
{
struct tifm_adapter *fm = dev_get_drvdata(sock->dev.parent);
return fm->has_ms_pif(fm, sock);
}
EXPORT_SYMBOL(tifm_has_ms_pif);
int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
int direction)
{
return dma_map_sg(&to_pci_dev(sock->dev.parent)->dev, sg, nents,
direction);
}
EXPORT_SYMBOL(tifm_map_sg);
void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
int direction)
{
dma_unmap_sg(&to_pci_dev(sock->dev.parent)->dev, sg, nents, direction);
}
EXPORT_SYMBOL(tifm_unmap_sg);
void tifm_queue_work(struct work_struct *work)
{
queue_work(workqueue, work);
}
EXPORT_SYMBOL(tifm_queue_work);
int tifm_register_driver(struct tifm_driver *drv)
{
drv->driver.bus = &tifm_bus_type;
return driver_register(&drv->driver);
}
EXPORT_SYMBOL(tifm_register_driver);
void tifm_unregister_driver(struct tifm_driver *drv)
{
driver_unregister(&drv->driver);
}
EXPORT_SYMBOL(tifm_unregister_driver);
static int __init tifm_init(void)
{
int rc;
workqueue = create_freezable_workqueue("tifm");
if (!workqueue)
return -ENOMEM;
rc = bus_register(&tifm_bus_type);
if (rc)
goto err_out_wq;
rc = class_register(&tifm_adapter_class);
if (!rc)
return 0;
bus_unregister(&tifm_bus_type);
err_out_wq:
destroy_workqueue(workqueue);
return rc;
}
static void __exit tifm_exit(void)
{
class_unregister(&tifm_adapter_class);
bus_unregister(&tifm_bus_type);
destroy_workqueue(workqueue);
}
subsys_initcall(tifm_init);
module_exit(tifm_exit);
MODULE_AUTHOR("Alex Dubov");
MODULE_DESCRIPTION("TI FlashMedia core driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
| linux-master | drivers/misc/tifm_core.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* isl29003.c - Linux kernel module for
* Intersil ISL29003 ambient light sensor
*
* See file:Documentation/misc-devices/isl29003.rst
*
* Copyright (c) 2009 Daniel Mack <[email protected]>
*
* Based on code written by
* Rodolfo Giometti <[email protected]>
* Eurotech S.p.A. <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#define ISL29003_DRV_NAME "isl29003"
#define DRIVER_VERSION "1.0"
#define ISL29003_REG_COMMAND 0x00
#define ISL29003_ADC_ENABLED (1 << 7)
#define ISL29003_ADC_PD (1 << 6)
#define ISL29003_TIMING_INT (1 << 5)
#define ISL29003_MODE_SHIFT (2)
#define ISL29003_MODE_MASK (0x3 << ISL29003_MODE_SHIFT)
#define ISL29003_RES_SHIFT (0)
#define ISL29003_RES_MASK (0x3 << ISL29003_RES_SHIFT)
#define ISL29003_REG_CONTROL 0x01
#define ISL29003_INT_FLG (1 << 5)
#define ISL29003_RANGE_SHIFT (2)
#define ISL29003_RANGE_MASK (0x3 << ISL29003_RANGE_SHIFT)
#define ISL29003_INT_PERSISTS_SHIFT (0)
#define ISL29003_INT_PERSISTS_MASK (0xf << ISL29003_INT_PERSISTS_SHIFT)
#define ISL29003_REG_IRQ_THRESH_HI 0x02
#define ISL29003_REG_IRQ_THRESH_LO 0x03
#define ISL29003_REG_LSB_SENSOR 0x04
#define ISL29003_REG_MSB_SENSOR 0x05
#define ISL29003_REG_LSB_TIMER 0x06
#define ISL29003_REG_MSB_TIMER 0x07
#define ISL29003_NUM_CACHABLE_REGS 4
struct isl29003_data {
struct i2c_client *client;
struct mutex lock;
u8 reg_cache[ISL29003_NUM_CACHABLE_REGS];
u8 power_state_before_suspend;
};
static int gain_range[] = {
1000, 4000, 16000, 64000
};
/*
* register access helpers
*/
static int __isl29003_read_reg(struct i2c_client *client,
u32 reg, u8 mask, u8 shift)
{
struct isl29003_data *data = i2c_get_clientdata(client);
return (data->reg_cache[reg] & mask) >> shift;
}
static int __isl29003_write_reg(struct i2c_client *client,
u32 reg, u8 mask, u8 shift, u8 val)
{
struct isl29003_data *data = i2c_get_clientdata(client);
int ret = 0;
u8 tmp;
if (reg >= ISL29003_NUM_CACHABLE_REGS)
return -EINVAL;
mutex_lock(&data->lock);
tmp = data->reg_cache[reg];
tmp &= ~mask;
tmp |= val << shift;
ret = i2c_smbus_write_byte_data(client, reg, tmp);
if (!ret)
data->reg_cache[reg] = tmp;
mutex_unlock(&data->lock);
return ret;
}
/*
* internally used functions
*/
/* range */
static int isl29003_get_range(struct i2c_client *client)
{
return __isl29003_read_reg(client, ISL29003_REG_CONTROL,
ISL29003_RANGE_MASK, ISL29003_RANGE_SHIFT);
}
static int isl29003_set_range(struct i2c_client *client, int range)
{
return __isl29003_write_reg(client, ISL29003_REG_CONTROL,
ISL29003_RANGE_MASK, ISL29003_RANGE_SHIFT, range);
}
/* resolution */
static int isl29003_get_resolution(struct i2c_client *client)
{
return __isl29003_read_reg(client, ISL29003_REG_COMMAND,
ISL29003_RES_MASK, ISL29003_RES_SHIFT);
}
static int isl29003_set_resolution(struct i2c_client *client, int res)
{
return __isl29003_write_reg(client, ISL29003_REG_COMMAND,
ISL29003_RES_MASK, ISL29003_RES_SHIFT, res);
}
/* mode */
static int isl29003_get_mode(struct i2c_client *client)
{
return __isl29003_read_reg(client, ISL29003_REG_COMMAND,
ISL29003_MODE_MASK, ISL29003_MODE_SHIFT);
}
static int isl29003_set_mode(struct i2c_client *client, int mode)
{
return __isl29003_write_reg(client, ISL29003_REG_COMMAND,
ISL29003_MODE_MASK, ISL29003_MODE_SHIFT, mode);
}
/* power_state */
static int isl29003_set_power_state(struct i2c_client *client, int state)
{
return __isl29003_write_reg(client, ISL29003_REG_COMMAND,
ISL29003_ADC_ENABLED | ISL29003_ADC_PD, 0,
state ? ISL29003_ADC_ENABLED : ISL29003_ADC_PD);
}
static int isl29003_get_power_state(struct i2c_client *client)
{
struct isl29003_data *data = i2c_get_clientdata(client);
u8 cmdreg = data->reg_cache[ISL29003_REG_COMMAND];
return ~cmdreg & ISL29003_ADC_PD;
}
static int isl29003_get_adc_value(struct i2c_client *client)
{
struct isl29003_data *data = i2c_get_clientdata(client);
int lsb, msb, range, bitdepth;
mutex_lock(&data->lock);
lsb = i2c_smbus_read_byte_data(client, ISL29003_REG_LSB_SENSOR);
if (lsb < 0) {
mutex_unlock(&data->lock);
return lsb;
}
msb = i2c_smbus_read_byte_data(client, ISL29003_REG_MSB_SENSOR);
mutex_unlock(&data->lock);
if (msb < 0)
return msb;
range = isl29003_get_range(client);
bitdepth = (4 - isl29003_get_resolution(client)) * 4;
return (((msb << 8) | lsb) * gain_range[range]) >> bitdepth;
}
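/*
 * Worked example with purely illustrative numbers: range = 1
 * (gain_range[1] = 4000), resolution = 0 (bitdepth = 16) and a raw
 * reading of 0x8000 yield (0x8000 * 4000) >> 16 = 2000.
 */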
/*
* sysfs layer
*/
/* range */
static ssize_t isl29003_show_range(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
return sysfs_emit(buf, "%i\n", isl29003_get_range(client));
}
static ssize_t isl29003_store_range(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 3)
return -EINVAL;
ret = isl29003_set_range(client, val);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(range, S_IWUSR | S_IRUGO,
isl29003_show_range, isl29003_store_range);
/* resolution */
static ssize_t isl29003_show_resolution(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
return sysfs_emit(buf, "%d\n", isl29003_get_resolution(client));
}
static ssize_t isl29003_store_resolution(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 3)
return -EINVAL;
ret = isl29003_set_resolution(client, val);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(resolution, S_IWUSR | S_IRUGO,
isl29003_show_resolution, isl29003_store_resolution);
/* mode */
static ssize_t isl29003_show_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
return sysfs_emit(buf, "%d\n", isl29003_get_mode(client));
}
static ssize_t isl29003_store_mode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 2)
return -EINVAL;
ret = isl29003_set_mode(client, val);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO,
isl29003_show_mode, isl29003_store_mode);
/* power state */
static ssize_t isl29003_show_power_state(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
return sysfs_emit(buf, "%d\n", isl29003_get_power_state(client));
}
static ssize_t isl29003_store_power_state(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned long val;
int ret;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val > 1)
return -EINVAL;
ret = isl29003_set_power_state(client, val);
return ret ? ret : count;
}
static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
isl29003_show_power_state, isl29003_store_power_state);
/* lux */
static ssize_t isl29003_show_lux(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
/* No LUX data if not operational */
if (!isl29003_get_power_state(client))
return -EBUSY;
return sysfs_emit(buf, "%d\n", isl29003_get_adc_value(client));
}
static DEVICE_ATTR(lux, S_IRUGO, isl29003_show_lux, NULL);
static struct attribute *isl29003_attributes[] = {
&dev_attr_range.attr,
&dev_attr_resolution.attr,
&dev_attr_mode.attr,
&dev_attr_power_state.attr,
&dev_attr_lux.attr,
NULL
};
static const struct attribute_group isl29003_attr_group = {
.attrs = isl29003_attributes,
};
static int isl29003_init_client(struct i2c_client *client)
{
struct isl29003_data *data = i2c_get_clientdata(client);
int i;
	/*
	 * Read all the registers once to fill the cache.
	 * If one of the reads fails, we consider the init failed.
	 */
for (i = 0; i < ARRAY_SIZE(data->reg_cache); i++) {
int v = i2c_smbus_read_byte_data(client, i);
if (v < 0)
return -ENODEV;
data->reg_cache[i] = v;
}
/* set defaults */
isl29003_set_range(client, 0);
isl29003_set_resolution(client, 0);
isl29003_set_mode(client, 0);
isl29003_set_power_state(client, 0);
return 0;
}
/*
* I2C layer
*/
static int isl29003_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct isl29003_data *data;
int err = 0;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
return -EIO;
data = kzalloc(sizeof(struct isl29003_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
data->client = client;
i2c_set_clientdata(client, data);
mutex_init(&data->lock);
/* initialize the ISL29003 chip */
err = isl29003_init_client(client);
if (err)
goto exit_kfree;
/* register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &isl29003_attr_group);
if (err)
goto exit_kfree;
dev_info(&client->dev, "driver version %s enabled\n", DRIVER_VERSION);
return 0;
exit_kfree:
kfree(data);
return err;
}
static void isl29003_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &isl29003_attr_group);
isl29003_set_power_state(client, 0);
kfree(i2c_get_clientdata(client));
}
#ifdef CONFIG_PM_SLEEP
static int isl29003_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct isl29003_data *data = i2c_get_clientdata(client);
data->power_state_before_suspend = isl29003_get_power_state(client);
return isl29003_set_power_state(client, 0);
}
static int isl29003_resume(struct device *dev)
{
int i;
struct i2c_client *client = to_i2c_client(dev);
struct isl29003_data *data = i2c_get_clientdata(client);
/* restore registers from cache */
for (i = 0; i < ARRAY_SIZE(data->reg_cache); i++)
if (i2c_smbus_write_byte_data(client, i, data->reg_cache[i]))
return -EIO;
return isl29003_set_power_state(client,
data->power_state_before_suspend);
}
static SIMPLE_DEV_PM_OPS(isl29003_pm_ops, isl29003_suspend, isl29003_resume);
#define ISL29003_PM_OPS (&isl29003_pm_ops)
#else
#define ISL29003_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static const struct i2c_device_id isl29003_id[] = {
{ "isl29003", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, isl29003_id);
static struct i2c_driver isl29003_driver = {
.driver = {
.name = ISL29003_DRV_NAME,
.pm = ISL29003_PM_OPS,
},
.probe = isl29003_probe,
.remove = isl29003_remove,
.id_table = isl29003_id,
};
module_i2c_driver(isl29003_driver);
MODULE_AUTHOR("Daniel Mack <[email protected]>");
MODULE_DESCRIPTION("ISL29003 ambient light sensor driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRIVER_VERSION);
| linux-master | drivers/misc/isl29003.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SRAM protect-exec region helper functions
*
* Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/
* Dave Gerlach
*/
#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/sram.h>
#include <linux/set_memory.h>
#include <asm/fncpy.h>
#include "sram.h"
static DEFINE_MUTEX(exec_pool_list_mutex);
static LIST_HEAD(exec_pool_list);
int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
struct sram_partition *part)
{
unsigned long base = (unsigned long)part->base;
unsigned long end = base + block->size;
if (!PAGE_ALIGNED(base) || !PAGE_ALIGNED(end)) {
dev_err(sram->dev,
"SRAM pool marked with 'protect-exec' is not page aligned and will not be created.\n");
return -ENOMEM;
}
return 0;
}
int sram_add_protect_exec(struct sram_partition *part)
{
mutex_lock(&exec_pool_list_mutex);
list_add_tail(&part->list, &exec_pool_list);
mutex_unlock(&exec_pool_list_mutex);
return 0;
}
/**
* sram_exec_copy - copy data to a protected executable region of sram
*
* @pool: struct gen_pool retrieved that is part of this sram
* @dst: Destination address for the copy, that must be inside pool
* @src: Source address for the data to copy
* @size: Size of copy to perform, which starting from dst, must reside in pool
*
* Return: Address for copied data that can safely be called through function
* pointer, or NULL if problem.
*
* This helper function allows sram driver to act as central control location
* of 'protect-exec' pools which are normal sram pools but are always set
* read-only and executable except when copying data to them, at which point
* they are set to read-write non-executable, to make sure no memory is
* writeable and executable at the same time. This region must be page-aligned
* and is checked during probe, otherwise page attribute manipulation would
* not be possible. Care must be taken to only call the returned address as
* dst address is not guaranteed to be safely callable.
*
* NOTE: This function uses the fncpy macro to move code to the executable
* region. Some architectures have strict requirements for relocating
* executable code, so fncpy is a macro that must be defined by any arch
* making use of this functionality that guarantees a safe copy of exec
* data and returns a safe address that can be called as a C function
* pointer.
*/
void *sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
size_t size)
{
struct sram_partition *part = NULL, *p;
unsigned long base;
int pages;
void *dst_cpy;
int ret;
mutex_lock(&exec_pool_list_mutex);
list_for_each_entry(p, &exec_pool_list, list) {
if (p->pool == pool)
part = p;
}
mutex_unlock(&exec_pool_list_mutex);
if (!part)
return NULL;
if (!gen_pool_has_addr(pool, (unsigned long)dst, size))
return NULL;
base = (unsigned long)part->base;
pages = PAGE_ALIGN(size) / PAGE_SIZE;
mutex_lock(&part->lock);
ret = set_memory_nx((unsigned long)base, pages);
if (ret)
goto error_out;
ret = set_memory_rw((unsigned long)base, pages);
if (ret)
goto error_out;
dst_cpy = fncpy(dst, src, size);
ret = set_memory_rox((unsigned long)base, pages);
if (ret)
goto error_out;
mutex_unlock(&part->lock);
return dst_cpy;
error_out:
mutex_unlock(&part->lock);
return NULL;
}
EXPORT_SYMBOL_GPL(sram_exec_copy);
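/*
 * Minimal usage sketch (the pool, size and copied function are illustrative
 * only, and error handling is omitted; the gen_pool must come from a
 * "protect-exec" SRAM partition):
 *
 *	unsigned long dst = gen_pool_alloc(pool, size);
 *	void (*fn)(void);
 *
 *	fn = sram_exec_copy(pool, (void *)dst, &my_sram_code, size);
 *	if (fn)
 *		fn();
 */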
| linux-master | drivers/misc/sram-exec.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* tsl2550.c - Linux kernel modules for ambient light sensor
*
* Copyright (C) 2007 Rodolfo Giometti <[email protected]>
* Copyright (C) 2007 Eurotech S.p.A. <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
#define TSL2550_DRV_NAME "tsl2550"
#define DRIVER_VERSION "1.2"
/*
* Defines
*/
#define TSL2550_POWER_DOWN 0x00
#define TSL2550_POWER_UP 0x03
#define TSL2550_STANDARD_RANGE 0x18
#define TSL2550_EXTENDED_RANGE 0x1d
#define TSL2550_READ_ADC0 0x43
#define TSL2550_READ_ADC1 0x83
/*
* Structs
*/
struct tsl2550_data {
struct i2c_client *client;
struct mutex update_lock;
unsigned int power_state:1;
unsigned int operating_mode:1;
};
/*
* Global data
*/
static const u8 TSL2550_MODE_RANGE[2] = {
TSL2550_STANDARD_RANGE, TSL2550_EXTENDED_RANGE,
};
/*
* Management functions
*/
static int tsl2550_set_operating_mode(struct i2c_client *client, int mode)
{
struct tsl2550_data *data = i2c_get_clientdata(client);
int ret = i2c_smbus_write_byte(client, TSL2550_MODE_RANGE[mode]);
data->operating_mode = mode;
return ret;
}
static int tsl2550_set_power_state(struct i2c_client *client, int state)
{
struct tsl2550_data *data = i2c_get_clientdata(client);
int ret;
if (state == 0)
ret = i2c_smbus_write_byte(client, TSL2550_POWER_DOWN);
else {
ret = i2c_smbus_write_byte(client, TSL2550_POWER_UP);
		/* On power-up we should also reset the operating mode... */
tsl2550_set_operating_mode(client, data->operating_mode);
}
data->power_state = state;
return ret;
}
static int tsl2550_get_adc_value(struct i2c_client *client, u8 cmd)
{
int ret;
ret = i2c_smbus_read_byte_data(client, cmd);
if (ret < 0)
return ret;
if (!(ret & 0x80))
return -EAGAIN;
return ret & 0x7f; /* remove the "valid" bit */
}
/*
* LUX calculation
*/
#define TSL2550_MAX_LUX 1846
static const u8 ratio_lut[] = {
100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 98, 98, 98, 98, 98,
98, 98, 97, 97, 97, 97, 97, 96,
96, 96, 96, 95, 95, 95, 94, 94,
93, 93, 93, 92, 92, 91, 91, 90,
89, 89, 88, 87, 87, 86, 85, 84,
83, 82, 81, 80, 79, 78, 77, 75,
74, 73, 71, 69, 68, 66, 64, 62,
60, 58, 56, 54, 52, 49, 47, 44,
42, 41, 40, 40, 39, 39, 38, 38,
37, 37, 37, 36, 36, 36, 35, 35,
35, 35, 34, 34, 34, 34, 33, 33,
33, 33, 32, 32, 32, 32, 32, 31,
31, 31, 31, 31, 30, 30, 30, 30,
30,
};
static const u16 count_lut[] = {
0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15,
16, 18, 20, 22, 24, 26, 28, 30,
32, 34, 36, 38, 40, 42, 44, 46,
49, 53, 57, 61, 65, 69, 73, 77,
81, 85, 89, 93, 97, 101, 105, 109,
115, 123, 131, 139, 147, 155, 163, 171,
179, 187, 195, 203, 211, 219, 227, 235,
247, 263, 279, 295, 311, 327, 343, 359,
375, 391, 407, 423, 439, 455, 471, 487,
511, 543, 575, 607, 639, 671, 703, 735,
767, 799, 831, 863, 895, 927, 959, 991,
1039, 1103, 1167, 1231, 1295, 1359, 1423, 1487,
1551, 1615, 1679, 1743, 1807, 1871, 1935, 1999,
2095, 2223, 2351, 2479, 2607, 2735, 2863, 2991,
3119, 3247, 3375, 3503, 3631, 3759, 3887, 4015,
};
/*
 * This function is described in the Taos TSL2550 Designer's Notebook,
 * pages 2 and 3.
*/
static int tsl2550_calculate_lux(u8 ch0, u8 ch1)
{
unsigned int lux;
/* Look up count from channel values */
u16 c0 = count_lut[ch0];
u16 c1 = count_lut[ch1];
	/* Avoid division by zero; count 1 cannot be greater than count 0 */
if (c1 <= c0)
if (c0) {
/*
* Calculate ratio.
* Note: the "128" is a scaling factor
*/
u8 r = c1 * 128 / c0;
/* Calculate LUX */
lux = ((c0 - c1) * ratio_lut[r]) / 256;
} else
lux = 0;
else
return 0;
/* LUX range check */
return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
}
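/*
 * Worked example with illustrative chord values: ch0 = 100 and ch1 = 64
 * give c0 = 1295 and c1 = 247, so r = 247 * 128 / 1295 = 24,
 * ratio_lut[24] = 99 and lux = (1295 - 247) * 99 / 256 = 405.
 */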
/*
* SysFS support
*/
static ssize_t tsl2550_show_power_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev));
return sprintf(buf, "%u\n", data->power_state);
}
static ssize_t tsl2550_store_power_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct tsl2550_data *data = i2c_get_clientdata(client);
unsigned long val = simple_strtoul(buf, NULL, 10);
int ret;
if (val > 1)
return -EINVAL;
mutex_lock(&data->update_lock);
ret = tsl2550_set_power_state(client, val);
mutex_unlock(&data->update_lock);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(power_state, S_IWUSR | S_IRUGO,
tsl2550_show_power_state, tsl2550_store_power_state);
static ssize_t tsl2550_show_operating_mode(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tsl2550_data *data = i2c_get_clientdata(to_i2c_client(dev));
return sprintf(buf, "%u\n", data->operating_mode);
}
static ssize_t tsl2550_store_operating_mode(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct tsl2550_data *data = i2c_get_clientdata(client);
unsigned long val = simple_strtoul(buf, NULL, 10);
int ret;
if (val > 1)
return -EINVAL;
if (data->power_state == 0)
return -EBUSY;
mutex_lock(&data->update_lock);
ret = tsl2550_set_operating_mode(client, val);
mutex_unlock(&data->update_lock);
if (ret < 0)
return ret;
return count;
}
static DEVICE_ATTR(operating_mode, S_IWUSR | S_IRUGO,
tsl2550_show_operating_mode, tsl2550_store_operating_mode);
static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf)
{
struct tsl2550_data *data = i2c_get_clientdata(client);
u8 ch0, ch1;
int ret;
ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC0);
if (ret < 0)
return ret;
ch0 = ret;
ret = tsl2550_get_adc_value(client, TSL2550_READ_ADC1);
if (ret < 0)
return ret;
ch1 = ret;
/* Do the job */
ret = tsl2550_calculate_lux(ch0, ch1);
if (ret < 0)
return ret;
if (data->operating_mode == 1)
ret *= 5;
return sprintf(buf, "%d\n", ret);
}
static ssize_t tsl2550_show_lux1_input(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct tsl2550_data *data = i2c_get_clientdata(client);
int ret;
/* No LUX data if not operational */
if (!data->power_state)
return -EBUSY;
mutex_lock(&data->update_lock);
ret = __tsl2550_show_lux(client, buf);
mutex_unlock(&data->update_lock);
return ret;
}
static DEVICE_ATTR(lux1_input, S_IRUGO,
tsl2550_show_lux1_input, NULL);
static struct attribute *tsl2550_attributes[] = {
&dev_attr_power_state.attr,
&dev_attr_operating_mode.attr,
&dev_attr_lux1_input.attr,
NULL
};
static const struct attribute_group tsl2550_attr_group = {
.attrs = tsl2550_attributes,
};
/*
* Initialization function
*/
static int tsl2550_init_client(struct i2c_client *client)
{
struct tsl2550_data *data = i2c_get_clientdata(client);
int err;
/*
	 * Probe the chip. To do so we try to power up the device and then
	 * read back the 0x03 code.
*/
err = i2c_smbus_read_byte_data(client, TSL2550_POWER_UP);
if (err < 0)
return err;
if (err != TSL2550_POWER_UP)
return -ENODEV;
data->power_state = 1;
/* Set the default operating mode */
err = i2c_smbus_write_byte(client,
TSL2550_MODE_RANGE[data->operating_mode]);
if (err < 0)
return err;
return 0;
}
/*
* I2C init/probing/exit functions
*/
static struct i2c_driver tsl2550_driver;
static int tsl2550_probe(struct i2c_client *client)
{
struct i2c_adapter *adapter = client->adapter;
struct tsl2550_data *data;
int *opmode, err = 0;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE
| I2C_FUNC_SMBUS_READ_BYTE_DATA)) {
err = -EIO;
goto exit;
}
data = kzalloc(sizeof(struct tsl2550_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
data->client = client;
i2c_set_clientdata(client, data);
/* Check platform data */
opmode = client->dev.platform_data;
if (opmode) {
if (*opmode < 0 || *opmode > 1) {
dev_err(&client->dev, "invalid operating_mode (%d)\n",
*opmode);
err = -EINVAL;
goto exit_kfree;
}
data->operating_mode = *opmode;
} else
data->operating_mode = 0; /* default mode is standard */
dev_info(&client->dev, "%s operating mode\n",
data->operating_mode ? "extended" : "standard");
mutex_init(&data->update_lock);
/* Initialize the TSL2550 chip */
err = tsl2550_init_client(client);
if (err)
goto exit_kfree;
/* Register sysfs hooks */
err = sysfs_create_group(&client->dev.kobj, &tsl2550_attr_group);
if (err)
goto exit_kfree;
dev_info(&client->dev, "support ver. %s enabled\n", DRIVER_VERSION);
return 0;
exit_kfree:
kfree(data);
exit:
return err;
}
static void tsl2550_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &tsl2550_attr_group);
/* Power down the device */
tsl2550_set_power_state(client, 0);
kfree(i2c_get_clientdata(client));
}
#ifdef CONFIG_PM_SLEEP
static int tsl2550_suspend(struct device *dev)
{
return tsl2550_set_power_state(to_i2c_client(dev), 0);
}
static int tsl2550_resume(struct device *dev)
{
return tsl2550_set_power_state(to_i2c_client(dev), 1);
}
static SIMPLE_DEV_PM_OPS(tsl2550_pm_ops, tsl2550_suspend, tsl2550_resume);
#define TSL2550_PM_OPS (&tsl2550_pm_ops)
#else
#define TSL2550_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static const struct i2c_device_id tsl2550_id[] = {
{ "tsl2550", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tsl2550_id);
static const struct of_device_id tsl2550_of_match[] = {
{ .compatible = "taos,tsl2550" },
{ }
};
MODULE_DEVICE_TABLE(of, tsl2550_of_match);
static struct i2c_driver tsl2550_driver = {
.driver = {
.name = TSL2550_DRV_NAME,
.of_match_table = tsl2550_of_match,
.pm = TSL2550_PM_OPS,
},
.probe = tsl2550_probe,
.remove = tsl2550_remove,
.id_table = tsl2550_id,
};
module_i2c_driver(tsl2550_driver);
MODULE_AUTHOR("Rodolfo Giometti <[email protected]>");
MODULE_DESCRIPTION("TSL2550 ambient light sensor driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
| linux-master | drivers/misc/tsl2550.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Xilinx TMR Inject IP.
*
* Copyright (C) 2022 Advanced Micro Devices, Inc.
*
* Description:
 * This driver is developed for the TMR Inject IP. The Triple Modular
 * Redundancy (TMR) Inject provides fault injection.
*/
#include <asm/xilinx_mb_manager.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/fault-inject.h>
/* TMR Inject Register offsets */
#define XTMR_INJECT_CR_OFFSET 0x0
#define XTMR_INJECT_AIR_OFFSET 0x4
#define XTMR_INJECT_IIR_OFFSET 0xC
#define XTMR_INJECT_EAIR_OFFSET 0x10
#define XTMR_INJECT_ERR_OFFSET 0x204
/* Register Bitmasks/shifts */
#define XTMR_INJECT_CR_CPUID_SHIFT 8
#define XTMR_INJECT_CR_IE_SHIFT 10
#define XTMR_INJECT_IIR_ADDR_MASK GENMASK(31, 16)
#define XTMR_INJECT_MAGIC_MAX_VAL 255
/**
* struct xtmr_inject_dev - Driver data for TMR Inject
 * @regs: ioremapped base address of the device registers
* @magic: Magic hardware configuration value
*/
struct xtmr_inject_dev {
void __iomem *regs;
u32 magic;
};
static DECLARE_FAULT_ATTR(inject_fault);
static char *inject_request;
module_param(inject_request, charp, 0);
MODULE_PARM_DESC(inject_request, "default fault injection attributes");
static struct dentry *dbgfs_root;
/* IO accessors */
static inline void xtmr_inject_write(struct xtmr_inject_dev *xtmr_inject,
u32 addr, u32 value)
{
iowrite32(value, xtmr_inject->regs + addr);
}
static inline u32 xtmr_inject_read(struct xtmr_inject_dev *xtmr_inject,
u32 addr)
{
return ioread32(xtmr_inject->regs + addr);
}
static int xtmr_inject_set(void *data, u64 val)
{
if (val != 1)
return -EINVAL;
xmb_inject_err();
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(xtmr_inject_fops, NULL, xtmr_inject_set, "%llu\n");
static void xtmr_init_debugfs(struct xtmr_inject_dev *xtmr_inject)
{
struct dentry *dir;
dbgfs_root = debugfs_create_dir("xtmr_inject", NULL);
dir = fault_create_debugfs_attr("inject_fault", dbgfs_root,
&inject_fault);
debugfs_create_file("inject_fault", 0200, dir, NULL,
&xtmr_inject_fops);
}
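/*
 * With debugfs mounted at its usual location, a fault can then be requested
 * from userspace with, for example:
 *	echo 1 > /sys/kernel/debug/xtmr_inject/inject_fault/inject_fault
 * The fault-injection tunables created by fault_create_debugfs_attr() live
 * in the same inject_fault directory.
 */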
static void xtmr_inject_init(struct xtmr_inject_dev *xtmr_inject)
{
u32 cr_val;
if (inject_request)
setup_fault_attr(&inject_fault, inject_request);
/* Allow fault injection */
cr_val = xtmr_inject->magic |
(1 << XTMR_INJECT_CR_IE_SHIFT) |
(1 << XTMR_INJECT_CR_CPUID_SHIFT);
xtmr_inject_write(xtmr_inject, XTMR_INJECT_CR_OFFSET,
cr_val);
/* Initialize the address inject and instruction inject registers */
xtmr_inject_write(xtmr_inject, XTMR_INJECT_AIR_OFFSET,
XMB_INJECT_ERR_OFFSET);
xtmr_inject_write(xtmr_inject, XTMR_INJECT_IIR_OFFSET,
XMB_INJECT_ERR_OFFSET & XTMR_INJECT_IIR_ADDR_MASK);
}
/**
* xtmr_inject_probe - Driver probe function
* @pdev: Pointer to the platform_device structure
*
* This is the driver probe routine. It does all the memory
* allocation for the device.
*
* Return: 0 on success and failure value on error
*/
static int xtmr_inject_probe(struct platform_device *pdev)
{
struct xtmr_inject_dev *xtmr_inject;
int err;
xtmr_inject = devm_kzalloc(&pdev->dev, sizeof(*xtmr_inject),
GFP_KERNEL);
if (!xtmr_inject)
return -ENOMEM;
xtmr_inject->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xtmr_inject->regs))
return PTR_ERR(xtmr_inject->regs);
err = of_property_read_u32(pdev->dev.of_node, "xlnx,magic",
&xtmr_inject->magic);
if (err < 0) {
dev_err(&pdev->dev, "unable to read xlnx,magic property");
return err;
}
if (xtmr_inject->magic > XTMR_INJECT_MAGIC_MAX_VAL) {
dev_err(&pdev->dev, "invalid xlnx,magic property value");
return -EINVAL;
}
/* Initialize TMR Inject */
xtmr_inject_init(xtmr_inject);
xtmr_init_debugfs(xtmr_inject);
platform_set_drvdata(pdev, xtmr_inject);
return 0;
}
static int xtmr_inject_remove(struct platform_device *pdev)
{
debugfs_remove_recursive(dbgfs_root);
dbgfs_root = NULL;
return 0;
}
static const struct of_device_id xtmr_inject_of_match[] = {
{
.compatible = "xlnx,tmr-inject-1.0",
},
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, xtmr_inject_of_match);
static struct platform_driver xtmr_inject_driver = {
.driver = {
.name = "xilinx-tmr_inject",
.of_match_table = xtmr_inject_of_match,
},
.probe = xtmr_inject_probe,
.remove = xtmr_inject_remove,
};
module_platform_driver(xtmr_inject_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("Xilinx TMR Inject Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/xilinx_tmr_inject.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the Analog Devices digital potentiometers (SPI bus)
*
* Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
*/
#include <linux/spi/spi.h>
#include <linux/module.h>
#include "ad525x_dpot.h"
/* SPI bus functions */
static int write8(void *client, u8 val)
{
u8 data = val;
return spi_write(client, &data, 1);
}
static int write16(void *client, u8 reg, u8 val)
{
u8 data[2] = {reg, val};
return spi_write(client, data, 2);
}
static int write24(void *client, u8 reg, u16 val)
{
u8 data[3] = {reg, val >> 8, val};
return spi_write(client, data, 3);
}
static int read8(void *client)
{
int ret;
u8 data;
ret = spi_read(client, &data, 1);
if (ret < 0)
return ret;
return data;
}
static int read16(void *client, u8 reg)
{
int ret;
u8 buf_rx[2];
write16(client, reg, 0);
ret = spi_read(client, buf_rx, 2);
if (ret < 0)
return ret;
return (buf_rx[0] << 8) | buf_rx[1];
}
static int read24(void *client, u8 reg)
{
int ret;
u8 buf_rx[3];
write24(client, reg, 0);
ret = spi_read(client, buf_rx, 3);
if (ret < 0)
return ret;
return (buf_rx[1] << 8) | buf_rx[2];
}
static const struct ad_dpot_bus_ops bops = {
.read_d8 = read8,
.read_r8d8 = read16,
.read_r8d16 = read24,
.write_d8 = write8,
.write_r8d8 = write16,
.write_r8d16 = write24,
};
static int ad_dpot_spi_probe(struct spi_device *spi)
{
struct ad_dpot_bus_data bdata = {
.client = spi,
.bops = &bops,
};
return ad_dpot_probe(&spi->dev, &bdata,
spi_get_device_id(spi)->driver_data,
spi_get_device_id(spi)->name);
}
static void ad_dpot_spi_remove(struct spi_device *spi)
{
ad_dpot_remove(&spi->dev);
}
static const struct spi_device_id ad_dpot_spi_id[] = {
{"ad5160", AD5160_ID},
{"ad5161", AD5161_ID},
{"ad5162", AD5162_ID},
{"ad5165", AD5165_ID},
{"ad5200", AD5200_ID},
{"ad5201", AD5201_ID},
{"ad5203", AD5203_ID},
{"ad5204", AD5204_ID},
{"ad5206", AD5206_ID},
{"ad5207", AD5207_ID},
{"ad5231", AD5231_ID},
{"ad5232", AD5232_ID},
{"ad5233", AD5233_ID},
{"ad5235", AD5235_ID},
{"ad5260", AD5260_ID},
{"ad5262", AD5262_ID},
{"ad5263", AD5263_ID},
{"ad5290", AD5290_ID},
{"ad5291", AD5291_ID},
{"ad5292", AD5292_ID},
{"ad5293", AD5293_ID},
{"ad7376", AD7376_ID},
{"ad8400", AD8400_ID},
{"ad8402", AD8402_ID},
{"ad8403", AD8403_ID},
{"adn2850", ADN2850_ID},
{"ad5270", AD5270_ID},
{"ad5271", AD5271_ID},
{}
};
MODULE_DEVICE_TABLE(spi, ad_dpot_spi_id);
static struct spi_driver ad_dpot_spi_driver = {
.driver = {
.name = "ad_dpot",
},
.probe = ad_dpot_spi_probe,
.remove = ad_dpot_spi_remove,
.id_table = ad_dpot_spi_id,
};
module_spi_driver(ad_dpot_spi_driver);
MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("digital potentiometer SPI bus driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:ad_dpot");
| linux-master | drivers/misc/ad525x_dpot-spi.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of the APDS990x sensor driver.
* Chip is combined proximity and ambient light sensor.
*
* Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
*
* Contact: Samu Onkalo <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/platform_data/apds990x.h>
/* Register map */
#define APDS990X_ENABLE 0x00 /* Enable of states and interrupts */
#define APDS990X_ATIME 0x01 /* ALS ADC time */
#define APDS990X_PTIME 0x02 /* Proximity ADC time */
#define APDS990X_WTIME 0x03 /* Wait time */
#define APDS990X_AILTL 0x04 /* ALS interrupt low threshold low byte */
#define APDS990X_AILTH 0x05 /* ALS interrupt low threshold hi byte */
#define APDS990X_AIHTL 0x06 /* ALS interrupt hi threshold low byte */
#define APDS990X_AIHTH 0x07 /* ALS interrupt hi threshold hi byte */
#define APDS990X_PILTL 0x08 /* Proximity interrupt low threshold low byte */
#define APDS990X_PILTH 0x09 /* Proximity interrupt low threshold hi byte */
#define APDS990X_PIHTL 0x0a /* Proximity interrupt hi threshold low byte */
#define APDS990X_PIHTH 0x0b /* Proximity interrupt hi threshold hi byte */
#define APDS990X_PERS 0x0c /* Interrupt persistence filters */
#define APDS990X_CONFIG 0x0d /* Configuration */
#define APDS990X_PPCOUNT 0x0e /* Proximity pulse count */
#define APDS990X_CONTROL 0x0f /* Gain control register */
#define APDS990X_REV 0x11 /* Revision Number */
#define APDS990X_ID 0x12 /* Device ID */
#define APDS990X_STATUS 0x13 /* Device status */
#define APDS990X_CDATAL 0x14 /* Clear ADC low data register */
#define APDS990X_CDATAH 0x15 /* Clear ADC high data register */
#define APDS990X_IRDATAL 0x16 /* IR ADC low data register */
#define APDS990X_IRDATAH 0x17 /* IR ADC high data register */
#define APDS990X_PDATAL 0x18 /* Proximity ADC low data register */
#define APDS990X_PDATAH 0x19 /* Proximity ADC high data register */
/* Control */
#define APDS990X_MAX_AGAIN 3
/* Enable register */
#define APDS990X_EN_PIEN (0x1 << 5)
#define APDS990X_EN_AIEN (0x1 << 4)
#define APDS990X_EN_WEN (0x1 << 3)
#define APDS990X_EN_PEN (0x1 << 2)
#define APDS990X_EN_AEN (0x1 << 1)
#define APDS990X_EN_PON (0x1 << 0)
#define APDS990X_EN_DISABLE_ALL 0
/* Status register */
#define APDS990X_ST_PINT (0x1 << 5)
#define APDS990X_ST_AINT (0x1 << 4)
/* I2C access types */
#define APDS990x_CMD_TYPE_MASK (0x03 << 5)
#define APDS990x_CMD_TYPE_RB (0x00 << 5) /* Repeated byte */
#define APDS990x_CMD_TYPE_INC (0x01 << 5) /* Auto increment */
#define APDS990x_CMD_TYPE_SPE (0x03 << 5) /* Special function */
#define APDS990x_ADDR_SHIFT 0
#define APDS990x_CMD 0x80
/* Interrupt ack commands */
#define APDS990X_INT_ACK_ALS 0x6
#define APDS990X_INT_ACK_PS 0x5
#define APDS990X_INT_ACK_BOTH 0x7
/* ptime */
#define APDS990X_PTIME_DEFAULT	0xff /* Recommended conversion time 2.7 ms */
/* wtime */
#define APDS990X_WTIME_DEFAULT 0xee /* ~50ms wait time */
#define APDS990X_TIME_TO_ADC 1024 /* One timetick as ADC count value */
/* Persistence */
#define APDS990X_APERS_SHIFT 0
#define APDS990X_PPERS_SHIFT 4
/* Supported ID:s */
#define APDS990X_ID_0 0x0
#define APDS990X_ID_4 0x4
#define APDS990X_ID_29 0x29
/* pgain and pdiode settings */
#define APDS_PGAIN_1X 0x0
#define APDS_PDIODE_IR 0x2
#define APDS990X_LUX_OUTPUT_SCALE 10
/* Reverse chip factors for threshold calculation */
struct reverse_factors {
u32 afactor;
int cf1;
int irf1;
int cf2;
int irf2;
};
struct apds990x_chip {
struct apds990x_platform_data *pdata;
struct i2c_client *client;
struct mutex mutex; /* avoid parallel access */
struct regulator_bulk_data regs[2];
wait_queue_head_t wait;
int prox_en;
bool prox_continuous_mode;
bool lux_wait_fresh_res;
/* Chip parameters */
struct apds990x_chip_factors cf;
struct reverse_factors rcf;
u16 atime; /* als integration time */
u16 arate; /* als reporting rate */
u16 a_max_result; /* Max possible ADC value with current atime */
u8 again_meas; /* Gain used in last measurement */
u8 again_next; /* Next calculated gain */
u8 pgain;
u8 pdiode;
u8 pdrive;
u8 lux_persistence;
u8 prox_persistence;
u32 lux_raw;
u32 lux;
u16 lux_clear;
u16 lux_ir;
u16 lux_calib;
u32 lux_thres_hi;
u32 lux_thres_lo;
u32 prox_thres;
u16 prox_data;
u16 prox_calib;
char chipname[10];
u8 revision;
};
#define APDS_CALIB_SCALER 8192
#define APDS_LUX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER)
#define APDS_PROX_NEUTRAL_CALIB_VALUE (1 * APDS_CALIB_SCALER)
#define APDS_PROX_DEF_THRES 600
#define APDS_PROX_HYSTERESIS 50
#define APDS_LUX_DEF_THRES_HI 101
#define APDS_LUX_DEF_THRES_LO 100
#define APDS_DEFAULT_PROX_PERS 1
#define APDS_TIMEOUT 2000
#define APDS_STARTUP_DELAY 25000 /* us */
#define APDS_RANGE 65535
#define APDS_PROX_RANGE 1023
#define APDS_LUX_GAIN_LO_LIMIT 100
#define APDS_LUX_GAIN_LO_LIMIT_STRICT 25
#define TIMESTEP 87 /* 2.7ms is about 87 / 32 */
#define TIME_STEP_SCALER 32
#define APDS_LUX_AVERAGING_TIME 50 /* tolerates 50/60Hz ripple */
#define APDS_LUX_DEFAULT_RATE 200
static const u8 again[] = {1, 8, 16, 120}; /* ALS gain steps */
/*
 * The following two tables must match, i.e. a 10 Hz rate means a
 * persistence value of 1.
 */
static const u16 arates_hz[] = {10, 5, 2, 1};
static const u8 apersis[] = {1, 2, 4, 5};
/* Regulators */
static const char reg_vcc[] = "Vdd";
static const char reg_vled[] = "Vled";
static int apds990x_read_byte(struct apds990x_chip *chip, u8 reg, u8 *data)
{
struct i2c_client *client = chip->client;
s32 ret;
reg &= ~APDS990x_CMD_TYPE_MASK;
reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
ret = i2c_smbus_read_byte_data(client, reg);
*data = ret;
return (int)ret;
}
static int apds990x_read_word(struct apds990x_chip *chip, u8 reg, u16 *data)
{
struct i2c_client *client = chip->client;
s32 ret;
reg &= ~APDS990x_CMD_TYPE_MASK;
reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
ret = i2c_smbus_read_word_data(client, reg);
*data = ret;
return (int)ret;
}
static int apds990x_write_byte(struct apds990x_chip *chip, u8 reg, u8 data)
{
struct i2c_client *client = chip->client;
s32 ret;
reg &= ~APDS990x_CMD_TYPE_MASK;
reg |= APDS990x_CMD | APDS990x_CMD_TYPE_RB;
ret = i2c_smbus_write_byte_data(client, reg, data);
return (int)ret;
}
static int apds990x_write_word(struct apds990x_chip *chip, u8 reg, u16 data)
{
struct i2c_client *client = chip->client;
s32 ret;
reg &= ~APDS990x_CMD_TYPE_MASK;
reg |= APDS990x_CMD | APDS990x_CMD_TYPE_INC;
ret = i2c_smbus_write_word_data(client, reg, data);
return (int)ret;
}
static int apds990x_mode_on(struct apds990x_chip *chip)
{
/* ALS is mandatory, proximity optional */
u8 reg = APDS990X_EN_AIEN | APDS990X_EN_PON | APDS990X_EN_AEN |
APDS990X_EN_WEN;
if (chip->prox_en)
reg |= APDS990X_EN_PIEN | APDS990X_EN_PEN;
return apds990x_write_byte(chip, APDS990X_ENABLE, reg);
}
static u16 apds990x_lux_to_threshold(struct apds990x_chip *chip, u32 lux)
{
u32 thres;
u32 cpl;
u32 ir;
if (lux == 0)
return 0;
else if (lux == APDS_RANGE)
return APDS_RANGE;
/*
* Reported LUX value is a combination of the IR and CLEAR channel
* values. However, interrupt threshold is only for clear channel.
	 * This function approximates the needed HW threshold value for a given
	 * LUX value under the current lighting conditions.
	 * The IR level compared to visible light varies heavily depending on
	 * the source of the light.
*
* Calculate threshold value for the next measurement period.
* Math: threshold = lux * cpl where
* cpl = atime * again / (glass_attenuation * device_factor)
* (count-per-lux)
*
* First remove calibration. Division by four is to avoid overflow
*/
lux = lux * (APDS_CALIB_SCALER / 4) / (chip->lux_calib / 4);
/* Multiplication by 64 is to increase accuracy */
cpl = ((u32)chip->atime * (u32)again[chip->again_next] *
APDS_PARAM_SCALE * 64) / (chip->cf.ga * chip->cf.df);
thres = lux * cpl / 64;
/*
* Convert IR light from the latest result to match with
* new gain step. This helps to adapt with the current
* source of light.
*/
ir = (u32)chip->lux_ir * (u32)again[chip->again_next] /
(u32)again[chip->again_meas];
/*
* Compensate count with IR light impact
* IAC1 > IAC2 (see apds990x_get_lux for formulas)
*/
if (chip->lux_clear * APDS_PARAM_SCALE >=
chip->rcf.afactor * chip->lux_ir)
thres = (chip->rcf.cf1 * thres + chip->rcf.irf1 * ir) /
APDS_PARAM_SCALE;
else
thres = (chip->rcf.cf2 * thres + chip->rcf.irf2 * ir) /
APDS_PARAM_SCALE;
if (thres >= chip->a_max_result)
thres = chip->a_max_result - 1;
return thres;
}
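/*
 * Worked example for apds990x_lux_to_threshold() above (illustrative
 * numbers only, using the uncovered-sensor defaults from probe and a
 * neutral lux_calib == APDS_CALIB_SCALER): with atime = 50, gain = 8,
 * ga = 1966, df = 52 and APDS_PARAM_SCALE = 4096,
 *   cpl = (50 * 8 * 4096 * 64) / (1966 * 52) ~= 1025
 * so a 100 lux target maps to about 100 * 1025 / 64 ~= 1601 clear-channel
 * counts before the IR compensation step.
 */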
static inline int apds990x_set_atime(struct apds990x_chip *chip, u32 time_ms)
{
u8 reg_value;
chip->atime = time_ms;
/* Formula is specified in the data sheet */
reg_value = 256 - ((time_ms * TIME_STEP_SCALER) / TIMESTEP);
/* Calculate max ADC value for given integration time */
chip->a_max_result = (u16)(256 - reg_value) * APDS990X_TIME_TO_ADC;
return apds990x_write_byte(chip, APDS990X_ATIME, reg_value);
}
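/*
 * Worked example for apds990x_set_atime() above (illustrative): with the
 * default APDS_LUX_AVERAGING_TIME of 50 ms,
 *   reg_value    = 256 - (50 * 32) / 87 = 238
 *   a_max_result = (256 - 238) * APDS990X_TIME_TO_ADC
 * i.e. 18 integration cycles worth of ADC counts.
 */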
/* Called always with mutex locked */
static int apds990x_refresh_pthres(struct apds990x_chip *chip, int data)
{
int ret, lo, hi;
/* If the chip is not in use, don't try to access it */
if (pm_runtime_suspended(&chip->client->dev))
return 0;
if (data < chip->prox_thres) {
lo = 0;
hi = chip->prox_thres;
} else {
lo = chip->prox_thres - APDS_PROX_HYSTERESIS;
if (chip->prox_continuous_mode)
hi = chip->prox_thres;
else
hi = APDS_RANGE;
}
ret = apds990x_write_word(chip, APDS990X_PILTL, lo);
ret |= apds990x_write_word(chip, APDS990X_PIHTL, hi);
return ret;
}
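/*
 * Example of the hysteresis window above (illustrative, using the default
 * prox_thres = 600 and APDS_PROX_HYSTERESIS = 50): while the reading is
 * below the threshold the window is [0, 600], so only a rise above 600
 * fires an interrupt. Once above it, the window becomes [550, 65535]
 * (or [550, 600] in the periodic reporting mode), so the next interrupt
 * needs either a drop below 550 or, in periodic mode, any further reading
 * above the threshold.
 */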
/* Called always with mutex locked */
static int apds990x_refresh_athres(struct apds990x_chip *chip)
{
int ret;
/* If the chip is not in use, don't try to access it */
if (pm_runtime_suspended(&chip->client->dev))
return 0;
ret = apds990x_write_word(chip, APDS990X_AILTL,
apds990x_lux_to_threshold(chip, chip->lux_thres_lo));
ret |= apds990x_write_word(chip, APDS990X_AIHTL,
apds990x_lux_to_threshold(chip, chip->lux_thres_hi));
return ret;
}
/* Called always with mutex locked */
static void apds990x_force_a_refresh(struct apds990x_chip *chip)
{
/* This will force ALS interrupt after the next measurement. */
apds990x_write_word(chip, APDS990X_AILTL, APDS_LUX_DEF_THRES_LO);
apds990x_write_word(chip, APDS990X_AIHTL, APDS_LUX_DEF_THRES_HI);
}
/* Called always with mutex locked */
static void apds990x_force_p_refresh(struct apds990x_chip *chip)
{
/* This will force proximity interrupt after the next measurement. */
apds990x_write_word(chip, APDS990X_PILTL, APDS_PROX_DEF_THRES - 1);
apds990x_write_word(chip, APDS990X_PIHTL, APDS_PROX_DEF_THRES);
}
/* Called always with mutex locked */
static int apds990x_calc_again(struct apds990x_chip *chip)
{
int curr_again = chip->again_meas;
int next_again = chip->again_meas;
int ret = 0;
/* Calculate suitable als gain */
if (chip->lux_clear == chip->a_max_result)
next_again -= 2; /* ALS saturated. Decrease gain by 2 steps */
else if (chip->lux_clear > chip->a_max_result / 2)
next_again--;
else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
next_again += 2; /* Too dark. Increase gain by 2 steps */
else if (chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT)
next_again++;
/* Limit gain to available range */
if (next_again < 0)
next_again = 0;
else if (next_again > APDS990X_MAX_AGAIN)
next_again = APDS990X_MAX_AGAIN;
/* Let's check whether we can trust the measured result */
if (chip->lux_clear == chip->a_max_result)
/* Result can be totally garbage due to saturation */
ret = -ERANGE;
else if (next_again != curr_again &&
chip->lux_clear < APDS_LUX_GAIN_LO_LIMIT_STRICT)
/*
* Gain is changed and measurement result is very small.
* Result can be totally garbage due to underflow
*/
ret = -ERANGE;
chip->again_next = next_again;
apds990x_write_byte(chip, APDS990X_CONTROL,
(chip->pdrive << 6) |
(chip->pdiode << 4) |
(chip->pgain << 2) |
(chip->again_next << 0));
/*
* Error means bad result -> re-measurement is needed. The forced
* refresh uses the fastest possible persistence setting to get a result
* as soon as possible.
*/
if (ret < 0)
apds990x_force_a_refresh(chip);
else
apds990x_refresh_athres(chip);
return ret;
}
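/*
 * Illustrative walk through apds990x_calc_again() starting from gain
 * index 1 (8x), with again[] = {1, 8, 16, 120}:
 *   clear == a_max_result     -> saturated: step down two indices
 *                                (clamped to 1x), result dropped (-ERANGE)
 *   clear  > a_max_result / 2 -> step down one index to 1x
 *   clear  < 25               -> step up two indices to 120x, result dropped
 *   clear  < 100              -> step up one index to 16x
 * otherwise the gain is left as is. The IRQ handler only publishes the lux
 * value when 0 is returned.
 */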
/* Called always with mutex locked */
static int apds990x_get_lux(struct apds990x_chip *chip, int clear, int ir)
{
int iac, iac1, iac2; /* IR adjusted counts */
u32 lpc; /* Lux per count */
/* Formulas:
* iac1 = CF1 * CLEAR_CH - IRF1 * IR_CH
* iac2 = CF2 * CLEAR_CH - IRF2 * IR_CH
*/
iac1 = (chip->cf.cf1 * clear - chip->cf.irf1 * ir) / APDS_PARAM_SCALE;
iac2 = (chip->cf.cf2 * clear - chip->cf.irf2 * ir) / APDS_PARAM_SCALE;
iac = max(iac1, iac2);
iac = max(iac, 0);
lpc = APDS990X_LUX_OUTPUT_SCALE * (chip->cf.df * chip->cf.ga) /
(u32)(again[chip->again_meas] * (u32)chip->atime);
return (iac * lpc) / APDS_PARAM_SCALE;
}
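/*
 * Illustrative example for apds990x_get_lux() above, using the
 * uncovered-sensor defaults from probe (cf1 = 4096, irf1 = 9134,
 * cf2 = 2867, irf2 = 5816, ga = 1966, df = 52), gain 8x, atime 50 and
 * raw counts clear = 1000, ir = 200:
 *   iac1 = (4096 * 1000 - 9134 * 200) / 4096 = 554
 *   iac2 = (2867 * 1000 - 5816 * 200) / 4096 = 415
 *   iac  = max(554, 415, 0) = 554
 *   lpc  = APDS990X_LUX_OUTPUT_SCALE * (52 * 1966) / (8 * 50)
 * and the returned, scaled lux is iac * lpc / APDS_PARAM_SCALE.
 */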
static int apds990x_ack_int(struct apds990x_chip *chip, u8 mode)
{
struct i2c_client *client = chip->client;
s32 ret;
u8 reg = APDS990x_CMD | APDS990x_CMD_TYPE_SPE;
switch (mode & (APDS990X_ST_AINT | APDS990X_ST_PINT)) {
case APDS990X_ST_AINT:
reg |= APDS990X_INT_ACK_ALS;
break;
case APDS990X_ST_PINT:
reg |= APDS990X_INT_ACK_PS;
break;
default:
reg |= APDS990X_INT_ACK_BOTH;
break;
}
ret = i2c_smbus_read_byte_data(client, reg);
return (int)ret;
}
static irqreturn_t apds990x_irq(int irq, void *data)
{
struct apds990x_chip *chip = data;
u8 status;
apds990x_read_byte(chip, APDS990X_STATUS, &status);
apds990x_ack_int(chip, status);
mutex_lock(&chip->mutex);
if (!pm_runtime_suspended(&chip->client->dev)) {
if (status & APDS990X_ST_AINT) {
apds990x_read_word(chip, APDS990X_CDATAL,
&chip->lux_clear);
apds990x_read_word(chip, APDS990X_IRDATAL,
&chip->lux_ir);
/* Store used gain for calculations */
chip->again_meas = chip->again_next;
chip->lux_raw = apds990x_get_lux(chip,
chip->lux_clear,
chip->lux_ir);
if (apds990x_calc_again(chip) == 0) {
/* Result is valid */
chip->lux = chip->lux_raw;
chip->lux_wait_fresh_res = false;
wake_up(&chip->wait);
sysfs_notify(&chip->client->dev.kobj,
NULL, "lux0_input");
}
}
if ((status & APDS990X_ST_PINT) && chip->prox_en) {
u16 clr_ch;
apds990x_read_word(chip, APDS990X_CDATAL, &clr_ch);
/*
* If ALS channel is saturated at min gain,
* proximity gives false positive values.
* Just ignore them.
*/
if (chip->again_meas == 0 &&
clr_ch == chip->a_max_result)
chip->prox_data = 0;
else
apds990x_read_word(chip,
APDS990X_PDATAL,
&chip->prox_data);
apds990x_refresh_pthres(chip, chip->prox_data);
if (chip->prox_data < chip->prox_thres)
chip->prox_data = 0;
else if (!chip->prox_continuous_mode)
chip->prox_data = APDS_PROX_RANGE;
sysfs_notify(&chip->client->dev.kobj,
NULL, "prox0_raw");
}
}
mutex_unlock(&chip->mutex);
return IRQ_HANDLED;
}
static int apds990x_configure(struct apds990x_chip *chip)
{
/* It is recommended to use disabled mode during these operations */
apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
/* Conversion and wait times for different state machine states */
apds990x_write_byte(chip, APDS990X_PTIME, APDS990X_PTIME_DEFAULT);
apds990x_write_byte(chip, APDS990X_WTIME, APDS990X_WTIME_DEFAULT);
apds990x_set_atime(chip, APDS_LUX_AVERAGING_TIME);
apds990x_write_byte(chip, APDS990X_CONFIG, 0);
/* Persistence levels */
apds990x_write_byte(chip, APDS990X_PERS,
(chip->lux_persistence << APDS990X_APERS_SHIFT) |
(chip->prox_persistence << APDS990X_PPERS_SHIFT));
apds990x_write_byte(chip, APDS990X_PPCOUNT, chip->pdata->ppcount);
/* Start with relatively small gain */
chip->again_meas = 1;
chip->again_next = 1;
apds990x_write_byte(chip, APDS990X_CONTROL,
(chip->pdrive << 6) |
(chip->pdiode << 4) |
(chip->pgain << 2) |
(chip->again_next << 0));
return 0;
}
static int apds990x_detect(struct apds990x_chip *chip)
{
struct i2c_client *client = chip->client;
int ret;
u8 id;
ret = apds990x_read_byte(chip, APDS990X_ID, &id);
if (ret < 0) {
dev_err(&client->dev, "ID read failed\n");
return ret;
}
ret = apds990x_read_byte(chip, APDS990X_REV, &chip->revision);
if (ret < 0) {
dev_err(&client->dev, "REV read failed\n");
return ret;
}
switch (id) {
case APDS990X_ID_0:
case APDS990X_ID_4:
case APDS990X_ID_29:
snprintf(chip->chipname, sizeof(chip->chipname), "APDS-990x");
break;
default:
ret = -ENODEV;
break;
}
return ret;
}
#ifdef CONFIG_PM
static int apds990x_chip_on(struct apds990x_chip *chip)
{
int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
chip->regs);
if (err < 0)
return err;
usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
/* Refresh all configs in case the regulators were off */
chip->prox_data = 0;
apds990x_configure(chip);
apds990x_mode_on(chip);
return 0;
}
#endif
static int apds990x_chip_off(struct apds990x_chip *chip)
{
apds990x_write_byte(chip, APDS990X_ENABLE, APDS990X_EN_DISABLE_ALL);
regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
return 0;
}
static ssize_t apds990x_lux_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
ssize_t ret;
u32 result;
long timeout;
if (pm_runtime_suspended(dev))
return -EIO;
timeout = wait_event_interruptible_timeout(chip->wait,
!chip->lux_wait_fresh_res,
msecs_to_jiffies(APDS_TIMEOUT));
if (!timeout)
return -EIO;
mutex_lock(&chip->mutex);
result = (chip->lux * chip->lux_calib) / APDS_CALIB_SCALER;
if (result > (APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE))
result = APDS_RANGE * APDS990X_LUX_OUTPUT_SCALE;
ret = sprintf(buf, "%d.%d\n",
result / APDS990X_LUX_OUTPUT_SCALE,
result % APDS990X_LUX_OUTPUT_SCALE);
mutex_unlock(&chip->mutex);
return ret;
}
static DEVICE_ATTR(lux0_input, S_IRUGO, apds990x_lux_show, NULL);
static ssize_t apds990x_lux_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", APDS_RANGE);
}
static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, apds990x_lux_range_show, NULL);
static ssize_t apds990x_lux_calib_format_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", APDS_CALIB_SCALER);
}
static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
apds990x_lux_calib_format_show, NULL);
static ssize_t apds990x_lux_calib_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", chip->lux_calib);
}
static ssize_t apds990x_lux_calib_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
chip->lux_calib = value;
return len;
}
static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, apds990x_lux_calib_show,
apds990x_lux_calib_store);
static ssize_t apds990x_rate_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i;
int pos = 0;
for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
pos += sprintf(buf + pos, "%d ", arates_hz[i]);
sprintf(buf + pos - 1, "\n");
return pos;
}
static ssize_t apds990x_rate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", chip->arate);
}
static int apds990x_set_arate(struct apds990x_chip *chip, int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(arates_hz); i++)
if (rate >= arates_hz[i])
break;
if (i == ARRAY_SIZE(arates_hz))
return -EINVAL;
/* Pick up corresponding persistence value */
chip->lux_persistence = apersis[i];
chip->arate = arates_hz[i];
/* If the chip is not in use, don't try to access it */
if (pm_runtime_suspended(&chip->client->dev))
return 0;
/* Persistence levels */
return apds990x_write_byte(chip, APDS990X_PERS,
(chip->lux_persistence << APDS990X_APERS_SHIFT) |
(chip->prox_persistence << APDS990X_PPERS_SHIFT));
}
static ssize_t apds990x_rate_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
ret = apds990x_set_arate(chip, value);
mutex_unlock(&chip->mutex);
if (ret < 0)
return ret;
return len;
}
static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, apds990x_rate_avail, NULL);
static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, apds990x_rate_show,
apds990x_rate_store);
static ssize_t apds990x_prox_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret;
struct apds990x_chip *chip = dev_get_drvdata(dev);
if (pm_runtime_suspended(dev) || !chip->prox_en)
return -EIO;
mutex_lock(&chip->mutex);
ret = sprintf(buf, "%d\n", chip->prox_data);
mutex_unlock(&chip->mutex);
return ret;
}
static DEVICE_ATTR(prox0_raw, S_IRUGO, apds990x_prox_show, NULL);
static ssize_t apds990x_prox_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", APDS_PROX_RANGE);
}
static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, apds990x_prox_range_show, NULL);
static ssize_t apds990x_prox_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", chip->prox_en);
}
static ssize_t apds990x_prox_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
if (!chip->prox_en)
chip->prox_data = 0;
if (value)
chip->prox_en++;
else if (chip->prox_en > 0)
chip->prox_en--;
if (!pm_runtime_suspended(dev))
apds990x_mode_on(chip);
mutex_unlock(&chip->mutex);
return len;
}
static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, apds990x_prox_enable_show,
apds990x_prox_enable_store);
static const char *reporting_modes[] = {"trigger", "periodic"};
static ssize_t apds990x_prox_reporting_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%s\n",
reporting_modes[!!chip->prox_continuous_mode]);
}
static ssize_t apds990x_prox_reporting_mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
int ret;
ret = sysfs_match_string(reporting_modes, buf);
if (ret < 0)
return ret;
chip->prox_continuous_mode = ret;
return len;
}
static DEVICE_ATTR(prox0_reporting_mode, S_IRUGO | S_IWUSR,
apds990x_prox_reporting_mode_show,
apds990x_prox_reporting_mode_store);
static ssize_t apds990x_prox_reporting_avail_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s %s\n", reporting_modes[0], reporting_modes[1]);
}
static DEVICE_ATTR(prox0_reporting_mode_avail, S_IRUGO | S_IWUSR,
apds990x_prox_reporting_avail_show, NULL);
static ssize_t apds990x_lux_thresh_above_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", chip->lux_thres_hi);
}
static ssize_t apds990x_lux_thresh_below_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", chip->lux_thres_lo);
}
static ssize_t apds990x_set_lux_thresh(struct apds990x_chip *chip, u32 *target,
const char *buf)
{
unsigned long thresh;
int ret;
ret = kstrtoul(buf, 0, &thresh);
if (ret)
return ret;
if (thresh > APDS_RANGE)
return -EINVAL;
mutex_lock(&chip->mutex);
*target = thresh;
/*
* Don't update values in HW if we are still waiting for
* the first interrupt to arrive after the device handle open call.
*/
if (!chip->lux_wait_fresh_res)
apds990x_refresh_athres(chip);
mutex_unlock(&chip->mutex);
return ret;
}
static ssize_t apds990x_lux_thresh_above_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_hi, buf);
if (ret < 0)
return ret;
return len;
}
static ssize_t apds990x_lux_thresh_below_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
int ret = apds990x_set_lux_thresh(chip, &chip->lux_thres_lo, buf);
if (ret < 0)
return ret;
return len;
}
static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
apds990x_lux_thresh_above_show,
apds990x_lux_thresh_above_store);
static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
apds990x_lux_thresh_below_show,
apds990x_lux_thresh_below_store);
static ssize_t apds990x_prox_threshold_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", chip->prox_thres);
}
static ssize_t apds990x_prox_threshold_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
if ((value > APDS_RANGE) || (value == 0) ||
(value < APDS_PROX_HYSTERESIS))
return -EINVAL;
mutex_lock(&chip->mutex);
chip->prox_thres = value;
apds990x_force_p_refresh(chip);
mutex_unlock(&chip->mutex);
return len;
}
static DEVICE_ATTR(prox0_thresh_above_value, S_IRUGO | S_IWUSR,
apds990x_prox_threshold_show,
apds990x_prox_threshold_store);
static ssize_t apds990x_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
}
static ssize_t apds990x_power_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
if (value) {
pm_runtime_get_sync(dev);
mutex_lock(&chip->mutex);
chip->lux_wait_fresh_res = true;
apds990x_force_a_refresh(chip);
apds990x_force_p_refresh(chip);
mutex_unlock(&chip->mutex);
} else {
if (!pm_runtime_suspended(dev))
pm_runtime_put(dev);
}
return len;
}
static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR,
apds990x_power_state_show,
apds990x_power_state_store);
static ssize_t apds990x_chip_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct apds990x_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%s %d\n", chip->chipname, chip->revision);
}
static DEVICE_ATTR(chip_id, S_IRUGO, apds990x_chip_id_show, NULL);
static struct attribute *sysfs_attrs_ctrl[] = {
&dev_attr_lux0_calibscale.attr,
&dev_attr_lux0_calibscale_default.attr,
&dev_attr_lux0_input.attr,
&dev_attr_lux0_sensor_range.attr,
&dev_attr_lux0_rate.attr,
&dev_attr_lux0_rate_avail.attr,
&dev_attr_lux0_thresh_above_value.attr,
&dev_attr_lux0_thresh_below_value.attr,
&dev_attr_prox0_raw_en.attr,
&dev_attr_prox0_raw.attr,
&dev_attr_prox0_sensor_range.attr,
&dev_attr_prox0_thresh_above_value.attr,
&dev_attr_prox0_reporting_mode.attr,
&dev_attr_prox0_reporting_mode_avail.attr,
&dev_attr_chip_id.attr,
&dev_attr_power_state.attr,
NULL
};
static const struct attribute_group apds990x_attribute_group[] = {
{.attrs = sysfs_attrs_ctrl },
};
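/*
 * Illustrative use of the sysfs interface above from user space; the
 * parent directory is hypothetical and depends on the I2C bus number and
 * slave address:
 *
 *   cat /sys/bus/i2c/devices/<bus>-<addr>/lux0_input
 *   echo 1 > /sys/bus/i2c/devices/<bus>-<addr>/prox0_raw_en
 *   echo 500 > /sys/bus/i2c/devices/<bus>-<addr>/prox0_thresh_above_value
 *   cat /sys/bus/i2c/devices/<bus>-<addr>/prox0_raw
 */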
static int apds990x_probe(struct i2c_client *client)
{
struct apds990x_chip *chip;
int err;
chip = kzalloc(sizeof *chip, GFP_KERNEL);
if (!chip)
return -ENOMEM;
i2c_set_clientdata(client, chip);
chip->client = client;
init_waitqueue_head(&chip->wait);
mutex_init(&chip->mutex);
chip->pdata = client->dev.platform_data;
if (chip->pdata == NULL) {
dev_err(&client->dev, "platform data is mandatory\n");
err = -EINVAL;
goto fail1;
}
if (chip->pdata->cf.ga == 0) {
/* set uncovered sensor default parameters */
chip->cf.ga = 1966; /* 0.48 * APDS_PARAM_SCALE */
chip->cf.cf1 = 4096; /* 1.00 * APDS_PARAM_SCALE */
chip->cf.irf1 = 9134; /* 2.23 * APDS_PARAM_SCALE */
chip->cf.cf2 = 2867; /* 0.70 * APDS_PARAM_SCALE */
chip->cf.irf2 = 5816; /* 1.42 * APDS_PARAM_SCALE */
chip->cf.df = 52;
} else {
chip->cf = chip->pdata->cf;
}
/* precalculate inverse chip factors for threshold control */
chip->rcf.afactor =
(chip->cf.irf1 - chip->cf.irf2) * APDS_PARAM_SCALE /
(chip->cf.cf1 - chip->cf.cf2);
chip->rcf.cf1 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
chip->cf.cf1;
chip->rcf.irf1 = chip->cf.irf1 * APDS_PARAM_SCALE /
chip->cf.cf1;
chip->rcf.cf2 = APDS_PARAM_SCALE * APDS_PARAM_SCALE /
chip->cf.cf2;
chip->rcf.irf2 = chip->cf.irf2 * APDS_PARAM_SCALE /
chip->cf.cf2;
/* Set something to start with */
chip->lux_thres_hi = APDS_LUX_DEF_THRES_HI;
chip->lux_thres_lo = APDS_LUX_DEF_THRES_LO;
chip->lux_calib = APDS_LUX_NEUTRAL_CALIB_VALUE;
chip->prox_thres = APDS_PROX_DEF_THRES;
chip->pdrive = chip->pdata->pdrive;
chip->pdiode = APDS_PDIODE_IR;
chip->pgain = APDS_PGAIN_1X;
chip->prox_calib = APDS_PROX_NEUTRAL_CALIB_VALUE;
chip->prox_persistence = APDS_DEFAULT_PROX_PERS;
chip->prox_continuous_mode = false;
chip->regs[0].supply = reg_vcc;
chip->regs[1].supply = reg_vled;
err = regulator_bulk_get(&client->dev,
ARRAY_SIZE(chip->regs), chip->regs);
if (err < 0) {
dev_err(&client->dev, "Cannot get regulators\n");
goto fail1;
}
err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), chip->regs);
if (err < 0) {
dev_err(&client->dev, "Cannot enable regulators\n");
goto fail2;
}
usleep_range(APDS_STARTUP_DELAY, 2 * APDS_STARTUP_DELAY);
err = apds990x_detect(chip);
if (err < 0) {
dev_err(&client->dev, "APDS990X not found\n");
goto fail3;
}
pm_runtime_set_active(&client->dev);
apds990x_configure(chip);
apds990x_set_arate(chip, APDS_LUX_DEFAULT_RATE);
apds990x_mode_on(chip);
pm_runtime_enable(&client->dev);
if (chip->pdata->setup_resources) {
err = chip->pdata->setup_resources();
if (err) {
err = -EINVAL;
goto fail3;
}
}
err = sysfs_create_group(&chip->client->dev.kobj,
apds990x_attribute_group);
if (err < 0) {
dev_err(&chip->client->dev, "Sysfs registration failed\n");
goto fail4;
}
err = request_threaded_irq(client->irq, NULL,
apds990x_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW |
IRQF_ONESHOT,
"apds990x", chip);
if (err) {
dev_err(&client->dev, "could not get IRQ %d\n",
client->irq);
goto fail5;
}
return err;
fail5:
sysfs_remove_group(&chip->client->dev.kobj,
&apds990x_attribute_group[0]);
fail4:
if (chip->pdata && chip->pdata->release_resources)
chip->pdata->release_resources();
fail3:
regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
fail2:
regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
fail1:
kfree(chip);
return err;
}
static void apds990x_remove(struct i2c_client *client)
{
struct apds990x_chip *chip = i2c_get_clientdata(client);
free_irq(client->irq, chip);
sysfs_remove_group(&chip->client->dev.kobj,
apds990x_attribute_group);
if (chip->pdata && chip->pdata->release_resources)
chip->pdata->release_resources();
if (!pm_runtime_suspended(&client->dev))
apds990x_chip_off(chip);
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
regulator_bulk_free(ARRAY_SIZE(chip->regs), chip->regs);
kfree(chip);
}
#ifdef CONFIG_PM_SLEEP
static int apds990x_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
apds990x_chip_off(chip);
return 0;
}
static int apds990x_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
/*
* If we were enabled at suspend time, it is expected that
* everything works nicely and smoothly, so chip_on is enough.
*/
apds990x_chip_on(chip);
return 0;
}
#endif
#ifdef CONFIG_PM
static int apds990x_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
apds990x_chip_off(chip);
return 0;
}
static int apds990x_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct apds990x_chip *chip = i2c_get_clientdata(client);
apds990x_chip_on(chip);
return 0;
}
#endif
static const struct i2c_device_id apds990x_id[] = {
{"apds990x", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, apds990x_id);
static const struct dev_pm_ops apds990x_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(apds990x_suspend, apds990x_resume)
SET_RUNTIME_PM_OPS(apds990x_runtime_suspend,
apds990x_runtime_resume,
NULL)
};
static struct i2c_driver apds990x_driver = {
.driver = {
.name = "apds990x",
.pm = &apds990x_pm_ops,
},
.probe = apds990x_probe,
.remove = apds990x_remove,
.id_table = apds990x_id,
};
module_i2c_driver(apds990x_driver);
MODULE_DESCRIPTION("APDS990X combined ALS and proximity sensor");
MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/apds990x.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Device driver for irqs in HISI PMIC IC
*
* Copyright (c) 2013 Linaro Ltd.
* Copyright (c) 2011 Hisilicon.
* Copyright (c) 2020-2021 Huawei Technologies Co., Ltd.
*/
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/regmap.h>
struct hi6421v600_irq {
struct device *dev;
struct irq_domain *domain;
int irq;
unsigned int *irqs;
struct regmap *regmap;
/* Protect IRQ mask changes */
spinlock_t lock;
};
enum hi6421v600_irq_list {
OTMP = 0,
VBUS_CONNECT,
VBUS_DISCONNECT,
ALARMON_R,
HOLD_6S,
HOLD_1S,
POWERKEY_UP,
POWERKEY_DOWN,
OCP_SCP_R,
COUL_R,
SIM0_HPD_R,
SIM0_HPD_F,
SIM1_HPD_R,
SIM1_HPD_F,
PMIC_IRQ_LIST_MAX
};
#define HISI_IRQ_BANK_SIZE 2
/*
* IRQ number for the power key button and mask for both UP and DOWN IRQs
*/
#define HISI_POWERKEY_IRQ_NUM 0
#define HISI_IRQ_POWERKEY_UP_DOWN (BIT(POWERKEY_DOWN) | BIT(POWERKEY_UP))
/*
* Registers for IRQ address and IRQ mask bits
*
* Please notice that we need to regmap a larger region, as other
* registers are used by the irqs.
* See drivers/irq/hi6421-irq.c.
*/
#define SOC_PMIC_IRQ_MASK_0_ADDR 0x0202
#define SOC_PMIC_IRQ0_ADDR 0x0212
/*
* The IRQs are mapped as:
*
* ====================== ============= ============ =====
* IRQ MASK REGISTER IRQ REGISTER BIT
* ====================== ============= ============ =====
* OTMP 0x0202 0x212 bit 0
* VBUS_CONNECT 0x0202 0x212 bit 1
* VBUS_DISCONNECT 0x0202 0x212 bit 2
* ALARMON_R 0x0202 0x212 bit 3
* HOLD_6S 0x0202 0x212 bit 4
* HOLD_1S 0x0202 0x212 bit 5
* POWERKEY_UP 0x0202 0x212 bit 6
* POWERKEY_DOWN 0x0202 0x212 bit 7
*
* OCP_SCP_R 0x0203 0x213 bit 0
* COUL_R 0x0203 0x213 bit 1
* SIM0_HPD_R 0x0203 0x213 bit 2
* SIM0_HPD_F 0x0203 0x213 bit 3
* SIM1_HPD_R 0x0203 0x213 bit 4
* SIM1_HPD_F 0x0203 0x213 bit 5
* ====================== ============= ============ =====
*
* Each mask register contains 8 bits. The ancillary macros below
* convert a number from 0 to 14 into a register address and a bit mask
*/
#define HISI_IRQ_MASK_REG(irq_data) (SOC_PMIC_IRQ_MASK_0_ADDR + \
(irqd_to_hwirq(irq_data) / BITS_PER_BYTE))
#define HISI_IRQ_MASK_BIT(irq_data) BIT(irqd_to_hwirq(irq_data) & (BITS_PER_BYTE - 1))
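/*
 * Example (matches the table above): for SIM0_HPD_R, hwirq = 10, so
 * HISI_IRQ_MASK_REG() yields 0x0202 + 10 / 8 = 0x0203 and
 * HISI_IRQ_MASK_BIT() yields BIT(10 & 7) = BIT(2).
 */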
#define HISI_8BITS_MASK 0xff
static irqreturn_t hi6421v600_irq_handler(int irq, void *__priv)
{
struct hi6421v600_irq *priv = __priv;
unsigned long pending;
unsigned int in;
int i, offset;
for (i = 0; i < HISI_IRQ_BANK_SIZE; i++) {
regmap_read(priv->regmap, SOC_PMIC_IRQ0_ADDR + i, &in);
/* Mark pending IRQs as handled */
regmap_write(priv->regmap, SOC_PMIC_IRQ0_ADDR + i, in);
pending = in & HISI_8BITS_MASK;
if (i == HISI_POWERKEY_IRQ_NUM &&
(pending & HISI_IRQ_POWERKEY_UP_DOWN) == HISI_IRQ_POWERKEY_UP_DOWN) {
/*
* If both powerkey down and up IRQs are received,
* handle them at the right order
*/
generic_handle_irq_safe(priv->irqs[POWERKEY_DOWN]);
generic_handle_irq_safe(priv->irqs[POWERKEY_UP]);
pending &= ~HISI_IRQ_POWERKEY_UP_DOWN;
}
if (!pending)
continue;
for_each_set_bit(offset, &pending, BITS_PER_BYTE) {
generic_handle_irq_safe(priv->irqs[offset + i * BITS_PER_BYTE]);
}
}
return IRQ_HANDLED;
}
static void hi6421v600_irq_mask(struct irq_data *d)
{
struct hi6421v600_irq *priv = irq_data_get_irq_chip_data(d);
unsigned long flags;
unsigned int data;
u32 offset;
offset = HISI_IRQ_MASK_REG(d);
spin_lock_irqsave(&priv->lock, flags);
regmap_read(priv->regmap, offset, &data);
data |= HISI_IRQ_MASK_BIT(d);
regmap_write(priv->regmap, offset, data);
spin_unlock_irqrestore(&priv->lock, flags);
}
static void hi6421v600_irq_unmask(struct irq_data *d)
{
struct hi6421v600_irq *priv = irq_data_get_irq_chip_data(d);
u32 data, offset;
unsigned long flags;
offset = HISI_IRQ_MASK_REG(d);
spin_lock_irqsave(&priv->lock, flags);
regmap_read(priv->regmap, offset, &data);
data &= ~HISI_IRQ_MASK_BIT(d);
regmap_write(priv->regmap, offset, data);
spin_unlock_irqrestore(&priv->lock, flags);
}
static struct irq_chip hi6421v600_pmu_irqchip = {
.name = "hi6421v600-irq",
.irq_mask = hi6421v600_irq_mask,
.irq_unmask = hi6421v600_irq_unmask,
.irq_disable = hi6421v600_irq_mask,
.irq_enable = hi6421v600_irq_unmask,
};
static int hi6421v600_irq_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw)
{
struct hi6421v600_irq *priv = d->host_data;
irq_set_chip_and_handler_name(virq, &hi6421v600_pmu_irqchip,
handle_simple_irq, "hi6421v600");
irq_set_chip_data(virq, priv);
irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
static const struct irq_domain_ops hi6421v600_domain_ops = {
.map = hi6421v600_irq_map,
.xlate = irq_domain_xlate_twocell,
};
static void hi6421v600_irq_init(struct hi6421v600_irq *priv)
{
int i;
unsigned int pending;
/* Mask all IRQs */
for (i = 0; i < HISI_IRQ_BANK_SIZE; i++)
regmap_write(priv->regmap, SOC_PMIC_IRQ_MASK_0_ADDR + i,
HISI_8BITS_MASK);
/* Mark all IRQs as handled */
for (i = 0; i < HISI_IRQ_BANK_SIZE; i++) {
regmap_read(priv->regmap, SOC_PMIC_IRQ0_ADDR + i, &pending);
regmap_write(priv->regmap, SOC_PMIC_IRQ0_ADDR + i,
HISI_8BITS_MASK);
}
}
static int hi6421v600_irq_probe(struct platform_device *pdev)
{
struct device *pmic_dev = pdev->dev.parent;
struct device_node *np = pmic_dev->of_node;
struct platform_device *pmic_pdev;
struct device *dev = &pdev->dev;
struct hi6421v600_irq *priv;
struct regmap *regmap;
unsigned int virq;
int i, ret;
/*
* This driver is meant to be called by hi6421-spmi-core,
* which should first set drvdata. If this doesn't happen, hit
* a warn on and return.
*/
regmap = dev_get_drvdata(pmic_dev);
if (WARN_ON(!regmap))
return -ENODEV;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = dev;
priv->regmap = regmap;
spin_lock_init(&priv->lock);
pmic_pdev = container_of(pmic_dev, struct platform_device, dev);
priv->irq = platform_get_irq(pmic_pdev, 0);
if (priv->irq < 0)
return priv->irq;
platform_set_drvdata(pdev, priv);
hi6421v600_irq_init(priv);
priv->irqs = devm_kzalloc(dev, PMIC_IRQ_LIST_MAX * sizeof(int), GFP_KERNEL);
if (!priv->irqs)
return -ENOMEM;
priv->domain = irq_domain_add_simple(np, PMIC_IRQ_LIST_MAX, 0,
&hi6421v600_domain_ops, priv);
if (!priv->domain) {
dev_err(dev, "Failed to create IRQ domain\n");
return -ENODEV;
}
for (i = 0; i < PMIC_IRQ_LIST_MAX; i++) {
virq = irq_create_mapping(priv->domain, i);
if (!virq) {
dev_err(dev, "Failed to map H/W IRQ\n");
return -ENODEV;
}
priv->irqs[i] = virq;
}
ret = devm_request_threaded_irq(dev,
priv->irq, hi6421v600_irq_handler,
NULL,
IRQF_TRIGGER_LOW | IRQF_SHARED | IRQF_NO_SUSPEND,
"pmic", priv);
if (ret < 0) {
dev_err(dev, "Failed to start IRQ handling thread: error %d\n",
ret);
return ret;
}
return 0;
}
static const struct platform_device_id hi6421v600_irq_table[] = {
{ .name = "hi6421v600-irq" },
{},
};
MODULE_DEVICE_TABLE(platform, hi6421v600_irq_table);
static struct platform_driver hi6421v600_irq_driver = {
.id_table = hi6421v600_irq_table,
.driver = {
.name = "hi6421v600-irq",
},
.probe = hi6421v600_irq_probe,
};
module_platform_driver(hi6421v600_irq_driver);
MODULE_DESCRIPTION("HiSilicon Hi6421v600 IRQ driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/hi6421v600-irq.c |
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
// Copyright (c) 2018, Linaro Limited
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/dma-resv.h>
#include <linux/idr.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/of_platform.h>
#include <linux/rpmsg.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/firmware/qcom/qcom_scm.h>
#include <uapi/misc/fastrpc.h>
#include <linux/of_reserved_mem.h>
#define ADSP_DOMAIN_ID (0)
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp */
#define FASTRPC_MAX_SESSIONS 14
#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
#define FASTRPC_MAX_CRCLIST 64
#define FASTRPC_PHYS(p) ((p) & 0xffffffff)
#define FASTRPC_CTX_MAX (256)
#define FASTRPC_INIT_HANDLE 1
#define FASTRPC_DSP_UTILITIES_HANDLE 2
#define FASTRPC_CTXID_MASK (0xFF0)
#define INIT_FILELEN_MAX (2 * 1024 * 1024)
#define INIT_FILE_NAMELEN_MAX (128)
#define FASTRPC_DEVICE_NAME "fastrpc"
/* Add memory to static PD pool, protection through XPU */
#define ADSP_MMAP_HEAP_ADDR 4
/* MAP static DMA buffer on DSP User PD */
#define ADSP_MMAP_DMA_BUFFER 6
/* Add memory to static PD pool, protection through hypervisor */
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
/* Add memory to userPD pool, for user heap */
#define ADSP_MMAP_ADD_PAGES 0x1000
/* Add memory to userPD pool, for LLC heap */
#define ADSP_MMAP_ADD_PAGES_LLC 0x3000
#define DSP_UNSUPPORTED_API (0x80000414)
/* MAX NUMBER of DSP ATTRIBUTES SUPPORTED */
#define FASTRPC_MAX_DSP_ATTRIBUTES (256)
#define FASTRPC_MAX_DSP_ATTRIBUTES_LEN (sizeof(u32) * FASTRPC_MAX_DSP_ATTRIBUTES)
/* Retrieves number of input buffers from the scalars parameter */
#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff)
/* Retrieves number of output buffers from the scalars parameter */
#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff)
/* Retrieves number of input handles from the scalars parameter */
#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f)
/* Retrieves number of output handles from the scalars parameter */
#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f)
#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \
REMOTE_SCALARS_OUTBUFS(sc) + \
REMOTE_SCALARS_INHANDLES(sc)+ \
REMOTE_SCALARS_OUTHANDLES(sc))
#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \
(((attr & 0x07) << 29) | \
((method & 0x1f) << 24) | \
((in & 0xff) << 16) | \
((out & 0xff) << 8) | \
((oin & 0x0f) << 4) | \
(oout & 0x0f))
#define FASTRPC_SCALARS(method, in, out) \
FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0)
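/*
 * Example of the scalars encoding (illustrative): FASTRPC_SCALARS(6, 4, 0)
 * packs method 6 with four input buffers and no output buffers into
 * 0x06040000; REMOTE_SCALARS_INBUFS() then recovers 4 and
 * REMOTE_SCALARS_LENGTH() gives 4 arguments in total.
 */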
#define FASTRPC_CREATE_PROCESS_NARGS 6
#define FASTRPC_CREATE_STATIC_PROCESS_NARGS 3
/* Remote Method id table */
#define FASTRPC_RMID_INIT_ATTACH 0
#define FASTRPC_RMID_INIT_RELEASE 1
#define FASTRPC_RMID_INIT_MMAP 4
#define FASTRPC_RMID_INIT_MUNMAP 5
#define FASTRPC_RMID_INIT_CREATE 6
#define FASTRPC_RMID_INIT_CREATE_ATTR 7
#define FASTRPC_RMID_INIT_CREATE_STATIC 8
#define FASTRPC_RMID_INIT_MEM_MAP 10
#define FASTRPC_RMID_INIT_MEM_UNMAP 11
/* Protection Domain (PD) ids */
#define ROOT_PD (0)
#define USER_PD (1)
#define SENSORS_PD (2)
#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
"sdsp", "cdsp"};
struct fastrpc_phy_page {
u64 addr; /* physical address */
u64 size; /* size of contiguous region */
};
struct fastrpc_invoke_buf {
u32 num; /* number of contiguous regions */
u32 pgidx; /* index to start of contiguous region */
};
struct fastrpc_remote_dmahandle {
s32 fd; /* dma handle fd */
u32 offset; /* dma handle offset */
u32 len; /* dma handle length */
};
struct fastrpc_remote_buf {
u64 pv; /* buffer pointer */
u64 len; /* length of buffer */
};
union fastrpc_remote_arg {
struct fastrpc_remote_buf buf;
struct fastrpc_remote_dmahandle dma;
};
struct fastrpc_mmap_rsp_msg {
u64 vaddr;
};
struct fastrpc_mmap_req_msg {
s32 pgid;
u32 flags;
u64 vaddr;
s32 num;
};
struct fastrpc_mem_map_req_msg {
s32 pgid;
s32 fd;
s32 offset;
u32 flags;
u64 vaddrin;
s32 num;
s32 data_len;
};
struct fastrpc_munmap_req_msg {
s32 pgid;
u64 vaddr;
u64 size;
};
struct fastrpc_mem_unmap_req_msg {
s32 pgid;
s32 fd;
u64 vaddrin;
u64 len;
};
struct fastrpc_msg {
int pid; /* process group id */
int tid; /* thread id */
u64 ctx; /* invoke caller context */
u32 handle; /* handle to invoke */
u32 sc; /* scalars structure describing the data */
u64 addr; /* physical address */
u64 size; /* size of contiguous region */
};
struct fastrpc_invoke_rsp {
u64 ctx; /* invoke caller context */
int retval; /* invoke return value */
};
struct fastrpc_buf_overlap {
u64 start;
u64 end;
int raix;
u64 mstart;
u64 mend;
u64 offset;
};
struct fastrpc_buf {
struct fastrpc_user *fl;
struct dma_buf *dmabuf;
struct device *dev;
void *virt;
u64 phys;
u64 size;
/* Lock for dma buf attachments */
struct mutex lock;
struct list_head attachments;
/* mmap support */
struct list_head node; /* list of user requested mmaps */
uintptr_t raddr;
};
struct fastrpc_dma_buf_attachment {
struct device *dev;
struct sg_table sgt;
struct list_head node;
};
struct fastrpc_map {
struct list_head node;
struct fastrpc_user *fl;
int fd;
struct dma_buf *buf;
struct sg_table *table;
struct dma_buf_attachment *attach;
u64 phys;
u64 size;
void *va;
u64 len;
u64 raddr;
u32 attr;
struct kref refcount;
};
struct fastrpc_invoke_ctx {
int nscalars;
int nbufs;
int retval;
int pid;
int tgid;
u32 sc;
u32 *crc;
u64 ctxid;
u64 msg_sz;
struct kref refcount;
struct list_head node; /* list of ctxs */
struct completion work;
struct work_struct put_work;
struct fastrpc_msg msg;
struct fastrpc_user *fl;
union fastrpc_remote_arg *rpra;
struct fastrpc_map **maps;
struct fastrpc_buf *buf;
struct fastrpc_invoke_args *args;
struct fastrpc_buf_overlap *olaps;
struct fastrpc_channel_ctx *cctx;
};
struct fastrpc_session_ctx {
struct device *dev;
int sid;
bool used;
bool valid;
};
struct fastrpc_channel_ctx {
int domain_id;
int sesscount;
int vmcount;
u64 perms;
struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS];
struct rpmsg_device *rpdev;
struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS];
spinlock_t lock;
struct idr ctx_idr;
struct list_head users;
struct kref refcount;
/* Flag if dsp attributes are cached */
bool valid_attributes;
u32 dsp_attributes[FASTRPC_MAX_DSP_ATTRIBUTES];
struct fastrpc_device *secure_fdevice;
struct fastrpc_device *fdevice;
struct fastrpc_buf *remote_heap;
struct list_head invoke_interrupted_mmaps;
bool secure;
bool unsigned_support;
u64 dma_mask;
};
struct fastrpc_device {
struct fastrpc_channel_ctx *cctx;
struct miscdevice miscdev;
bool secure;
};
struct fastrpc_user {
struct list_head user;
struct list_head maps;
struct list_head pending;
struct list_head mmaps;
struct fastrpc_channel_ctx *cctx;
struct fastrpc_session_ctx *sctx;
struct fastrpc_buf *init_mem;
int tgid;
int pd;
bool is_secure_dev;
/* Lock for lists */
spinlock_t lock;
/* lock for allocations */
struct mutex mutex;
};
static void fastrpc_free_map(struct kref *ref)
{
struct fastrpc_map *map;
map = container_of(ref, struct fastrpc_map, refcount);
if (map->table) {
if (map->attr & FASTRPC_ATTR_SECUREMAP) {
struct qcom_scm_vmperm perm;
int vmid = map->fl->cctx->vmperms[0].vmid;
u64 src_perms = BIT(QCOM_SCM_VMID_HLOS) | BIT(vmid);
int err = 0;
perm.vmid = QCOM_SCM_VMID_HLOS;
perm.perm = QCOM_SCM_PERM_RWX;
err = qcom_scm_assign_mem(map->phys, map->size,
&src_perms, &perm, 1);
if (err) {
dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
map->phys, map->size, err);
return;
}
}
dma_buf_unmap_attachment_unlocked(map->attach, map->table,
DMA_BIDIRECTIONAL);
dma_buf_detach(map->buf, map->attach);
dma_buf_put(map->buf);
}
if (map->fl) {
spin_lock(&map->fl->lock);
list_del(&map->node);
spin_unlock(&map->fl->lock);
map->fl = NULL;
}
kfree(map);
}
static void fastrpc_map_put(struct fastrpc_map *map)
{
if (map)
kref_put(&map->refcount, fastrpc_free_map);
}
static int fastrpc_map_get(struct fastrpc_map *map)
{
if (!map)
return -ENOENT;
return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
}
static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
struct fastrpc_map **ppmap, bool take_ref)
{
struct fastrpc_session_ctx *sess = fl->sctx;
struct fastrpc_map *map = NULL;
int ret = -ENOENT;
spin_lock(&fl->lock);
list_for_each_entry(map, &fl->maps, node) {
if (map->fd != fd)
continue;
if (take_ref) {
ret = fastrpc_map_get(map);
if (ret) {
dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
__func__, fd, ret);
break;
}
}
*ppmap = map;
ret = 0;
break;
}
spin_unlock(&fl->lock);
return ret;
}
static void fastrpc_buf_free(struct fastrpc_buf *buf)
{
dma_free_coherent(buf->dev, buf->size, buf->virt,
FASTRPC_PHYS(buf->phys));
kfree(buf);
}
static int __fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
u64 size, struct fastrpc_buf **obuf)
{
struct fastrpc_buf *buf;
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (!buf)
return -ENOMEM;
INIT_LIST_HEAD(&buf->attachments);
INIT_LIST_HEAD(&buf->node);
mutex_init(&buf->lock);
buf->fl = fl;
buf->virt = NULL;
buf->phys = 0;
buf->size = size;
buf->dev = dev;
buf->raddr = 0;
buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys,
GFP_KERNEL);
if (!buf->virt) {
mutex_destroy(&buf->lock);
kfree(buf);
return -ENOMEM;
}
*obuf = buf;
return 0;
}
static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev,
u64 size, struct fastrpc_buf **obuf)
{
int ret;
struct fastrpc_buf *buf;
ret = __fastrpc_buf_alloc(fl, dev, size, obuf);
if (ret)
return ret;
buf = *obuf;
if (fl->sctx && fl->sctx->sid)
buf->phys += ((u64)fl->sctx->sid << 32);
return 0;
}
static int fastrpc_remote_heap_alloc(struct fastrpc_user *fl, struct device *dev,
u64 size, struct fastrpc_buf **obuf)
{
struct device *rdev = &fl->cctx->rpdev->dev;
return __fastrpc_buf_alloc(fl, rdev, size, obuf);
}
static void fastrpc_channel_ctx_free(struct kref *ref)
{
struct fastrpc_channel_ctx *cctx;
cctx = container_of(ref, struct fastrpc_channel_ctx, refcount);
kfree(cctx);
}
static void fastrpc_channel_ctx_get(struct fastrpc_channel_ctx *cctx)
{
kref_get(&cctx->refcount);
}
static void fastrpc_channel_ctx_put(struct fastrpc_channel_ctx *cctx)
{
kref_put(&cctx->refcount, fastrpc_channel_ctx_free);
}
static void fastrpc_context_free(struct kref *ref)
{
struct fastrpc_invoke_ctx *ctx;
struct fastrpc_channel_ctx *cctx;
unsigned long flags;
int i;
ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount);
cctx = ctx->cctx;
for (i = 0; i < ctx->nbufs; i++)
fastrpc_map_put(ctx->maps[i]);
if (ctx->buf)
fastrpc_buf_free(ctx->buf);
spin_lock_irqsave(&cctx->lock, flags);
idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4);
spin_unlock_irqrestore(&cctx->lock, flags);
kfree(ctx->maps);
kfree(ctx->olaps);
kfree(ctx);
fastrpc_channel_ctx_put(cctx);
}
static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx)
{
kref_get(&ctx->refcount);
}
static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx)
{
kref_put(&ctx->refcount, fastrpc_context_free);
}
static void fastrpc_context_put_wq(struct work_struct *work)
{
struct fastrpc_invoke_ctx *ctx =
container_of(work, struct fastrpc_invoke_ctx, put_work);
fastrpc_context_put(ctx);
}
#define CMP(aa, bb) ((aa) == (bb) ? 0 : (aa) < (bb) ? -1 : 1)
static int olaps_cmp(const void *a, const void *b)
{
struct fastrpc_buf_overlap *pa = (struct fastrpc_buf_overlap *)a;
struct fastrpc_buf_overlap *pb = (struct fastrpc_buf_overlap *)b;
/* sort with lowest starting buffer first */
int st = CMP(pa->start, pb->start);
/* sort with highest ending buffer first */
int ed = CMP(pb->end, pa->end);
return st == 0 ? ed : st;
}
static void fastrpc_get_buff_overlaps(struct fastrpc_invoke_ctx *ctx)
{
u64 max_end = 0;
int i;
for (i = 0; i < ctx->nbufs; ++i) {
ctx->olaps[i].start = ctx->args[i].ptr;
ctx->olaps[i].end = ctx->olaps[i].start + ctx->args[i].length;
ctx->olaps[i].raix = i;
}
sort(ctx->olaps, ctx->nbufs, sizeof(*ctx->olaps), olaps_cmp, NULL);
for (i = 0; i < ctx->nbufs; ++i) {
/* Falling inside previous range */
if (ctx->olaps[i].start < max_end) {
ctx->olaps[i].mstart = max_end;
ctx->olaps[i].mend = ctx->olaps[i].end;
ctx->olaps[i].offset = max_end - ctx->olaps[i].start;
if (ctx->olaps[i].end > max_end) {
max_end = ctx->olaps[i].end;
} else {
ctx->olaps[i].mend = 0;
ctx->olaps[i].mstart = 0;
}
} else {
ctx->olaps[i].mend = ctx->olaps[i].end;
ctx->olaps[i].mstart = ctx->olaps[i].start;
ctx->olaps[i].offset = 0;
max_end = ctx->olaps[i].end;
}
}
}
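/*
 * Illustrative example: with two user buffers A = [0x1000, 0x1100) and
 * B = [0x1080, 0x1200), A is kept whole while B gets mstart = 0x1100,
 * mend = 0x1200 and offset = 0x80, so only B's non-overlapping tail is
 * counted when sizing the shared payload buffer. A buffer fully contained
 * in an earlier one ends up with mstart == mend == 0 and adds nothing.
 */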
static struct fastrpc_invoke_ctx *fastrpc_context_alloc(
struct fastrpc_user *user, u32 kernel, u32 sc,
struct fastrpc_invoke_args *args)
{
struct fastrpc_channel_ctx *cctx = user->cctx;
struct fastrpc_invoke_ctx *ctx = NULL;
unsigned long flags;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ctx->node);
ctx->fl = user;
ctx->nscalars = REMOTE_SCALARS_LENGTH(sc);
ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) +
REMOTE_SCALARS_OUTBUFS(sc);
if (ctx->nscalars) {
ctx->maps = kcalloc(ctx->nscalars,
sizeof(*ctx->maps), GFP_KERNEL);
if (!ctx->maps) {
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
ctx->olaps = kcalloc(ctx->nscalars,
sizeof(*ctx->olaps), GFP_KERNEL);
if (!ctx->olaps) {
kfree(ctx->maps);
kfree(ctx);
return ERR_PTR(-ENOMEM);
}
ctx->args = args;
fastrpc_get_buff_overlaps(ctx);
}
/* Released in fastrpc_context_put() */
fastrpc_channel_ctx_get(cctx);
ctx->sc = sc;
ctx->retval = -1;
ctx->pid = current->pid;
ctx->tgid = user->tgid;
ctx->cctx = cctx;
init_completion(&ctx->work);
INIT_WORK(&ctx->put_work, fastrpc_context_put_wq);
spin_lock(&user->lock);
list_add_tail(&ctx->node, &user->pending);
spin_unlock(&user->lock);
spin_lock_irqsave(&cctx->lock, flags);
ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1,
FASTRPC_CTX_MAX, GFP_ATOMIC);
if (ret < 0) {
spin_unlock_irqrestore(&cctx->lock, flags);
goto err_idr;
}
ctx->ctxid = ret << 4;
spin_unlock_irqrestore(&cctx->lock, flags);
kref_init(&ctx->refcount);
return ctx;
err_idr:
spin_lock(&user->lock);
list_del(&ctx->node);
spin_unlock(&user->lock);
fastrpc_channel_ctx_put(cctx);
kfree(ctx->maps);
kfree(ctx->olaps);
kfree(ctx);
return ERR_PTR(ret);
}
static struct sg_table *
fastrpc_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
struct fastrpc_dma_buf_attachment *a = attachment->priv;
struct sg_table *table;
int ret;
table = &a->sgt;
ret = dma_map_sgtable(attachment->dev, table, dir, 0);
if (ret)
table = ERR_PTR(ret);
return table;
}
static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *table,
enum dma_data_direction dir)
{
dma_unmap_sgtable(attach->dev, table, dir, 0);
}
static void fastrpc_release(struct dma_buf *dmabuf)
{
struct fastrpc_buf *buffer = dmabuf->priv;
fastrpc_buf_free(buffer);
}
static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct fastrpc_dma_buf_attachment *a;
struct fastrpc_buf *buffer = dmabuf->priv;
int ret;
a = kzalloc(sizeof(*a), GFP_KERNEL);
if (!a)
return -ENOMEM;
ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt,
FASTRPC_PHYS(buffer->phys), buffer->size);
if (ret < 0) {
dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
kfree(a);
return -EINVAL;
}
a->dev = attachment->dev;
INIT_LIST_HEAD(&a->node);
attachment->priv = a;
mutex_lock(&buffer->lock);
list_add(&a->node, &buffer->attachments);
mutex_unlock(&buffer->lock);
return 0;
}
static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
struct dma_buf_attachment *attachment)
{
struct fastrpc_dma_buf_attachment *a = attachment->priv;
struct fastrpc_buf *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
list_del(&a->node);
mutex_unlock(&buffer->lock);
sg_free_table(&a->sgt);
kfree(a);
}
static int fastrpc_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
struct fastrpc_buf *buf = dmabuf->priv;
iosys_map_set_vaddr(map, buf->virt);
return 0;
}
static int fastrpc_mmap(struct dma_buf *dmabuf,
struct vm_area_struct *vma)
{
struct fastrpc_buf *buf = dmabuf->priv;
size_t size = vma->vm_end - vma->vm_start;
dma_resv_assert_held(dmabuf->resv);
return dma_mmap_coherent(buf->dev, vma, buf->virt,
FASTRPC_PHYS(buf->phys), size);
}
static const struct dma_buf_ops fastrpc_dma_buf_ops = {
.attach = fastrpc_dma_buf_attach,
.detach = fastrpc_dma_buf_detatch,
.map_dma_buf = fastrpc_map_dma_buf,
.unmap_dma_buf = fastrpc_unmap_dma_buf,
.mmap = fastrpc_mmap,
.vmap = fastrpc_vmap,
.release = fastrpc_release,
};
static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
u64 len, u32 attr, struct fastrpc_map **ppmap)
{
struct fastrpc_session_ctx *sess = fl->sctx;
struct fastrpc_map *map = NULL;
struct sg_table *table;
int err = 0;
if (!fastrpc_map_lookup(fl, fd, ppmap, true))
return 0;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (!map)
return -ENOMEM;
INIT_LIST_HEAD(&map->node);
kref_init(&map->refcount);
map->fl = fl;
map->fd = fd;
map->buf = dma_buf_get(fd);
if (IS_ERR(map->buf)) {
err = PTR_ERR(map->buf);
goto get_err;
}
map->attach = dma_buf_attach(map->buf, sess->dev);
if (IS_ERR(map->attach)) {
dev_err(sess->dev, "Failed to attach dmabuf\n");
err = PTR_ERR(map->attach);
goto attach_err;
}
table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
if (IS_ERR(table)) {
err = PTR_ERR(table);
goto map_err;
}
map->table = table;
if (attr & FASTRPC_ATTR_SECUREMAP) {
map->phys = sg_phys(map->table->sgl);
} else {
map->phys = sg_dma_address(map->table->sgl);
map->phys += ((u64)fl->sctx->sid << 32);
}
map->size = len;
map->va = sg_virt(map->table->sgl);
map->len = len;
if (attr & FASTRPC_ATTR_SECUREMAP) {
/*
* If subsystem VMIDs are defined in DTSI, then do
* hyp_assign from HLOS to those VM(s)
*/
u64 src_perms = BIT(QCOM_SCM_VMID_HLOS);
struct qcom_scm_vmperm dst_perms[2] = {0};
dst_perms[0].vmid = QCOM_SCM_VMID_HLOS;
dst_perms[0].perm = QCOM_SCM_PERM_RW;
dst_perms[1].vmid = fl->cctx->vmperms[0].vmid;
dst_perms[1].perm = QCOM_SCM_PERM_RWX;
map->attr = attr;
err = qcom_scm_assign_mem(map->phys, (u64)map->size, &src_perms, dst_perms, 2);
if (err) {
dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
map->phys, map->size, err);
goto map_err;
}
}
spin_lock(&fl->lock);
list_add_tail(&map->node, &fl->maps);
spin_unlock(&fl->lock);
*ppmap = map;
return 0;
map_err:
dma_buf_detach(map->buf, map->attach);
attach_err:
dma_buf_put(map->buf);
get_err:
fastrpc_map_put(map);
return err;
}
/*
* Fastrpc payload buffer with metadata looks like:
*
* >>>>>> START of METADATA <<<<<<<<<
* +---------------------------------+
* | Arguments |
* | type:(union fastrpc_remote_arg)|
* | (0 - N) |
* +---------------------------------+
* | Invoke Buffer list |
* | type:(struct fastrpc_invoke_buf)|
* | (0 - N) |
* +---------------------------------+
* | Page info list |
* | type:(struct fastrpc_phy_page) |
* | (0 - N) |
* +---------------------------------+
* | Optional info |
* |(can be specific to SoC/Firmware)|
* +---------------------------------+
* >>>>>>>> END of METADATA <<<<<<<<<
* +---------------------------------+
* | Inline ARGS |
* | (0-N) |
* +---------------------------------+
*/
static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx)
{
int size = 0;
size = (sizeof(struct fastrpc_remote_buf) +
sizeof(struct fastrpc_invoke_buf) +
sizeof(struct fastrpc_phy_page)) * ctx->nscalars +
sizeof(u64) * FASTRPC_MAX_FDLIST +
sizeof(u32) * FASTRPC_MAX_CRCLIST;
return size;
}
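/*
 * Illustrative sizing (assuming the usual 16/8/16 byte layouts of the
 * structs above): each scalar costs 16 + 8 + 16 = 40 bytes, plus a fixed
 * 16 * 8 byte fdlist and 64 * 4 byte crclist. For four scalars the
 * metadata is 4 * 40 + 128 + 256 = 544 bytes, which the payload sizing
 * below rounds up to the next FASTRPC_ALIGN (128 byte) boundary, 640,
 * before the inline args start.
 */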
static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen)
{
u64 size = 0;
int oix;
size = ALIGN(metalen, FASTRPC_ALIGN);
for (oix = 0; oix < ctx->nbufs; oix++) {
int i = ctx->olaps[oix].raix;
if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) {
if (ctx->olaps[oix].offset == 0)
size = ALIGN(size, FASTRPC_ALIGN);
size += (ctx->olaps[oix].mend - ctx->olaps[oix].mstart);
}
}
return size;
}
static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx)
{
struct device *dev = ctx->fl->sctx->dev;
int i, err;
for (i = 0; i < ctx->nscalars; ++i) {
if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 ||
ctx->args[i].length == 0)
continue;
err = fastrpc_map_create(ctx->fl, ctx->args[i].fd,
ctx->args[i].length, ctx->args[i].attr, &ctx->maps[i]);
if (err) {
dev_err(dev, "Error Creating map %d\n", err);
return -EINVAL;
}
}
return 0;
}
static struct fastrpc_invoke_buf *fastrpc_invoke_buf_start(union fastrpc_remote_arg *pra, int len)
{
return (struct fastrpc_invoke_buf *)(&pra[len]);
}
static struct fastrpc_phy_page *fastrpc_phy_page_start(struct fastrpc_invoke_buf *buf, int len)
{
return (struct fastrpc_phy_page *)(&buf[len]);
}
static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
{
struct device *dev = ctx->fl->sctx->dev;
union fastrpc_remote_arg *rpra;
struct fastrpc_invoke_buf *list;
struct fastrpc_phy_page *pages;
int inbufs, i, oix, err = 0;
u64 len, rlen, pkt_size;
u64 pg_start, pg_end;
uintptr_t args;
int metalen;
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
metalen = fastrpc_get_meta_size(ctx);
pkt_size = fastrpc_get_payload_size(ctx, metalen);
err = fastrpc_create_maps(ctx);
if (err)
return err;
ctx->msg_sz = pkt_size;
err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf);
if (err)
return err;
rpra = ctx->buf->virt;
list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
pages = fastrpc_phy_page_start(list, ctx->nscalars);
args = (uintptr_t)ctx->buf->virt + metalen;
rlen = pkt_size - metalen;
ctx->rpra = rpra;
for (oix = 0; oix < ctx->nbufs; ++oix) {
int mlen;
i = ctx->olaps[oix].raix;
len = ctx->args[i].length;
rpra[i].buf.pv = 0;
rpra[i].buf.len = len;
list[i].num = len ? 1 : 0;
list[i].pgidx = i;
if (!len)
continue;
if (ctx->maps[i]) {
struct vm_area_struct *vma = NULL;
rpra[i].buf.pv = (u64) ctx->args[i].ptr;
pages[i].addr = ctx->maps[i]->phys;
mmap_read_lock(current->mm);
vma = find_vma(current->mm, ctx->args[i].ptr);
if (vma)
pages[i].addr += ctx->args[i].ptr -
vma->vm_start;
mmap_read_unlock(current->mm);
pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
PAGE_SHIFT;
pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
} else {
if (ctx->olaps[oix].offset == 0) {
rlen -= ALIGN(args, FASTRPC_ALIGN) - args;
args = ALIGN(args, FASTRPC_ALIGN);
}
mlen = ctx->olaps[oix].mend - ctx->olaps[oix].mstart;
if (rlen < mlen)
goto bail;
rpra[i].buf.pv = args - ctx->olaps[oix].offset;
pages[i].addr = ctx->buf->phys -
ctx->olaps[oix].offset +
(pkt_size - rlen);
pages[i].addr = pages[i].addr & PAGE_MASK;
pg_start = (args & PAGE_MASK) >> PAGE_SHIFT;
pg_end = ((args + len - 1) & PAGE_MASK) >> PAGE_SHIFT;
pages[i].size = (pg_end - pg_start + 1) * PAGE_SIZE;
args = args + mlen;
rlen -= mlen;
}
if (i < inbufs && !ctx->maps[i]) {
void *dst = (void *)(uintptr_t)rpra[i].buf.pv;
void *src = (void *)(uintptr_t)ctx->args[i].ptr;
if (!kernel) {
if (copy_from_user(dst, (void __user *)src,
len)) {
err = -EFAULT;
goto bail;
}
} else {
memcpy(dst, src, len);
}
}
}
for (i = ctx->nbufs; i < ctx->nscalars; ++i) {
list[i].num = ctx->args[i].length ? 1 : 0;
list[i].pgidx = i;
if (ctx->maps[i]) {
pages[i].addr = ctx->maps[i]->phys;
pages[i].size = ctx->maps[i]->size;
}
rpra[i].dma.fd = ctx->args[i].fd;
rpra[i].dma.len = ctx->args[i].length;
rpra[i].dma.offset = (u64) ctx->args[i].ptr;
}
bail:
if (err)
dev_err(dev, "Error: get invoke args failed:%d\n", err);
return err;
}
static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
u32 kernel)
{
union fastrpc_remote_arg *rpra = ctx->rpra;
struct fastrpc_user *fl = ctx->fl;
struct fastrpc_map *mmap = NULL;
struct fastrpc_invoke_buf *list;
struct fastrpc_phy_page *pages;
u64 *fdlist;
int i, inbufs, outbufs, handles;
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
outbufs = REMOTE_SCALARS_OUTBUFS(ctx->sc);
handles = REMOTE_SCALARS_INHANDLES(ctx->sc) + REMOTE_SCALARS_OUTHANDLES(ctx->sc);
list = fastrpc_invoke_buf_start(rpra, ctx->nscalars);
pages = fastrpc_phy_page_start(list, ctx->nscalars);
fdlist = (uint64_t *)(pages + inbufs + outbufs + handles);
for (i = inbufs; i < ctx->nbufs; ++i) {
if (!ctx->maps[i]) {
void *src = (void *)(uintptr_t)rpra[i].buf.pv;
void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
u64 len = rpra[i].buf.len;
if (!kernel) {
if (copy_to_user((void __user *)dst, src, len))
return -EFAULT;
} else {
memcpy(dst, src, len);
}
}
}
for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
if (!fdlist[i])
break;
if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
fastrpc_map_put(mmap);
}
return 0;
}
static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx,
struct fastrpc_invoke_ctx *ctx,
u32 kernel, uint32_t handle)
{
struct fastrpc_channel_ctx *cctx;
struct fastrpc_user *fl = ctx->fl;
struct fastrpc_msg *msg = &ctx->msg;
int ret;
cctx = fl->cctx;
msg->pid = fl->tgid;
msg->tid = current->pid;
if (kernel)
msg->pid = 0;
msg->ctx = ctx->ctxid | fl->pd;
msg->handle = handle;
msg->sc = ctx->sc;
msg->addr = ctx->buf ? ctx->buf->phys : 0;
msg->size = roundup(ctx->msg_sz, PAGE_SIZE);
fastrpc_context_get(ctx);
ret = rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg));
if (ret)
fastrpc_context_put(ctx);
return ret;
}
static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel,
u32 handle, u32 sc,
struct fastrpc_invoke_args *args)
{
struct fastrpc_invoke_ctx *ctx = NULL;
struct fastrpc_buf *buf, *b;
int err = 0;
if (!fl->sctx)
return -EINVAL;
if (!fl->cctx->rpdev)
return -EPIPE;
if (handle == FASTRPC_INIT_HANDLE && !kernel) {
dev_warn_ratelimited(fl->sctx->dev, "user app trying to send a kernel RPC message (%d)\n", handle);
return -EPERM;
}
ctx = fastrpc_context_alloc(fl, kernel, sc, args);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
if (ctx->nscalars) {
err = fastrpc_get_args(kernel, ctx);
if (err)
goto bail;
}
/* make sure that all CPU memory writes are seen by DSP */
dma_wmb();
/* Send invoke buffer to remote dsp */
err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle);
if (err)
goto bail;
if (kernel) {
if (!wait_for_completion_timeout(&ctx->work, 10 * HZ))
err = -ETIMEDOUT;
} else {
err = wait_for_completion_interruptible(&ctx->work);
}
if (err)
goto bail;
/* Check the response from remote dsp */
err = ctx->retval;
if (err)
goto bail;
if (ctx->nscalars) {
/* make sure that all memory writes by DSP are seen by CPU */
dma_rmb();
/* populate all the output buffers with results */
err = fastrpc_put_args(ctx, kernel);
if (err)
goto bail;
}
bail:
if (err != -ERESTARTSYS && err != -ETIMEDOUT) {
/* We are done with this compute context */
spin_lock(&fl->lock);
list_del(&ctx->node);
spin_unlock(&fl->lock);
fastrpc_context_put(ctx);
}
if (err == -ERESTARTSYS) {
list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
list_del(&buf->node);
list_add_tail(&buf->node, &fl->cctx->invoke_interrupted_mmaps);
}
}
if (err)
dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err);
return err;
}
static bool is_session_rejected(struct fastrpc_user *fl, bool unsigned_pd_request)
{
/* Check if the device node is non-secure and the channel is secure */
if (!fl->is_secure_dev && fl->cctx->secure) {
/*
* Allow untrusted applications to offload only to an unsigned PD when the
* channel is configured as secure, and block untrusted apps on channels
* that do not support unsigned PD offload.
*/
if (!fl->cctx->unsigned_support || !unsigned_pd_request) {
dev_err(&fl->cctx->rpdev->dev, "Error: Untrusted application trying to offload to signed PD");
return true;
}
}
return false;
}
static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
char __user *argp)
{
struct fastrpc_init_create_static init;
struct fastrpc_invoke_args *args;
struct fastrpc_phy_page pages[1];
char *name;
int err;
struct {
int pgid;
u32 namelen;
u32 pageslen;
} inbuf;
u32 sc;
args = kcalloc(FASTRPC_CREATE_STATIC_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
if (!args)
return -ENOMEM;
if (copy_from_user(&init, argp, sizeof(init))) {
err = -EFAULT;
goto err;
}
if (init.namelen > INIT_FILE_NAMELEN_MAX) {
err = -EINVAL;
goto err;
}
name = kzalloc(init.namelen, GFP_KERNEL);
if (!name) {
err = -ENOMEM;
goto err;
}
if (copy_from_user(name, (void __user *)(uintptr_t)init.name, init.namelen)) {
err = -EFAULT;
goto err_name;
}
if (!fl->cctx->remote_heap) {
err = fastrpc_remote_heap_alloc(fl, fl->sctx->dev, init.memlen,
&fl->cctx->remote_heap);
if (err)
goto err_name;
/* Map if we have any heap VMIDs associated with this ADSP Static Process. */
if (fl->cctx->vmcount) {
err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
(u64)fl->cctx->remote_heap->size,
&fl->cctx->perms,
fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
goto err_map;
}
}
}
inbuf.pgid = fl->tgid;
inbuf.namelen = init.namelen;
inbuf.pageslen = 0;
fl->pd = USER_PD;
args[0].ptr = (u64)(uintptr_t)&inbuf;
args[0].length = sizeof(inbuf);
args[0].fd = -1;
args[1].ptr = (u64)(uintptr_t)name;
args[1].length = inbuf.namelen;
args[1].fd = -1;
pages[0].addr = fl->cctx->remote_heap->phys;
pages[0].size = fl->cctx->remote_heap->size;
args[2].ptr = (u64)(uintptr_t) pages;
args[2].length = sizeof(*pages);
args[2].fd = -1;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_STATIC, 3, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
sc, args);
if (err)
goto err_invoke;
kfree(args);
return 0;
err_invoke:
if (fl->cctx->vmcount) {
u64 src_perms = 0;
struct qcom_scm_vmperm dst_perms;
u32 i;
for (i = 0; i < fl->cctx->vmcount; i++)
src_perms |= BIT(fl->cctx->vmperms[i].vmid);
dst_perms.vmid = QCOM_SCM_VMID_HLOS;
dst_perms.perm = QCOM_SCM_PERM_RWX;
err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
(u64)fl->cctx->remote_heap->size,
&src_perms, &dst_perms, 1);
if (err)
dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
}
err_map:
fastrpc_buf_free(fl->cctx->remote_heap);
err_name:
kfree(name);
err:
kfree(args);
return err;
}
static int fastrpc_init_create_process(struct fastrpc_user *fl,
char __user *argp)
{
struct fastrpc_init_create init;
struct fastrpc_invoke_args *args;
struct fastrpc_phy_page pages[1];
struct fastrpc_map *map = NULL;
struct fastrpc_buf *imem = NULL;
int memlen;
int err;
struct {
int pgid;
u32 namelen;
u32 filelen;
u32 pageslen;
u32 attrs;
u32 siglen;
} inbuf;
u32 sc;
bool unsigned_module = false;
args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL);
if (!args)
return -ENOMEM;
if (copy_from_user(&init, argp, sizeof(init))) {
err = -EFAULT;
goto err;
}
if (init.attrs & FASTRPC_MODE_UNSIGNED_MODULE)
unsigned_module = true;
if (is_session_rejected(fl, unsigned_module)) {
err = -ECONNREFUSED;
goto err;
}
if (init.filelen > INIT_FILELEN_MAX) {
err = -EINVAL;
goto err;
}
inbuf.pgid = fl->tgid;
inbuf.namelen = strlen(current->comm) + 1;
inbuf.filelen = init.filelen;
inbuf.pageslen = 1;
inbuf.attrs = init.attrs;
inbuf.siglen = init.siglen;
fl->pd = USER_PD;
if (init.filelen && init.filefd) {
err = fastrpc_map_create(fl, init.filefd, init.filelen, 0, &map);
if (err)
goto err;
}
memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4),
1024 * 1024);
err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen,
&imem);
if (err)
goto err_alloc;
fl->init_mem = imem;
args[0].ptr = (u64)(uintptr_t)&inbuf;
args[0].length = sizeof(inbuf);
args[0].fd = -1;
args[1].ptr = (u64)(uintptr_t)current->comm;
args[1].length = inbuf.namelen;
args[1].fd = -1;
args[2].ptr = (u64) init.file;
args[2].length = inbuf.filelen;
args[2].fd = init.filefd;
pages[0].addr = imem->phys;
pages[0].size = imem->size;
args[3].ptr = (u64)(uintptr_t) pages;
args[3].length = 1 * sizeof(*pages);
args[3].fd = -1;
args[4].ptr = (u64)(uintptr_t)&inbuf.attrs;
args[4].length = sizeof(inbuf.attrs);
args[4].fd = -1;
args[5].ptr = (u64)(uintptr_t) &inbuf.siglen;
args[5].length = sizeof(inbuf.siglen);
args[5].fd = -1;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0);
if (init.attrs)
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 4, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
sc, args);
if (err)
goto err_invoke;
kfree(args);
return 0;
err_invoke:
fl->init_mem = NULL;
fastrpc_buf_free(imem);
err_alloc:
fastrpc_map_put(map);
err:
kfree(args);
return err;
}
static struct fastrpc_session_ctx *fastrpc_session_alloc(
struct fastrpc_channel_ctx *cctx)
{
struct fastrpc_session_ctx *session = NULL;
unsigned long flags;
int i;
spin_lock_irqsave(&cctx->lock, flags);
for (i = 0; i < cctx->sesscount; i++) {
if (!cctx->session[i].used && cctx->session[i].valid) {
cctx->session[i].used = true;
session = &cctx->session[i];
break;
}
}
spin_unlock_irqrestore(&cctx->lock, flags);
return session;
}
static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx,
struct fastrpc_session_ctx *session)
{
unsigned long flags;
spin_lock_irqsave(&cctx->lock, flags);
session->used = false;
spin_unlock_irqrestore(&cctx->lock, flags);
}
static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl)
{
struct fastrpc_invoke_args args[1];
int tgid = 0;
u32 sc;
tgid = fl->tgid;
args[0].ptr = (u64)(uintptr_t) &tgid;
args[0].length = sizeof(tgid);
args[0].fd = -1;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0);
return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
sc, &args[0]);
}
static int fastrpc_device_release(struct inode *inode, struct file *file)
{
struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
struct fastrpc_channel_ctx *cctx = fl->cctx;
struct fastrpc_invoke_ctx *ctx, *n;
struct fastrpc_map *map, *m;
struct fastrpc_buf *buf, *b;
unsigned long flags;
fastrpc_release_current_dsp_process(fl);
spin_lock_irqsave(&cctx->lock, flags);
list_del(&fl->user);
spin_unlock_irqrestore(&cctx->lock, flags);
if (fl->init_mem)
fastrpc_buf_free(fl->init_mem);
list_for_each_entry_safe(ctx, n, &fl->pending, node) {
list_del(&ctx->node);
fastrpc_context_put(ctx);
}
list_for_each_entry_safe(map, m, &fl->maps, node)
fastrpc_map_put(map);
list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
list_del(&buf->node);
fastrpc_buf_free(buf);
}
fastrpc_session_free(cctx, fl->sctx);
fastrpc_channel_ctx_put(cctx);
mutex_destroy(&fl->mutex);
kfree(fl);
file->private_data = NULL;
return 0;
}
static int fastrpc_device_open(struct inode *inode, struct file *filp)
{
struct fastrpc_channel_ctx *cctx;
struct fastrpc_device *fdevice;
struct fastrpc_user *fl = NULL;
unsigned long flags;
fdevice = miscdev_to_fdevice(filp->private_data);
cctx = fdevice->cctx;
fl = kzalloc(sizeof(*fl), GFP_KERNEL);
if (!fl)
return -ENOMEM;
/* Released in fastrpc_device_release() */
fastrpc_channel_ctx_get(cctx);
filp->private_data = fl;
spin_lock_init(&fl->lock);
mutex_init(&fl->mutex);
INIT_LIST_HEAD(&fl->pending);
INIT_LIST_HEAD(&fl->maps);
INIT_LIST_HEAD(&fl->mmaps);
INIT_LIST_HEAD(&fl->user);
fl->tgid = current->tgid;
fl->cctx = cctx;
fl->is_secure_dev = fdevice->secure;
fl->sctx = fastrpc_session_alloc(cctx);
if (!fl->sctx) {
dev_err(&cctx->rpdev->dev, "No session available\n");
mutex_destroy(&fl->mutex);
kfree(fl);
return -EBUSY;
}
spin_lock_irqsave(&cctx->lock, flags);
list_add_tail(&fl->user, &cctx->users);
spin_unlock_irqrestore(&cctx->lock, flags);
return 0;
}
static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_alloc_dma_buf bp;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct fastrpc_buf *buf = NULL;
int err;
if (copy_from_user(&bp, argp, sizeof(bp)))
return -EFAULT;
err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf);
if (err)
return err;
exp_info.ops = &fastrpc_dma_buf_ops;
exp_info.size = bp.size;
exp_info.flags = O_RDWR;
exp_info.priv = buf;
buf->dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(buf->dmabuf)) {
err = PTR_ERR(buf->dmabuf);
fastrpc_buf_free(buf);
return err;
}
bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE);
if (bp.fd < 0) {
dma_buf_put(buf->dmabuf);
return -EINVAL;
}
if (copy_to_user(argp, &bp, sizeof(bp))) {
/*
* The usercopy failed, but we can't do much about it, as
* dma_buf_fd() already called fd_install() and made the
* file descriptor accessible for the current process. It
* might already be closed and dmabuf no longer valid when
* we reach this point. Therefore "leak" the fd and rely on
* the process exit path to do any required cleanup.
*/
return -EFAULT;
}
return 0;
}
static int fastrpc_init_attach(struct fastrpc_user *fl, int pd)
{
struct fastrpc_invoke_args args[1];
int tgid = fl->tgid;
u32 sc;
args[0].ptr = (u64)(uintptr_t) &tgid;
args[0].length = sizeof(tgid);
args[0].fd = -1;
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0);
fl->pd = pd;
return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE,
sc, &args[0]);
}
static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_invoke_args *args = NULL;
struct fastrpc_invoke inv;
u32 nscalars;
int err;
if (copy_from_user(&inv, argp, sizeof(inv)))
return -EFAULT;
/* nscalars is truncated here to max supported value */
nscalars = REMOTE_SCALARS_LENGTH(inv.sc);
if (nscalars) {
args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL);
if (!args)
return -ENOMEM;
if (copy_from_user(args, (void __user *)(uintptr_t)inv.args,
nscalars * sizeof(*args))) {
kfree(args);
return -EFAULT;
}
}
err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args);
kfree(args);
return err;
}
static int fastrpc_get_info_from_dsp(struct fastrpc_user *fl, uint32_t *dsp_attr_buf,
uint32_t dsp_attr_buf_len)
{
struct fastrpc_invoke_args args[2] = { 0 };
/* Capability filled in userspace */
dsp_attr_buf[0] = 0;
args[0].ptr = (u64)(uintptr_t)&dsp_attr_buf_len;
args[0].length = sizeof(dsp_attr_buf_len);
args[0].fd = -1;
args[1].ptr = (u64)(uintptr_t)&dsp_attr_buf[1];
args[1].length = dsp_attr_buf_len;
args[1].fd = -1;
fl->pd = USER_PD;
return fastrpc_internal_invoke(fl, true, FASTRPC_DSP_UTILITIES_HANDLE,
FASTRPC_SCALARS(0, 1, 1), args);
}
static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
struct fastrpc_user *fl)
{
struct fastrpc_channel_ctx *cctx = fl->cctx;
uint32_t attribute_id = cap->attribute_id;
uint32_t *dsp_attributes;
unsigned long flags;
uint32_t domain = cap->domain;
int err;
spin_lock_irqsave(&cctx->lock, flags);
/* check if we have already queried the DSP for attributes */
if (cctx->valid_attributes) {
spin_unlock_irqrestore(&cctx->lock, flags);
goto done;
}
spin_unlock_irqrestore(&cctx->lock, flags);
dsp_attributes = kzalloc(FASTRPC_MAX_DSP_ATTRIBUTES_LEN, GFP_KERNEL);
if (!dsp_attributes)
return -ENOMEM;
err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
if (err == DSP_UNSUPPORTED_API) {
dev_info(&cctx->rpdev->dev,
"Warning: DSP capabilities not supported on domain: %d\n", domain);
kfree(dsp_attributes);
return -EOPNOTSUPP;
} else if (err) {
dev_err(&cctx->rpdev->dev, "Error: dsp information is incorrect err: %d\n", err);
kfree(dsp_attributes);
return err;
}
spin_lock_irqsave(&cctx->lock, flags);
memcpy(cctx->dsp_attributes, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES_LEN);
cctx->valid_attributes = true;
spin_unlock_irqrestore(&cctx->lock, flags);
kfree(dsp_attributes);
done:
cap->capability = cctx->dsp_attributes[attribute_id];
return 0;
}
static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_ioctl_capability cap = {0};
int err = 0;
if (copy_from_user(&cap, argp, sizeof(cap)))
return -EFAULT;
cap.capability = 0;
if (cap.domain >= FASTRPC_DEV_MAX) {
dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
cap.domain, err);
return -ECHRNG;
}
/* fastrpc capability queries are not supported on the modem domain */
if (cap.domain == MDSP_DOMAIN_ID) {
dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
return -ECHRNG;
}
if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
cap.attribute_id, err);
return -EOVERFLOW;
}
err = fastrpc_get_info_from_kernel(&cap, fl);
if (err)
return err;
if (copy_to_user(argp, &cap.capability, sizeof(cap.capability)))
return -EFAULT;
return 0;
}
static int fastrpc_req_munmap_impl(struct fastrpc_user *fl, struct fastrpc_buf *buf)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
struct fastrpc_munmap_req_msg req_msg;
struct device *dev = fl->sctx->dev;
int err;
u32 sc;
req_msg.pgid = fl->tgid;
req_msg.size = buf->size;
req_msg.vaddr = buf->raddr;
args[0].ptr = (u64) (uintptr_t) &req_msg;
args[0].length = sizeof(req_msg);
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MUNMAP, 1, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
&args[0]);
if (!err) {
dev_dbg(dev, "unmap\tpt 0x%09lx OK\n", buf->raddr);
spin_lock(&fl->lock);
list_del(&buf->node);
spin_unlock(&fl->lock);
fastrpc_buf_free(buf);
} else {
dev_err(dev, "unmap\tpt 0x%09lx ERROR\n", buf->raddr);
}
return err;
}
static int fastrpc_req_munmap(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_buf *buf = NULL, *iter, *b;
struct fastrpc_req_munmap req;
struct device *dev = fl->sctx->dev;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
spin_lock(&fl->lock);
list_for_each_entry_safe(iter, b, &fl->mmaps, node) {
if ((iter->raddr == req.vaddrout) && (iter->size == req.size)) {
buf = iter;
break;
}
}
spin_unlock(&fl->lock);
if (!buf) {
dev_err(dev, "mmap\t\tpt 0x%09llx [len 0x%08llx] not in list\n",
req.vaddrout, req.size);
return -EINVAL;
}
return fastrpc_req_munmap_impl(fl, buf);
}
static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_invoke_args args[3] = { [0 ... 2] = { 0 } };
struct fastrpc_buf *buf = NULL;
struct fastrpc_mmap_req_msg req_msg;
struct fastrpc_mmap_rsp_msg rsp_msg;
struct fastrpc_phy_page pages;
struct fastrpc_req_mmap req;
struct device *dev = fl->sctx->dev;
int err;
u32 sc;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
if (req.flags != ADSP_MMAP_ADD_PAGES && req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR) {
dev_err(dev, "flag not supported 0x%x\n", req.flags);
return -EINVAL;
}
if (req.vaddrin) {
dev_err(dev, "adding user allocated pages is not supported\n");
return -EINVAL;
}
if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
err = fastrpc_remote_heap_alloc(fl, dev, req.size, &buf);
else
err = fastrpc_buf_alloc(fl, dev, req.size, &buf);
if (err) {
dev_err(dev, "failed to allocate buffer\n");
return err;
}
req_msg.pgid = fl->tgid;
req_msg.flags = req.flags;
req_msg.vaddr = req.vaddrin;
req_msg.num = sizeof(pages);
args[0].ptr = (u64) (uintptr_t) &req_msg;
args[0].length = sizeof(req_msg);
pages.addr = buf->phys;
pages.size = buf->size;
args[1].ptr = (u64) (uintptr_t) &pages;
args[1].length = sizeof(pages);
args[2].ptr = (u64) (uintptr_t) &rsp_msg;
args[2].length = sizeof(rsp_msg);
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MMAP, 2, 1);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
&args[0]);
if (err) {
dev_err(dev, "mmap error (len 0x%08llx)\n", buf->size);
goto err_invoke;
}
/* update the buffer to be able to deallocate the memory on the DSP */
buf->raddr = (uintptr_t) rsp_msg.vaddr;
/* let the client know the address to use */
req.vaddrout = rsp_msg.vaddr;
/* Add memory to the static PD pool; protection is enforced through the hypervisor */
if (req.flags == ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
err = qcom_scm_assign_mem(buf->phys, (u64)buf->size,
&fl->cctx->perms, fl->cctx->vmperms, fl->cctx->vmcount);
if (err) {
dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
buf->phys, buf->size, err);
goto err_assign;
}
}
spin_lock(&fl->lock);
list_add_tail(&buf->node, &fl->mmaps);
spin_unlock(&fl->lock);
if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
err = -EFAULT;
goto err_assign;
}
dev_dbg(dev, "mmap\t\tpt 0x%09lx OK [len 0x%08llx]\n",
buf->raddr, buf->size);
return 0;
err_assign:
/* don't fall through: on success, fastrpc_req_munmap_impl() has already freed buf */
fastrpc_req_munmap_impl(fl, buf);
return err;
err_invoke:
fastrpc_buf_free(buf);
return err;
}
static int fastrpc_req_mem_unmap_impl(struct fastrpc_user *fl, struct fastrpc_mem_unmap *req)
{
struct fastrpc_invoke_args args[1] = { [0] = { 0 } };
struct fastrpc_map *map = NULL, *iter, *m;
struct fastrpc_mem_unmap_req_msg req_msg = { 0 };
int err = 0;
u32 sc;
struct device *dev = fl->sctx->dev;
spin_lock(&fl->lock);
list_for_each_entry_safe(iter, m, &fl->maps, node) {
if ((req->fd < 0 || iter->fd == req->fd) && (iter->raddr == req->vaddr)) {
map = iter;
break;
}
}
spin_unlock(&fl->lock);
if (!map) {
dev_err(dev, "map not in list\n");
return -EINVAL;
}
req_msg.pgid = fl->tgid;
req_msg.len = map->len;
req_msg.vaddrin = map->raddr;
req_msg.fd = map->fd;
args[0].ptr = (u64) (uintptr_t) &req_msg;
args[0].length = sizeof(req_msg);
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_UNMAP, 1, 0);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc,
&args[0]);
if (err)
dev_err(dev, "unmap\tpt fd = %d, 0x%09llx error\n", map->fd, map->raddr);
/* drop the lookup reference only after the error log no longer needs the map */
fastrpc_map_put(map);
return err;
}
static int fastrpc_req_mem_unmap(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_mem_unmap req;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
return fastrpc_req_mem_unmap_impl(fl, &req);
}
static int fastrpc_req_mem_map(struct fastrpc_user *fl, char __user *argp)
{
struct fastrpc_invoke_args args[4] = { [0 ... 3] = { 0 } };
struct fastrpc_mem_map_req_msg req_msg = { 0 };
struct fastrpc_mmap_rsp_msg rsp_msg = { 0 };
struct fastrpc_mem_unmap req_unmap = { 0 };
struct fastrpc_phy_page pages = { 0 };
struct fastrpc_mem_map req;
struct device *dev = fl->sctx->dev;
struct fastrpc_map *map = NULL;
int err;
u32 sc;
if (copy_from_user(&req, argp, sizeof(req)))
return -EFAULT;
/* create SMMU mapping */
err = fastrpc_map_create(fl, req.fd, req.length, 0, &map);
if (err) {
dev_err(dev, "failed to map buffer, fd = %d\n", req.fd);
return err;
}
req_msg.pgid = fl->tgid;
req_msg.fd = req.fd;
req_msg.offset = req.offset;
req_msg.vaddrin = req.vaddrin;
map->va = (void *) (uintptr_t) req.vaddrin;
req_msg.flags = req.flags;
req_msg.num = sizeof(pages);
req_msg.data_len = 0;
args[0].ptr = (u64) (uintptr_t) &req_msg;
args[0].length = sizeof(req_msg);
pages.addr = map->phys;
pages.size = map->size;
args[1].ptr = (u64) (uintptr_t) &pages;
args[1].length = sizeof(pages);
args[2].ptr = (u64) (uintptr_t) &pages;
args[2].length = 0;
args[3].ptr = (u64) (uintptr_t) &rsp_msg;
args[3].length = sizeof(rsp_msg);
sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_MEM_MAP, 3, 1);
err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, sc, &args[0]);
if (err) {
dev_err(dev, "mem mmap error, fd %d, vaddr %llx, size %lld\n",
req.fd, req.vaddrin, map->size);
goto err_invoke;
}
/* update the buffer to be able to deallocate the memory on the DSP */
map->raddr = rsp_msg.vaddr;
/* let the client know the address to use */
req.vaddrout = rsp_msg.vaddr;
if (copy_to_user((void __user *)argp, &req, sizeof(req))) {
/* unmap the memory and release the buffer */
req_unmap.vaddr = (uintptr_t) rsp_msg.vaddr;
req_unmap.length = map->size;
fastrpc_req_mem_unmap_impl(fl, &req_unmap);
return -EFAULT;
}
return 0;
err_invoke:
fastrpc_map_put(map);
return err;
}
static long fastrpc_device_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data;
char __user *argp = (char __user *)arg;
int err;
switch (cmd) {
case FASTRPC_IOCTL_INVOKE:
err = fastrpc_invoke(fl, argp);
break;
case FASTRPC_IOCTL_INIT_ATTACH:
err = fastrpc_init_attach(fl, ROOT_PD);
break;
case FASTRPC_IOCTL_INIT_ATTACH_SNS:
err = fastrpc_init_attach(fl, SENSORS_PD);
break;
case FASTRPC_IOCTL_INIT_CREATE_STATIC:
err = fastrpc_init_create_static_process(fl, argp);
break;
case FASTRPC_IOCTL_INIT_CREATE:
err = fastrpc_init_create_process(fl, argp);
break;
case FASTRPC_IOCTL_ALLOC_DMA_BUFF:
err = fastrpc_dmabuf_alloc(fl, argp);
break;
case FASTRPC_IOCTL_MMAP:
err = fastrpc_req_mmap(fl, argp);
break;
case FASTRPC_IOCTL_MUNMAP:
err = fastrpc_req_munmap(fl, argp);
break;
case FASTRPC_IOCTL_MEM_MAP:
err = fastrpc_req_mem_map(fl, argp);
break;
case FASTRPC_IOCTL_MEM_UNMAP:
err = fastrpc_req_mem_unmap(fl, argp);
break;
case FASTRPC_IOCTL_GET_DSP_INFO:
err = fastrpc_get_dsp_info(fl, argp);
break;
default:
err = -ENOTTY;
break;
}
return err;
}
static const struct file_operations fastrpc_fops = {
.open = fastrpc_device_open,
.release = fastrpc_device_release,
.unlocked_ioctl = fastrpc_device_ioctl,
.compat_ioctl = fastrpc_device_ioctl,
};
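/*
* Minimal userspace usage sketch for the character device these fops back
* (hypothetical, not part of this driver; the struct and ioctl definitions
* are assumed to come from the fastrpc uapi header, and the node name from
* fastrpc_device_register() below, e.g. /dev/fastrpc-adsp):
*
*   struct fastrpc_invoke_args arg = {
*           .ptr = (__u64)(uintptr_t)buf,
*           .length = sizeof(buf),
*           .fd = -1,
*   };
*   struct fastrpc_invoke inv = {
*           .handle = handle,   // remote handle of the target module
*           .sc = sc,           // method/in/out counts, packed the same
*                               // way FASTRPC_SCALARS() does in-kernel
*           .args = (__u64)(uintptr_t)&arg,
*   };
*   int fd = open("/dev/fastrpc-adsp", O_RDWR);
*
*   ioctl(fd, FASTRPC_IOCTL_INIT_ATTACH);   // attach to the root PD
*   ioctl(fd, FASTRPC_IOCTL_INVOKE, &inv);
*/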
static int fastrpc_cb_probe(struct platform_device *pdev)
{
struct fastrpc_channel_ctx *cctx;
struct fastrpc_session_ctx *sess;
struct device *dev = &pdev->dev;
int i, sessions = 0;
unsigned long flags;
int rc;
cctx = dev_get_drvdata(dev->parent);
if (!cctx)
return -EINVAL;
of_property_read_u32(dev->of_node, "qcom,nsessions", &sessions);
spin_lock_irqsave(&cctx->lock, flags);
if (cctx->sesscount >= FASTRPC_MAX_SESSIONS) {
dev_err(&pdev->dev, "too many sessions\n");
spin_unlock_irqrestore(&cctx->lock, flags);
return -ENOSPC;
}
sess = &cctx->session[cctx->sesscount++];
sess->used = false;
sess->valid = true;
sess->dev = dev;
dev_set_drvdata(dev, sess);
if (of_property_read_u32(dev->of_node, "reg", &sess->sid))
dev_info(dev, "FastRPC Session ID not specified in DT\n");
if (sessions > 0) {
struct fastrpc_session_ctx *dup_sess;
for (i = 1; i < sessions; i++) {
if (cctx->sesscount >= FASTRPC_MAX_SESSIONS)
break;
dup_sess = &cctx->session[cctx->sesscount++];
memcpy(dup_sess, sess, sizeof(*dup_sess));
}
}
spin_unlock_irqrestore(&cctx->lock, flags);
rc = dma_set_mask(dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(dev, "32-bit DMA enable failed\n");
return rc;
}
return 0;
}
static int fastrpc_cb_remove(struct platform_device *pdev)
{
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
unsigned long flags;
int i;
spin_lock_irqsave(&cctx->lock, flags);
for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) {
if (cctx->session[i].sid == sess->sid) {
cctx->session[i].valid = false;
cctx->sesscount--;
}
}
spin_unlock_irqrestore(&cctx->lock, flags);
return 0;
}
static const struct of_device_id fastrpc_match_table[] = {
{ .compatible = "qcom,fastrpc-compute-cb", },
{}
};
static struct platform_driver fastrpc_cb_driver = {
.probe = fastrpc_cb_probe,
.remove = fastrpc_cb_remove,
.driver = {
.name = "qcom,fastrpc-cb",
.of_match_table = fastrpc_match_table,
.suppress_bind_attrs = true,
},
};
static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ctx *cctx,
bool is_secured, const char *domain)
{
struct fastrpc_device *fdev;
int err;
fdev = devm_kzalloc(dev, sizeof(*fdev), GFP_KERNEL);
if (!fdev)
return -ENOMEM;
fdev->secure = is_secured;
fdev->cctx = cctx;
fdev->miscdev.minor = MISC_DYNAMIC_MINOR;
fdev->miscdev.fops = &fastrpc_fops;
fdev->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "fastrpc-%s%s",
domain, is_secured ? "-secure" : "");
if (!fdev->miscdev.name)
return -ENOMEM;
err = misc_register(&fdev->miscdev);
if (!err) {
if (is_secured)
cctx->secure_fdevice = fdev;
else
cctx->fdevice = fdev;
}
return err;
}
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
struct device *rdev = &rpdev->dev;
struct fastrpc_channel_ctx *data;
int i, err, domain_id = -1, vmcount;
const char *domain;
bool secure_dsp;
unsigned int vmids[FASTRPC_MAX_VMIDS];
err = of_property_read_string(rdev->of_node, "label", &domain);
if (err) {
dev_info(rdev, "FastRPC Domain not specified in DT\n");
return err;
}
for (i = 0; i <= CDSP_DOMAIN_ID; i++) {
if (!strcmp(domains[i], domain)) {
domain_id = i;
break;
}
}
if (domain_id < 0) {
dev_info(rdev, "FastRPC Invalid Domain '%s'\n", domain);
return -EINVAL;
}
if (of_reserved_mem_device_init_by_idx(rdev, rdev->of_node, 0))
dev_info(rdev, "no reserved DMA memory for FASTRPC\n");
vmcount = of_property_read_variable_u32_array(rdev->of_node,
"qcom,vmids", &vmids[0], 0, FASTRPC_MAX_VMIDS);
if (vmcount < 0)
vmcount = 0;
else if (!qcom_scm_is_available())
return -EPROBE_DEFER;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
if (vmcount) {
data->vmcount = vmcount;
data->perms = BIT(QCOM_SCM_VMID_HLOS);
for (i = 0; i < data->vmcount; i++) {
data->vmperms[i].vmid = vmids[i];
data->vmperms[i].perm = QCOM_SCM_PERM_RWX;
}
}
secure_dsp = !(of_property_read_bool(rdev->of_node, "qcom,non-secure-domain"));
data->secure = secure_dsp;
switch (domain_id) {
case ADSP_DOMAIN_ID:
case MDSP_DOMAIN_ID:
case SDSP_DOMAIN_ID:
/* Unsigned PD offloading is only supported on CDSP */
data->unsigned_support = false;
err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
if (err)
goto fdev_error;
break;
case CDSP_DOMAIN_ID:
data->unsigned_support = true;
/* Create both device nodes so that we can allow both Signed and Unsigned PD */
err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
if (err)
goto fdev_error;
err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
if (err)
goto fdev_error;
break;
default:
err = -EINVAL;
goto fdev_error;
}
kref_init(&data->refcount);
dev_set_drvdata(&rpdev->dev, data);
rdev->dma_mask = &data->dma_mask;
dma_set_mask_and_coherent(rdev, DMA_BIT_MASK(32));
INIT_LIST_HEAD(&data->users);
INIT_LIST_HEAD(&data->invoke_interrupted_mmaps);
spin_lock_init(&data->lock);
idr_init(&data->ctx_idr);
data->domain_id = domain_id;
data->rpdev = rpdev;
err = of_platform_populate(rdev->of_node, NULL, NULL, rdev);
if (err)
goto populate_error;
return 0;
populate_error:
if (data->fdevice)
misc_deregister(&data->fdevice->miscdev);
if (data->secure_fdevice)
misc_deregister(&data->secure_fdevice->miscdev);
fdev_error:
kfree(data);
return err;
}
static void fastrpc_notify_users(struct fastrpc_user *user)
{
struct fastrpc_invoke_ctx *ctx;
spin_lock(&user->lock);
list_for_each_entry(ctx, &user->pending, node) {
ctx->retval = -EPIPE;
complete(&ctx->work);
}
spin_unlock(&user->lock);
}
static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
{
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
struct fastrpc_buf *buf, *b;
struct fastrpc_user *user;
unsigned long flags;
/* No invocations past this point */
spin_lock_irqsave(&cctx->lock, flags);
cctx->rpdev = NULL;
list_for_each_entry(user, &cctx->users, user)
fastrpc_notify_users(user);
spin_unlock_irqrestore(&cctx->lock, flags);
if (cctx->fdevice)
misc_deregister(&cctx->fdevice->miscdev);
if (cctx->secure_fdevice)
misc_deregister(&cctx->secure_fdevice->miscdev);
list_for_each_entry_safe(buf, b, &cctx->invoke_interrupted_mmaps, node)
list_del(&buf->node);
if (cctx->remote_heap)
fastrpc_buf_free(cctx->remote_heap);
of_platform_depopulate(&rpdev->dev);
fastrpc_channel_ctx_put(cctx);
}
static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
int len, void *priv, u32 addr)
{
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev);
struct fastrpc_invoke_rsp *rsp = data;
struct fastrpc_invoke_ctx *ctx;
unsigned long flags;
unsigned long ctxid;
if (len < sizeof(*rsp))
return -EINVAL;
ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
spin_lock_irqsave(&cctx->lock, flags);
ctx = idr_find(&cctx->ctx_idr, ctxid);
spin_unlock_irqrestore(&cctx->lock, flags);
if (!ctx) {
dev_err(&rpdev->dev, "No context ID matches response\n");
return -ENOENT;
}
ctx->retval = rsp->retval;
complete(&ctx->work);
/*
* The DMA buffer associated with the context cannot be freed in
* interrupt context so schedule it through a worker thread to
* avoid a kernel BUG.
*/
schedule_work(&ctx->put_work);
return 0;
}
static const struct of_device_id fastrpc_rpmsg_of_match[] = {
{ .compatible = "qcom,fastrpc" },
{ },
};
MODULE_DEVICE_TABLE(of, fastrpc_rpmsg_of_match);
static struct rpmsg_driver fastrpc_driver = {
.probe = fastrpc_rpmsg_probe,
.remove = fastrpc_rpmsg_remove,
.callback = fastrpc_rpmsg_callback,
.drv = {
.name = "qcom,fastrpc",
.of_match_table = fastrpc_rpmsg_of_match,
},
};
static int fastrpc_init(void)
{
int ret;
ret = platform_driver_register(&fastrpc_cb_driver);
if (ret < 0) {
pr_err("fastrpc: failed to register cb driver\n");
return ret;
}
ret = register_rpmsg_driver(&fastrpc_driver);
if (ret < 0) {
pr_err("fastrpc: failed to register rpmsg driver\n");
platform_driver_unregister(&fastrpc_cb_driver);
return ret;
}
return 0;
}
module_init(fastrpc_init);
static void fastrpc_exit(void)
{
platform_driver_unregister(&fastrpc_cb_driver);
unregister_rpmsg_driver(&fastrpc_driver);
}
module_exit(fastrpc_exit);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(DMA_BUF);
| linux-master | drivers/misc/fastrpc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Enclosure Services
*
* Copyright (C) 2008 James Bottomley <[email protected]>
*
**-----------------------------------------------------------------------------
**
**
**-----------------------------------------------------------------------------
*/
#include <linux/device.h>
#include <linux/enclosure.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
static LIST_HEAD(container_list);
static DEFINE_MUTEX(container_list_lock);
static struct class enclosure_class;
/**
* enclosure_find - find an enclosure given a parent device
* @dev: the parent to match against
* @start: Optional enclosure device to start from (NULL if none)
*
* Looks through the list of registered enclosures to find all those
* with @dev as a parent. Returns NULL if no enclosure is
* found. @start can be used as a starting point to obtain multiple
* enclosures per parent (should begin with NULL and then be set to
* each returned enclosure device). Obtains a reference to the
* enclosure class device which must be released with put_device().
* If @start is not NULL, a reference must be taken on it which is
* released before returning (this allows a loop through all
* enclosures to exit with only the reference on the enclosure of
* interest held). Note that the @dev may correspond to the actual
* device housing the enclosure, in which case no iteration via @start
* is required.
*/
struct enclosure_device *enclosure_find(struct device *dev,
struct enclosure_device *start)
{
struct enclosure_device *edev;
mutex_lock(&container_list_lock);
edev = list_prepare_entry(start, &container_list, node);
if (start)
put_device(&start->edev);
list_for_each_entry_continue(edev, &container_list, node) {
struct device *parent = edev->edev.parent;
/* parent might not be immediate, so iterate up to
* the root of the tree if necessary */
while (parent) {
if (parent == dev) {
get_device(&edev->edev);
mutex_unlock(&container_list_lock);
return edev;
}
parent = parent->parent;
}
}
mutex_unlock(&container_list_lock);
return NULL;
}
EXPORT_SYMBOL_GPL(enclosure_find);
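/*
* Usage sketch for the @start iteration described above (hypothetical
* caller; is_the_one_we_want() is an illustrative placeholder):
*
*   struct enclosure_device *edev = NULL;
*
*   while ((edev = enclosure_find(parent_dev, edev)) != NULL) {
*           // each call drops the reference on @start and returns the
*           // next matching enclosure with a fresh reference held
*           if (is_the_one_we_want(edev))
*                   break;
*   }
*   if (edev)
*           put_device(&edev->edev);    // drop the ref we kept
*/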
/**
* enclosure_for_each_device - calls a function for each enclosure
* @fn: the function to call
* @data: the data to pass to each call
*
* Loops over all the enclosures calling the function.
*
* Note, this function uses a mutex which will be held across calls to
* @fn, so it must be called from non-atomic context, and @fn may (although
* it should not) sleep or otherwise cause the mutex to be held for
* indefinite periods.
*/
int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
void *data)
{
int error = 0;
struct enclosure_device *edev;
mutex_lock(&container_list_lock);
list_for_each_entry(edev, &container_list, node) {
error = fn(edev, data);
if (error)
break;
}
mutex_unlock(&container_list_lock);
return error;
}
EXPORT_SYMBOL_GPL(enclosure_for_each_device);
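/*
* Callback sketch for enclosure_for_each_device() (hypothetical; counts the
* registered enclosures; keep @fn short since the list mutex is held across
* every call):
*
*   static int count_one(struct enclosure_device *edev, void *data)
*   {
*           (*(int *)data)++;
*           return 0;   // returning non-zero stops the iteration
*   }
*
*   int n = 0;
*   enclosure_for_each_device(count_one, &n);
*/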
/**
* enclosure_register - register device as an enclosure
*
* @dev: device containing the enclosure
* @name: chosen device name
* @components: number of components in the enclosure
* @cb: platform call-backs
*
* This sets up the device for being an enclosure. Note that @dev does
* not have to be a dedicated enclosure device. It may be some other type
* of device that additionally responds to enclosure services
*/
struct enclosure_device *
enclosure_register(struct device *dev, const char *name, int components,
struct enclosure_component_callbacks *cb)
{
struct enclosure_device *edev =
kzalloc(struct_size(edev, component, components), GFP_KERNEL);
int err, i;
BUG_ON(!cb);
if (!edev)
return ERR_PTR(-ENOMEM);
edev->components = components;
edev->edev.class = &enclosure_class;
edev->edev.parent = get_device(dev);
edev->cb = cb;
dev_set_name(&edev->edev, "%s", name);
err = device_register(&edev->edev);
if (err)
goto err;
for (i = 0; i < components; i++) {
edev->component[i].number = -1;
edev->component[i].slot = -1;
edev->component[i].power_status = -1;
}
mutex_lock(&container_list_lock);
list_add_tail(&edev->node, &container_list);
mutex_unlock(&container_list_lock);
return edev;
err:
put_device(edev->edev.parent);
kfree(edev);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(enclosure_register);
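/*
* Registration sketch (hypothetical backend; my_get_fault() and
* my_hw_read_fault() are illustrative placeholders, and only the callbacks
* the backend actually implements need to be non-NULL):
*
*   static void my_get_fault(struct enclosure_device *edev,
*                            struct enclosure_component *ecomp)
*   {
*           ecomp->fault = my_hw_read_fault(ecomp->number);
*   }
*
*   static struct enclosure_component_callbacks my_cbs = {
*           .get_fault = my_get_fault,
*   };
*
*   edev = enclosure_register(&pdev->dev, "my_enclosure", 8, &my_cbs);
*   if (IS_ERR(edev))
*           return PTR_ERR(edev);
*/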
static struct enclosure_component_callbacks enclosure_null_callbacks;
/**
* enclosure_unregister - remove an enclosure
*
* @edev: the registered enclosure to remove;
*/
void enclosure_unregister(struct enclosure_device *edev)
{
int i;
mutex_lock(&container_list_lock);
list_del(&edev->node);
mutex_unlock(&container_list_lock);
for (i = 0; i < edev->components; i++)
if (edev->component[i].number != -1)
device_unregister(&edev->component[i].cdev);
/* prevent any callbacks into service user */
edev->cb = &enclosure_null_callbacks;
device_unregister(&edev->edev);
}
EXPORT_SYMBOL_GPL(enclosure_unregister);
#define ENCLOSURE_NAME_SIZE 64
#define COMPONENT_NAME_SIZE 64
static void enclosure_link_name(struct enclosure_component *cdev, char *name)
{
strcpy(name, "enclosure_device:");
strcat(name, dev_name(&cdev->cdev));
}
static void enclosure_remove_links(struct enclosure_component *cdev)
{
char name[ENCLOSURE_NAME_SIZE];
enclosure_link_name(cdev, name);
/*
* In odd circumstances, like multipath devices, something else may
* already have removed the links, so check for this condition first.
*/
if (cdev->dev->kobj.sd)
sysfs_remove_link(&cdev->dev->kobj, name);
if (cdev->cdev.kobj.sd)
sysfs_remove_link(&cdev->cdev.kobj, "device");
}
static int enclosure_add_links(struct enclosure_component *cdev)
{
int error;
char name[ENCLOSURE_NAME_SIZE];
error = sysfs_create_link(&cdev->cdev.kobj, &cdev->dev->kobj, "device");
if (error)
return error;
enclosure_link_name(cdev, name);
error = sysfs_create_link(&cdev->dev->kobj, &cdev->cdev.kobj, name);
if (error)
sysfs_remove_link(&cdev->cdev.kobj, "device");
return error;
}
static void enclosure_release(struct device *cdev)
{
struct enclosure_device *edev = to_enclosure_device(cdev);
put_device(cdev->parent);
kfree(edev);
}
static void enclosure_component_release(struct device *dev)
{
struct enclosure_component *cdev = to_enclosure_component(dev);
if (cdev->dev) {
enclosure_remove_links(cdev);
put_device(cdev->dev);
}
put_device(dev->parent);
}
static struct enclosure_component *
enclosure_component_find_by_name(struct enclosure_device *edev,
const char *name)
{
int i;
const char *cname;
struct enclosure_component *ecomp;
if (!edev || !name || !name[0])
return NULL;
for (i = 0; i < edev->components; i++) {
ecomp = &edev->component[i];
cname = dev_name(&ecomp->cdev);
if (ecomp->number != -1 &&
cname && cname[0] &&
!strcmp(cname, name))
return ecomp;
}
return NULL;
}
static const struct attribute_group *enclosure_component_groups[];
/**
* enclosure_component_alloc - prepare a new enclosure component
* @edev: the enclosure to add the component
* @number: the device number
* @type: the type of component being added
* @name: an optional name to appear in sysfs (leave NULL if none)
*
* The name is optional for enclosures that give their components a unique
* name. If not, leave the field NULL and a name will be assigned.
*
* Returns a pointer to the enclosure component or an error.
*/
struct enclosure_component *
enclosure_component_alloc(struct enclosure_device *edev,
unsigned int number,
enum enclosure_component_type type,
const char *name)
{
struct enclosure_component *ecomp;
struct device *cdev;
int i;
char newname[COMPONENT_NAME_SIZE];
if (number >= edev->components)
return ERR_PTR(-EINVAL);
ecomp = &edev->component[number];
if (ecomp->number != -1)
return ERR_PTR(-EINVAL);
ecomp->type = type;
ecomp->number = number;
cdev = &ecomp->cdev;
cdev->parent = get_device(&edev->edev);
if (name && name[0]) {
/* Some hardware (e.g. enclosure in RX300 S6) has components
* with non unique names. Registering duplicates in sysfs
* will lead to warnings during bootup. So make the names
* unique by appending consecutive numbers -1, -2, ... */
i = 1;
snprintf(newname, COMPONENT_NAME_SIZE,
"%s", name);
while (enclosure_component_find_by_name(edev, newname))
snprintf(newname, COMPONENT_NAME_SIZE,
"%s-%i", name, i++);
dev_set_name(cdev, "%s", newname);
} else
dev_set_name(cdev, "%u", number);
cdev->release = enclosure_component_release;
cdev->groups = enclosure_component_groups;
return ecomp;
}
EXPORT_SYMBOL_GPL(enclosure_component_alloc);
/**
* enclosure_component_register - publishes an initialized enclosure component
* @ecomp: component to add
*
* Returns 0 on successful registration, releases the component otherwise
*/
int enclosure_component_register(struct enclosure_component *ecomp)
{
struct device *cdev;
int err;
cdev = &ecomp->cdev;
err = device_register(cdev);
if (err) {
ecomp->number = -1;
put_device(cdev);
return err;
}
return 0;
}
EXPORT_SYMBOL_GPL(enclosure_component_register);
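/*
* Component setup sketch combining enclosure_component_alloc() and
* enclosure_component_register() (hypothetical caller):
*
*   struct enclosure_component *ecomp;
*
*   ecomp = enclosure_component_alloc(edev, slot_nr,
*                                     ENCLOSURE_COMPONENT_DEVICE, NULL);
*   if (IS_ERR(ecomp))
*           return PTR_ERR(ecomp);
*   ecomp->slot = slot_nr;      // optional per-slot fields before publishing
*   err = enclosure_component_register(ecomp);
*   if (err)
*           return err;         // the component was put back on failure
*/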
/**
* enclosure_add_device - add a device as being part of an enclosure
* @edev: the enclosure device being added to.
* @component: the number of the component
* @dev: the device being added
*
* Declares a real device to reside in slot (or identifier) @component of an
* enclosure. This will cause the relevant sysfs links to appear.
* This function may also be used to change a device associated with
* an enclosure without having to call enclosure_remove_device() in
* between.
*
* Returns zero on success or an error.
*/
int enclosure_add_device(struct enclosure_device *edev, int component,
struct device *dev)
{
struct enclosure_component *cdev;
int err;
if (!edev || component >= edev->components)
return -EINVAL;
cdev = &edev->component[component];
if (cdev->dev == dev)
return -EEXIST;
if (cdev->dev) {
enclosure_remove_links(cdev);
put_device(cdev->dev);
}
cdev->dev = get_device(dev);
err = enclosure_add_links(cdev);
if (err) {
put_device(cdev->dev);
cdev->dev = NULL;
}
return err;
}
EXPORT_SYMBOL_GPL(enclosure_add_device);
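/*
* Linking sketch: once the real device occupying a slot is known, tie it to
* the component so the sysfs cross-links appear (hypothetical caller):
*
*   err = enclosure_add_device(edev, slot_nr, &real_dev);
*   ...
*   enclosure_remove_device(edev, &real_dev);   // on teardown
*/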
/**
* enclosure_remove_device - remove a device from an enclosure
* @edev: the enclosure device
* @dev: device to remove/put
*
* Returns zero on success or an error.
*
*/
int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
{
struct enclosure_component *cdev;
int i;
if (!edev || !dev)
return -EINVAL;
for (i = 0; i < edev->components; i++) {
cdev = &edev->component[i];
if (cdev->dev == dev) {
enclosure_remove_links(cdev);
put_device(dev);
cdev->dev = NULL;
return 0;
}
}
return -ENODEV;
}
EXPORT_SYMBOL_GPL(enclosure_remove_device);
/*
* sysfs pieces below
*/
static ssize_t components_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct enclosure_device *edev = to_enclosure_device(cdev);
return sysfs_emit(buf, "%d\n", edev->components);
}
static DEVICE_ATTR_RO(components);
static ssize_t id_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
{
struct enclosure_device *edev = to_enclosure_device(cdev);
if (edev->cb->show_id)
return edev->cb->show_id(edev, buf);
return -EINVAL;
}
static DEVICE_ATTR_RO(id);
static struct attribute *enclosure_class_attrs[] = {
&dev_attr_components.attr,
&dev_attr_id.attr,
NULL,
};
ATTRIBUTE_GROUPS(enclosure_class);
static struct class enclosure_class = {
.name = "enclosure",
.dev_release = enclosure_release,
.dev_groups = enclosure_class_groups,
};
static const char *const enclosure_status[] = {
[ENCLOSURE_STATUS_UNSUPPORTED] = "unsupported",
[ENCLOSURE_STATUS_OK] = "OK",
[ENCLOSURE_STATUS_CRITICAL] = "critical",
[ENCLOSURE_STATUS_NON_CRITICAL] = "non-critical",
[ENCLOSURE_STATUS_UNRECOVERABLE] = "unrecoverable",
[ENCLOSURE_STATUS_NOT_INSTALLED] = "not installed",
[ENCLOSURE_STATUS_UNKNOWN] = "unknown",
[ENCLOSURE_STATUS_UNAVAILABLE] = "unavailable",
[ENCLOSURE_STATUS_MAX] = NULL,
};
static const char *const enclosure_type[] = {
[ENCLOSURE_COMPONENT_DEVICE] = "device",
[ENCLOSURE_COMPONENT_ARRAY_DEVICE] = "array device",
};
static ssize_t get_component_fault(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct enclosure_device *edev = to_enclosure_device(cdev->parent);
struct enclosure_component *ecomp = to_enclosure_component(cdev);
if (edev->cb->get_fault)
edev->cb->get_fault(edev, ecomp);
return sysfs_emit(buf, "%d\n", ecomp->fault);
}
static ssize_t set_component_fault(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct enclosure_device *edev = to_enclosure_device(cdev->parent);
struct enclosure_component *ecomp = to_enclosure_component(cdev);
int val = simple_strtoul(buf, NULL, 0);
if (edev->cb->set_fault)
edev->cb->set_fault(edev, ecomp, val);
return count;
}
static ssize_t get_component_status(struct device *cdev,
struct device_attribute *attr,char *buf)
{
struct enclosure_device *edev = to_enclosure_device(cdev->parent);
struct enclosure_component *ecomp = to_enclosure_component(cdev);
if (edev->cb->get_status)
edev->cb->get_status(edev, ecomp);
return sysfs_emit(buf, "%s\n", enclosure_status[ecomp->status]);
}
static ssize_t set_component_status(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct enclosure_device *edev = to_enclosure_device(cdev->parent);
struct enclosure_component *ecomp = to_enclosure_component(cdev);
int i;
for (i = 0; enclosure_status[i]; i++) {
if (strncmp(buf, enclosure_status[i],
strlen(enclosure_status[i])) == 0 &&
(buf[strlen(enclosure_status[i])] == '\n' ||
buf[strlen(enclosure_status[i])] == '\0'))
break;
}
if (enclosure_status[i] && edev->cb->set_status) {
edev->cb->set_status(edev, ecomp, i);
return count;
} else
return -EINVAL;
}
static ssize_t get_component_active(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct enclosure_device *edev = to_enclosure_device(cdev->parent);
struct enclosure_component *ecomp = to_enclosure_component(cdev);
if (edev->cb->get_active)
edev->cb->get_active(edev, ecomp);
return sysfs_emit(buf, "%d\n", ecomp->active);
}
static ssize_t set_component_active(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct enclosure_device *edev = to_enclosure_device(cdev->parent);
struct enclosure_component *ecomp = to_enclosure_component(cdev);
int val = simple_strtoul(buf, NULL, 0);
if (edev->cb->set_active)
edev->cb->set_active(edev, ecomp, val);
return count;
}
static ssize_t get_component_locate(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct enclosure_device *edev = to_enclosure_device(cdev->parent);
struct enclosure_component *ecomp = to_enclosure_component(cdev);
if (edev->cb->get_locate)
edev->cb->get_locate(edev, ecomp);
return sysfs_emit(buf, "%d\n", ecomp->locate);
}
static ssize_t set_component_locate(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct enclosure_device *edev = to_enclosure_device(cdev->parent);
struct enclosure_component *ecomp = to_enclosure_component(cdev);
int val = simple_strtoul(buf, NULL, 0);
if (edev->cb->set_locate)
edev->cb->set_locate(edev, ecomp, val);
return count;
}
static ssize_t get_component_power_status(struct device *cdev,
struct device_attribute *attr,
char *buf)
{
struct enclosure_device *edev = to_enclosure_device(cdev->parent);
struct enclosure_component *ecomp = to_enclosure_component(cdev);
if (edev->cb->get_power_status)
edev->cb->get_power_status(edev, ecomp);
/* If still uninitialized, the callback failed or does not exist. */
if (ecomp->power_status == -1)
return (edev->cb->get_power_status) ? -EIO : -ENOTTY;
return sysfs_emit(buf, "%s\n", ecomp->power_status ? "on" : "off");
}
static ssize_t set_component_power_status(struct device *cdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct enclosure_device *edev = to_enclosure_device(cdev->parent);
struct enclosure_component *ecomp = to_enclosure_component(cdev);
int val;
if (strncmp(buf, "on", 2) == 0 &&
(buf[2] == '\n' || buf[2] == '\0'))
val = 1;
else if (strncmp(buf, "off", 3) == 0 &&
(buf[3] == '\n' || buf[3] == '\0'))
val = 0;
else
return -EINVAL;
if (edev->cb->set_power_status)
edev->cb->set_power_status(edev, ecomp, val);
return count;
}
static ssize_t get_component_type(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct enclosure_component *ecomp = to_enclosure_component(cdev);
return sysfs_emit(buf, "%s\n", enclosure_type[ecomp->type]);
}
static ssize_t get_component_slot(struct device *cdev,
struct device_attribute *attr, char *buf)
{
struct enclosure_component *ecomp = to_enclosure_component(cdev);
int slot;
/* if the enclosure does not override then use 'number' as a stand-in */
if (ecomp->slot >= 0)
slot = ecomp->slot;
else
slot = ecomp->number;
return sysfs_emit(buf, "%d\n", slot);
}
static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
set_component_fault);
static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
set_component_status);
static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
set_component_active);
static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
set_component_locate);
static DEVICE_ATTR(power_status, S_IRUGO | S_IWUSR, get_component_power_status,
set_component_power_status);
static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL);
static DEVICE_ATTR(slot, S_IRUGO, get_component_slot, NULL);
static struct attribute *enclosure_component_attrs[] = {
&dev_attr_fault.attr,
&dev_attr_status.attr,
&dev_attr_active.attr,
&dev_attr_locate.attr,
&dev_attr_power_status.attr,
&dev_attr_type.attr,
&dev_attr_slot.attr,
NULL
};
ATTRIBUTE_GROUPS(enclosure_component);
static int __init enclosure_init(void)
{
return class_register(&enclosure_class);
}
static void __exit enclosure_exit(void)
{
class_unregister(&enclosure_class);
}
module_init(enclosure_init);
module_exit(enclosure_exit);
MODULE_AUTHOR("James Bottomley");
MODULE_DESCRIPTION("Enclosure Services");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/enclosure.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012 Stefan Roese <[email protected]>
*/
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#define FIRMWARE_NAME "lattice-ecp3.bit"
/*
* The JTAG IDs of the supported FPGAs. The ID is 32 bits wide and
* bit-reversed, as noted in the manual.
*/
#define ID_ECP3_17 0xc2088080
#define ID_ECP3_35 0xc2048080
/* FPGA commands */
#define FPGA_CMD_READ_ID 0x07 /* plus 24 bits */
#define FPGA_CMD_READ_STATUS 0x09 /* plus 24 bits */
#define FPGA_CMD_CLEAR 0x70
#define FPGA_CMD_REFRESH 0x71
#define FPGA_CMD_WRITE_EN 0x4a /* plus 2 bits */
#define FPGA_CMD_WRITE_DIS 0x4f /* plus 8 bits */
#define FPGA_CMD_WRITE_INC 0x41 /* plus 0 bits */
/*
* The status register is 32 bits and bit-reversed; DONE is bit 17 per
* TN1222.pdf (LatticeECP3 Slave SPI Port User's Guide).
*/
#define FPGA_STATUS_DONE 0x00004000
#define FPGA_STATUS_CLEARED 0x00010000
#define FPGA_CLEAR_TIMEOUT 5000 /* max. 5000ms for FPGA clear */
#define FPGA_CLEAR_MSLEEP 10
#define FPGA_CLEAR_LOOP_COUNT (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP)
struct fpga_data {
struct completion fw_loaded;
};
struct ecp3_dev {
u32 jedec_id;
char *name;
};
static const struct ecp3_dev ecp3_dev[] = {
{
.jedec_id = ID_ECP3_17,
.name = "Lattice ECP3-17",
},
{
.jedec_id = ID_ECP3_35,
.name = "Lattice ECP3-35",
},
};
static void firmware_load(const struct firmware *fw, void *context)
{
struct spi_device *spi = (struct spi_device *)context;
struct fpga_data *data = spi_get_drvdata(spi);
u8 *buffer;
u8 txbuf[8];
u8 rxbuf[8];
int rx_len = 8;
int i;
u32 jedec_id;
u32 status;
if (fw == NULL) {
dev_err(&spi->dev, "Cannot load firmware, aborting\n");
goto out;
}
if (fw->size == 0) {
dev_err(&spi->dev, "Error: Firmware size is 0!\n");
goto out;
}
/* Fill dummy data (24 stuffing bits for commands) */
txbuf[1] = 0x00;
txbuf[2] = 0x00;
txbuf[3] = 0x00;
/* Trying to speak with the FPGA via SPI... */
txbuf[0] = FPGA_CMD_READ_ID;
spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
jedec_id = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", jedec_id);
for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) {
if (jedec_id == ecp3_dev[i].jedec_id)
break;
}
if (i == ARRAY_SIZE(ecp3_dev)) {
dev_err(&spi->dev,
"Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
jedec_id);
goto out;
}
dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
txbuf[0] = FPGA_CMD_READ_STATUS;
spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
buffer = kzalloc(fw->size + 8, GFP_KERNEL);
if (!buffer) {
dev_err(&spi->dev, "Error: Can't allocate memory!\n");
goto out;
}
/*
* Insert WRITE_INC command into stream (one SPI frame)
*/
buffer[0] = FPGA_CMD_WRITE_INC;
buffer[1] = 0xff;
buffer[2] = 0xff;
buffer[3] = 0xff;
memcpy(buffer + 4, fw->data, fw->size);
txbuf[0] = FPGA_CMD_REFRESH;
spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_WRITE_EN;
spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_CLEAR;
spi_write(spi, txbuf, 4);
/*
* Wait for FPGA memory to become cleared
*/
for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) {
txbuf[0] = FPGA_CMD_READ_STATUS;
spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
if (status == FPGA_STATUS_CLEARED)
break;
msleep(FPGA_CLEAR_MSLEEP);
}
if (i == FPGA_CLEAR_LOOP_COUNT) {
dev_err(&spi->dev,
"Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
status);
kfree(buffer);
goto out;
}
dev_info(&spi->dev, "Configuring the FPGA...\n");
spi_write(spi, buffer, fw->size + 8);
txbuf[0] = FPGA_CMD_WRITE_DIS;
spi_write(spi, txbuf, 4);
txbuf[0] = FPGA_CMD_READ_STATUS;
spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len);
status = get_unaligned_be32(&rxbuf[4]);
dev_dbg(&spi->dev, "FPGA Status=%08x\n", status);
/* Check result */
if (status & FPGA_STATUS_DONE)
dev_info(&spi->dev, "FPGA successfully configured!\n");
else
dev_info(&spi->dev, "FPGA not configured (DONE not set)\n");
/*
* Don't forget to release the firmware again
*/
release_firmware(fw);
kfree(buffer);
out:
complete(&data->fw_loaded);
}
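/*
* The SPI command pattern used above, as a stand-alone helper sketch
* (hypothetical refactoring): each command byte is followed by 24 stuffing
* bits, and the 32-bit reply sits in bytes 4..7 of the 8-byte read-back.
*
*   static u32 ecp3_cmd_read32(struct spi_device *spi, u8 cmd)
*   {
*           u8 txbuf[8] = { cmd, 0x00, 0x00, 0x00 };
*           u8 rxbuf[8];
*
*           spi_write_then_read(spi, txbuf, 8, rxbuf, 8);
*           return get_unaligned_be32(&rxbuf[4]);
*   }
*
*   Example: status = ecp3_cmd_read32(spi, FPGA_CMD_READ_STATUS);
*/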
static int lattice_ecp3_probe(struct spi_device *spi)
{
struct fpga_data *data;
int err;
data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
dev_err(&spi->dev, "Memory allocation for fpga_data failed\n");
return -ENOMEM;
}
spi_set_drvdata(spi, data);
init_completion(&data->fw_loaded);
err = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
FIRMWARE_NAME, &spi->dev,
GFP_KERNEL, spi, firmware_load);
if (err) {
dev_err(&spi->dev, "Firmware loading failed with %d!\n", err);
return err;
}
dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n");
return 0;
}
static void lattice_ecp3_remove(struct spi_device *spi)
{
struct fpga_data *data = spi_get_drvdata(spi);
wait_for_completion(&data->fw_loaded);
}
static const struct spi_device_id lattice_ecp3_id[] = {
{ "ecp3-17", 0 },
{ "ecp3-35", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, lattice_ecp3_id);
static struct spi_driver lattice_ecp3_driver = {
.driver = {
.name = "lattice-ecp3",
},
.probe = lattice_ecp3_probe,
.remove = lattice_ecp3_remove,
.id_table = lattice_ecp3_id,
};
module_spi_driver(lattice_ecp3_driver);
MODULE_AUTHOR("Stefan Roese <[email protected]>");
MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
| linux-master | drivers/misc/lattice-ecp3-config.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/if_ether.h>
#include <linux/ctype.h>
#include <linux/dmi.h>
#include <linux/of.h>
#define PHUB_STATUS 0x00 /* Status Register offset */
#define PHUB_CONTROL 0x04 /* Control Register offset */
#define PHUB_TIMEOUT 0x05 /* Time out value for Status Register */
#define PCH_PHUB_ROM_WRITE_ENABLE 0x01 /* Enabling for writing ROM */
#define PCH_PHUB_ROM_WRITE_DISABLE 0x00 /* Disabling for writing ROM */
#define PCH_PHUB_MAC_START_ADDR_EG20T 0x14 /* MAC data area start address
offset */
#define PCH_PHUB_MAC_START_ADDR_ML7223 0x20C /* MAC data area start address
offset */
#define PCH_PHUB_ROM_START_ADDR_EG20T 0x80 /* ROM data area start address offset
(Intel EG20T PCH)*/
#define PCH_PHUB_ROM_START_ADDR_ML7213 0x400 /* ROM data area start address
offset(LAPIS Semicon ML7213)
*/
#define PCH_PHUB_ROM_START_ADDR_ML7223 0x400 /* ROM data area start address
offset(LAPIS Semicon ML7223)
*/
/* MAX number of INT_REDUCE_CONTROL registers */
#define MAX_NUM_INT_REDUCE_CONTROL_REG 128
#define PCI_DEVICE_ID_PCH1_PHUB 0x8801
#define PCH_MINOR_NOS 1
#define CLKCFG_CAN_50MHZ 0x12000000
#define CLKCFG_CANCLK_MASK 0xFF000000
#define CLKCFG_UART_MASK 0xFFFFFF
/* CM-iTC */
#define CLKCFG_UART_48MHZ (1 << 16)
#define CLKCFG_UART_25MHZ (2 << 16)
#define CLKCFG_BAUDDIV (2 << 20)
#define CLKCFG_PLL2VCO (8 << 9)
#define CLKCFG_UARTCLKSEL (1 << 18)
/* Macros for ML7213 */
#define PCI_DEVICE_ID_ROHM_ML7213_PHUB 0x801A
/* Macros for ML7223 */
#define PCI_DEVICE_ID_ROHM_ML7223_mPHUB 0x8012 /* for Bus-m */
#define PCI_DEVICE_ID_ROHM_ML7223_nPHUB 0x8002 /* for Bus-n */
/* Macros for ML7831 */
#define PCI_DEVICE_ID_ROHM_ML7831_PHUB 0x8801
/* SROM ACCESS Macro */
#define PCH_WORD_ADDR_MASK (~((1 << 2) - 1))
/* Registers address offset */
#define PCH_PHUB_ID_REG 0x0000
#define PCH_PHUB_QUEUE_PRI_VAL_REG 0x0004
#define PCH_PHUB_RC_QUEUE_MAXSIZE_REG 0x0008
#define PCH_PHUB_BRI_QUEUE_MAXSIZE_REG 0x000C
#define PCH_PHUB_COMP_RESP_TIMEOUT_REG 0x0010
#define PCH_PHUB_BUS_SLAVE_CONTROL_REG 0x0014
#define PCH_PHUB_DEADLOCK_AVOID_TYPE_REG 0x0018
#define PCH_PHUB_INTPIN_REG_WPERMIT_REG0 0x0020
#define PCH_PHUB_INTPIN_REG_WPERMIT_REG1 0x0024
#define PCH_PHUB_INTPIN_REG_WPERMIT_REG2 0x0028
#define PCH_PHUB_INTPIN_REG_WPERMIT_REG3 0x002C
#define PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE 0x0040
#define CLKCFG_REG_OFFSET 0x500
#define FUNCSEL_REG_OFFSET 0x508
#define PCH_PHUB_OROM_SIZE 15360
/**
* struct pch_phub_reg - PHUB register structure
* @phub_id_reg: PHUB_ID register val
* @q_pri_val_reg: QUEUE_PRI_VAL register val
* @rc_q_maxsize_reg: RC_QUEUE_MAXSIZE register val
* @bri_q_maxsize_reg: BRI_QUEUE_MAXSIZE register val
* @comp_resp_timeout_reg: COMP_RESP_TIMEOUT register val
* @bus_slave_control_reg: BUS_SLAVE_CONTROL_REG register val
* @deadlock_avoid_type_reg: DEADLOCK_AVOID_TYPE register val
* @intpin_reg_wpermit_reg0: INTPIN_REG_WPERMIT register 0 val
* @intpin_reg_wpermit_reg1: INTPIN_REG_WPERMIT register 1 val
* @intpin_reg_wpermit_reg2: INTPIN_REG_WPERMIT register 2 val
* @intpin_reg_wpermit_reg3: INTPIN_REG_WPERMIT register 3 val
* @int_reduce_control_reg: INT_REDUCE_CONTROL registers val
* @clkcfg_reg: CLK CFG register val
* @funcsel_reg: Function select register value
* @pch_phub_base_address: Register base address
* @pch_phub_extrom_base_address: external rom base address
* @pch_mac_start_address: MAC address area start address
* @pch_opt_rom_start_address: Option ROM start address
* @ioh_type: Save IOH type
* @pdev: pointer to pci device struct
*/
struct pch_phub_reg {
u32 phub_id_reg;
u32 q_pri_val_reg;
u32 rc_q_maxsize_reg;
u32 bri_q_maxsize_reg;
u32 comp_resp_timeout_reg;
u32 bus_slave_control_reg;
u32 deadlock_avoid_type_reg;
u32 intpin_reg_wpermit_reg0;
u32 intpin_reg_wpermit_reg1;
u32 intpin_reg_wpermit_reg2;
u32 intpin_reg_wpermit_reg3;
u32 int_reduce_control_reg[MAX_NUM_INT_REDUCE_CONTROL_REG];
u32 clkcfg_reg;
u32 funcsel_reg;
void __iomem *pch_phub_base_address;
void __iomem *pch_phub_extrom_base_address;
u32 pch_mac_start_address;
u32 pch_opt_rom_start_address;
int ioh_type;
struct pci_dev *pdev;
};
/* SROM SPEC for MAC address assignment offset */
static const int pch_phub_mac_offset[ETH_ALEN] = {0x3, 0x2, 0x1, 0x0, 0xb, 0xa};
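/*
* Illustrative note added for clarity (not from the original author):
* pch_phub_mac_offset[] maps MAC byte i to its byte offset inside the SROM
* MAC area. With the EG20T base of 0x14 this gives
*   mac[0] -> 0x14 + 0x3, mac[1] -> 0x14 + 0x2, mac[2] -> 0x14 + 0x1,
*   mac[3] -> 0x14 + 0x0, mac[4] -> 0x14 + 0xb, mac[5] -> 0x14 + 0xa
* i.e. the first four bytes are stored byte-swapped in one 32-bit word and
* the last two, also swapped, at offsets 0xb and 0xa.
*/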
static DEFINE_MUTEX(pch_phub_mutex);
/**
* pch_phub_read_modify_write_reg() - Reading, modifying and writing a register
* @chip: Pointer to the PHUB register structure
* @reg_addr_offset: Register offset address value.
* @data: Value to write.
* @mask: Mask value.
*/
static void pch_phub_read_modify_write_reg(struct pch_phub_reg *chip,
unsigned int reg_addr_offset,
unsigned int data, unsigned int mask)
{
void __iomem *reg_addr = chip->pch_phub_base_address + reg_addr_offset;
iowrite32(((ioread32(reg_addr) & ~mask)) | data, reg_addr);
}
/* pch_phub_save_reg_conf - saves register configuration */
static void __maybe_unused pch_phub_save_reg_conf(struct pci_dev *pdev)
{
unsigned int i;
struct pch_phub_reg *chip = pci_get_drvdata(pdev);
void __iomem *p = chip->pch_phub_base_address;
chip->phub_id_reg = ioread32(p + PCH_PHUB_ID_REG);
chip->q_pri_val_reg = ioread32(p + PCH_PHUB_QUEUE_PRI_VAL_REG);
chip->rc_q_maxsize_reg = ioread32(p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG);
chip->bri_q_maxsize_reg = ioread32(p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG);
chip->comp_resp_timeout_reg =
ioread32(p + PCH_PHUB_COMP_RESP_TIMEOUT_REG);
chip->bus_slave_control_reg =
ioread32(p + PCH_PHUB_BUS_SLAVE_CONTROL_REG);
chip->deadlock_avoid_type_reg =
ioread32(p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG);
chip->intpin_reg_wpermit_reg0 =
ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0);
chip->intpin_reg_wpermit_reg1 =
ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1);
chip->intpin_reg_wpermit_reg2 =
ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2);
chip->intpin_reg_wpermit_reg3 =
ioread32(p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3);
dev_dbg(&pdev->dev, "%s : "
"chip->phub_id_reg=%x, "
"chip->q_pri_val_reg=%x, "
"chip->rc_q_maxsize_reg=%x, "
"chip->bri_q_maxsize_reg=%x, "
"chip->comp_resp_timeout_reg=%x, "
"chip->bus_slave_control_reg=%x, "
"chip->deadlock_avoid_type_reg=%x, "
"chip->intpin_reg_wpermit_reg0=%x, "
"chip->intpin_reg_wpermit_reg1=%x, "
"chip->intpin_reg_wpermit_reg2=%x, "
"chip->intpin_reg_wpermit_reg3=%x\n", __func__,
chip->phub_id_reg,
chip->q_pri_val_reg,
chip->rc_q_maxsize_reg,
chip->bri_q_maxsize_reg,
chip->comp_resp_timeout_reg,
chip->bus_slave_control_reg,
chip->deadlock_avoid_type_reg,
chip->intpin_reg_wpermit_reg0,
chip->intpin_reg_wpermit_reg1,
chip->intpin_reg_wpermit_reg2,
chip->intpin_reg_wpermit_reg3);
for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) {
chip->int_reduce_control_reg[i] =
ioread32(p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
dev_dbg(&pdev->dev, "%s : "
"chip->int_reduce_control_reg[%d]=%x\n",
__func__, i, chip->int_reduce_control_reg[i]);
}
chip->clkcfg_reg = ioread32(p + CLKCFG_REG_OFFSET);
if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
chip->funcsel_reg = ioread32(p + FUNCSEL_REG_OFFSET);
}
/* pch_phub_restore_reg_conf - restore register configuration */
static void __maybe_unused pch_phub_restore_reg_conf(struct pci_dev *pdev)
{
unsigned int i;
struct pch_phub_reg *chip = pci_get_drvdata(pdev);
void __iomem *p;
p = chip->pch_phub_base_address;
iowrite32(chip->phub_id_reg, p + PCH_PHUB_ID_REG);
iowrite32(chip->q_pri_val_reg, p + PCH_PHUB_QUEUE_PRI_VAL_REG);
iowrite32(chip->rc_q_maxsize_reg, p + PCH_PHUB_RC_QUEUE_MAXSIZE_REG);
iowrite32(chip->bri_q_maxsize_reg, p + PCH_PHUB_BRI_QUEUE_MAXSIZE_REG);
iowrite32(chip->comp_resp_timeout_reg,
p + PCH_PHUB_COMP_RESP_TIMEOUT_REG);
iowrite32(chip->bus_slave_control_reg,
p + PCH_PHUB_BUS_SLAVE_CONTROL_REG);
iowrite32(chip->deadlock_avoid_type_reg,
p + PCH_PHUB_DEADLOCK_AVOID_TYPE_REG);
iowrite32(chip->intpin_reg_wpermit_reg0,
p + PCH_PHUB_INTPIN_REG_WPERMIT_REG0);
iowrite32(chip->intpin_reg_wpermit_reg1,
p + PCH_PHUB_INTPIN_REG_WPERMIT_REG1);
iowrite32(chip->intpin_reg_wpermit_reg2,
p + PCH_PHUB_INTPIN_REG_WPERMIT_REG2);
iowrite32(chip->intpin_reg_wpermit_reg3,
p + PCH_PHUB_INTPIN_REG_WPERMIT_REG3);
dev_dbg(&pdev->dev, "%s : "
"chip->phub_id_reg=%x, "
"chip->q_pri_val_reg=%x, "
"chip->rc_q_maxsize_reg=%x, "
"chip->bri_q_maxsize_reg=%x, "
"chip->comp_resp_timeout_reg=%x, "
"chip->bus_slave_control_reg=%x, "
"chip->deadlock_avoid_type_reg=%x, "
"chip->intpin_reg_wpermit_reg0=%x, "
"chip->intpin_reg_wpermit_reg1=%x, "
"chip->intpin_reg_wpermit_reg2=%x, "
"chip->intpin_reg_wpermit_reg3=%x\n", __func__,
chip->phub_id_reg,
chip->q_pri_val_reg,
chip->rc_q_maxsize_reg,
chip->bri_q_maxsize_reg,
chip->comp_resp_timeout_reg,
chip->bus_slave_control_reg,
chip->deadlock_avoid_type_reg,
chip->intpin_reg_wpermit_reg0,
chip->intpin_reg_wpermit_reg1,
chip->intpin_reg_wpermit_reg2,
chip->intpin_reg_wpermit_reg3);
for (i = 0; i < MAX_NUM_INT_REDUCE_CONTROL_REG; i++) {
iowrite32(chip->int_reduce_control_reg[i],
p + PCH_PHUB_INT_REDUCE_CONTROL_REG_BASE + 4 * i);
dev_dbg(&pdev->dev, "%s : "
"chip->int_reduce_control_reg[%d]=%x\n",
__func__, i, chip->int_reduce_control_reg[i]);
}
iowrite32(chip->clkcfg_reg, p + CLKCFG_REG_OFFSET);
if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET);
}
/**
* pch_phub_read_serial_rom() - Reading Serial ROM
* @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM offset address to read.
* @data: Read buffer for specified Serial ROM value.
*/
static void pch_phub_read_serial_rom(struct pch_phub_reg *chip,
unsigned int offset_address, u8 *data)
{
void __iomem *mem_addr = chip->pch_phub_extrom_base_address +
offset_address;
*data = ioread8(mem_addr);
}
/**
* pch_phub_write_serial_rom() - Writing Serial ROM
* @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM offset address.
* @data: Serial ROM value to write.
*/
static int pch_phub_write_serial_rom(struct pch_phub_reg *chip,
unsigned int offset_address, u8 data)
{
void __iomem *mem_addr = chip->pch_phub_extrom_base_address +
(offset_address & PCH_WORD_ADDR_MASK);
int i;
unsigned int word_data;
unsigned int pos;
unsigned int mask;
pos = (offset_address % 4) * 8;
mask = ~(0xFF << pos);
iowrite32(PCH_PHUB_ROM_WRITE_ENABLE,
chip->pch_phub_extrom_base_address + PHUB_CONTROL);
word_data = ioread32(mem_addr);
iowrite32((word_data & mask) | (u32)data << pos, mem_addr);
i = 0;
while (ioread8(chip->pch_phub_extrom_base_address +
PHUB_STATUS) != 0x00) {
msleep(1);
if (i == PHUB_TIMEOUT)
return -ETIMEDOUT;
i++;
}
iowrite32(PCH_PHUB_ROM_WRITE_DISABLE,
chip->pch_phub_extrom_base_address + PHUB_CONTROL);
return 0;
}
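/*
* Worked example added for clarity: writing one byte at offset_address 0x0b
* updates the 32-bit word at 0x08 (0x0b & PCH_WORD_ADDR_MASK), with
* pos = (0x0b % 4) * 8 = 24 and mask = ~(0xFF << 24) = 0x00ffffff, so the
* word becomes (old & 0x00ffffff) | (data << 24) and only its top byte
* changes.
*/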
/**
* pch_phub_read_serial_rom_val() - Read Serial ROM value
* @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM address offset value.
* @data: Buffer for the Serial ROM value that is read.
*/
static void pch_phub_read_serial_rom_val(struct pch_phub_reg *chip,
unsigned int offset_address, u8 *data)
{
unsigned int mem_addr;
mem_addr = chip->pch_mac_start_address +
pch_phub_mac_offset[offset_address];
pch_phub_read_serial_rom(chip, mem_addr, data);
}
/**
* pch_phub_write_serial_rom_val() - Writing Serial ROM value
* @chip: Pointer to the PHUB register structure
* @offset_address: Serial ROM address offset value.
* @data: Serial ROM value.
*/
static int pch_phub_write_serial_rom_val(struct pch_phub_reg *chip,
unsigned int offset_address, u8 data)
{
int retval;
unsigned int mem_addr;
mem_addr = chip->pch_mac_start_address +
pch_phub_mac_offset[offset_address];
retval = pch_phub_write_serial_rom(chip, mem_addr, data);
return retval;
}
/* pch_phub_gbe_serial_rom_conf - makes Serial ROM header format configuration
* for Gigabit Ethernet MAC address
*/
static int pch_phub_gbe_serial_rom_conf(struct pch_phub_reg *chip)
{
int retval;
retval = pch_phub_write_serial_rom(chip, 0x0b, 0xbc);
retval |= pch_phub_write_serial_rom(chip, 0x0a, 0x10);
retval |= pch_phub_write_serial_rom(chip, 0x09, 0x01);
retval |= pch_phub_write_serial_rom(chip, 0x08, 0x02);
retval |= pch_phub_write_serial_rom(chip, 0x0f, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x0e, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x0d, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x0c, 0x80);
retval |= pch_phub_write_serial_rom(chip, 0x13, 0xbc);
retval |= pch_phub_write_serial_rom(chip, 0x12, 0x10);
retval |= pch_phub_write_serial_rom(chip, 0x11, 0x01);
retval |= pch_phub_write_serial_rom(chip, 0x10, 0x18);
retval |= pch_phub_write_serial_rom(chip, 0x1b, 0xbc);
retval |= pch_phub_write_serial_rom(chip, 0x1a, 0x10);
retval |= pch_phub_write_serial_rom(chip, 0x19, 0x01);
retval |= pch_phub_write_serial_rom(chip, 0x18, 0x19);
retval |= pch_phub_write_serial_rom(chip, 0x23, 0xbc);
retval |= pch_phub_write_serial_rom(chip, 0x22, 0x10);
retval |= pch_phub_write_serial_rom(chip, 0x21, 0x01);
retval |= pch_phub_write_serial_rom(chip, 0x20, 0x3a);
retval |= pch_phub_write_serial_rom(chip, 0x27, 0x01);
retval |= pch_phub_write_serial_rom(chip, 0x26, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x25, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x24, 0x00);
return retval;
}
/* pch_phub_gbe_serial_rom_conf_mp - makes Serial ROM header format configuration
* for Gigabit Ethernet MAC address
*/
static int pch_phub_gbe_serial_rom_conf_mp(struct pch_phub_reg *chip)
{
int retval;
u32 offset_addr;
offset_addr = 0x200;
retval = pch_phub_write_serial_rom(chip, 0x03 + offset_addr, 0xbc);
retval |= pch_phub_write_serial_rom(chip, 0x02 + offset_addr, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x01 + offset_addr, 0x40);
retval |= pch_phub_write_serial_rom(chip, 0x00 + offset_addr, 0x02);
retval |= pch_phub_write_serial_rom(chip, 0x07 + offset_addr, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x06 + offset_addr, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x05 + offset_addr, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x04 + offset_addr, 0x80);
retval |= pch_phub_write_serial_rom(chip, 0x0b + offset_addr, 0xbc);
retval |= pch_phub_write_serial_rom(chip, 0x0a + offset_addr, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x09 + offset_addr, 0x40);
retval |= pch_phub_write_serial_rom(chip, 0x08 + offset_addr, 0x18);
retval |= pch_phub_write_serial_rom(chip, 0x13 + offset_addr, 0xbc);
retval |= pch_phub_write_serial_rom(chip, 0x12 + offset_addr, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x11 + offset_addr, 0x40);
retval |= pch_phub_write_serial_rom(chip, 0x10 + offset_addr, 0x19);
retval |= pch_phub_write_serial_rom(chip, 0x1b + offset_addr, 0xbc);
retval |= pch_phub_write_serial_rom(chip, 0x1a + offset_addr, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x19 + offset_addr, 0x40);
retval |= pch_phub_write_serial_rom(chip, 0x18 + offset_addr, 0x3a);
retval |= pch_phub_write_serial_rom(chip, 0x1f + offset_addr, 0x01);
retval |= pch_phub_write_serial_rom(chip, 0x1e + offset_addr, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x1d + offset_addr, 0x00);
retval |= pch_phub_write_serial_rom(chip, 0x1c + offset_addr, 0x00);
return retval;
}
/**
* pch_phub_read_gbe_mac_addr() - Read Gigabit Ethernet MAC address
* @chip: Pointer to the PHUB register structure
* @data: Buffer of the Gigabit Ethernet MAC address value.
*/
static void pch_phub_read_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
{
int i;
for (i = 0; i < ETH_ALEN; i++)
pch_phub_read_serial_rom_val(chip, i, &data[i]);
}
/**
* pch_phub_write_gbe_mac_addr() - Write MAC address
* @chip: Pointer to the PHUB register structure
* @data: Gigabit Ethernet MAC address value.
*/
static int pch_phub_write_gbe_mac_addr(struct pch_phub_reg *chip, u8 *data)
{
int retval;
int i;
if ((chip->ioh_type == 1) || (chip->ioh_type == 5)) /* EG20T or ML7831*/
retval = pch_phub_gbe_serial_rom_conf(chip);
else /* ML7223 */
retval = pch_phub_gbe_serial_rom_conf_mp(chip);
if (retval)
return retval;
for (i = 0; i < ETH_ALEN; i++) {
retval = pch_phub_write_serial_rom_val(chip, i, data[i]);
if (retval)
return retval;
}
return retval;
}
static ssize_t pch_phub_bin_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
{
unsigned int rom_signature;
unsigned char rom_length;
unsigned int tmp;
unsigned int addr_offset;
unsigned int orom_size;
int ret;
int err;
ssize_t rom_size;
struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj));
ret = mutex_lock_interruptible(&pch_phub_mutex);
if (ret) {
err = -ERESTARTSYS;
goto return_err_nomutex;
}
/* Get Rom signature */
chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
if (!chip->pch_phub_extrom_base_address) {
err = -ENODATA;
goto exrom_map_err;
}
pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address,
(unsigned char *)&rom_signature);
rom_signature &= 0xff;
pch_phub_read_serial_rom(chip, chip->pch_opt_rom_start_address + 1,
(unsigned char *)&tmp);
rom_signature |= (tmp & 0xff) << 8;
if (rom_signature == 0xAA55) {
pch_phub_read_serial_rom(chip,
chip->pch_opt_rom_start_address + 2,
&rom_length);
orom_size = rom_length * 512;
if (orom_size < off) {
addr_offset = 0;
goto return_ok;
}
if (orom_size < count) {
addr_offset = 0;
goto return_ok;
}
for (addr_offset = 0; addr_offset < count; addr_offset++) {
pch_phub_read_serial_rom(chip,
chip->pch_opt_rom_start_address + addr_offset + off,
&buf[addr_offset]);
}
} else {
err = -ENODATA;
goto return_err;
}
return_ok:
pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
mutex_unlock(&pch_phub_mutex);
return addr_offset;
return_err:
pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
exrom_map_err:
mutex_unlock(&pch_phub_mutex);
return_err_nomutex:
return err;
}
static ssize_t pch_phub_bin_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
int err;
unsigned int addr_offset;
int ret;
ssize_t rom_size;
struct pch_phub_reg *chip = dev_get_drvdata(kobj_to_dev(kobj));
ret = mutex_lock_interruptible(&pch_phub_mutex);
if (ret)
return -ERESTARTSYS;
if (off > PCH_PHUB_OROM_SIZE) {
addr_offset = 0;
goto return_ok;
}
if (count > PCH_PHUB_OROM_SIZE) {
addr_offset = 0;
goto return_ok;
}
chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
if (!chip->pch_phub_extrom_base_address) {
err = -ENOMEM;
goto exrom_map_err;
}
for (addr_offset = 0; addr_offset < count; addr_offset++) {
if (PCH_PHUB_OROM_SIZE < off + addr_offset)
goto return_ok;
ret = pch_phub_write_serial_rom(chip,
chip->pch_opt_rom_start_address + addr_offset + off,
buf[addr_offset]);
if (ret) {
err = ret;
goto return_err;
}
}
return_ok:
pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
mutex_unlock(&pch_phub_mutex);
return addr_offset;
return_err:
pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
exrom_map_err:
mutex_unlock(&pch_phub_mutex);
return err;
}
static ssize_t show_pch_mac(struct device *dev, struct device_attribute *attr,
char *buf)
{
u8 mac[8];
struct pch_phub_reg *chip = dev_get_drvdata(dev);
ssize_t rom_size;
chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
if (!chip->pch_phub_extrom_base_address)
return -ENOMEM;
pch_phub_read_gbe_mac_addr(chip, mac);
pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
return sprintf(buf, "%pM\n", mac);
}
static ssize_t store_pch_mac(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u8 mac[ETH_ALEN];
ssize_t rom_size;
struct pch_phub_reg *chip = dev_get_drvdata(dev);
int ret;
if (!mac_pton(buf, mac))
return -EINVAL;
chip->pch_phub_extrom_base_address = pci_map_rom(chip->pdev, &rom_size);
if (!chip->pch_phub_extrom_base_address)
return -ENOMEM;
ret = pch_phub_write_gbe_mac_addr(chip, mac);
pci_unmap_rom(chip->pdev, chip->pch_phub_extrom_base_address);
if (ret)
return ret;
return count;
}
static DEVICE_ATTR(pch_mac, S_IRUGO | S_IWUSR, show_pch_mac, store_pch_mac);
static const struct bin_attribute pch_bin_attr = {
.attr = {
.name = "pch_firmware",
.mode = S_IRUGO | S_IWUSR,
},
.size = PCH_PHUB_OROM_SIZE + 1,
.read = pch_phub_bin_read,
.write = pch_phub_bin_write,
};
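/*
* Usage sketch added for clarity (paths are examples only, the PCI
* bus/device/function is platform dependent): both attributes sit on the
* PHUB PCI device, so for a device at 0000:02:00.3 one could run
*   cat /sys/bus/pci/devices/0000:02:00.3/pch_mac
*   echo 00:11:22:33:44:55 > /sys/bus/pci/devices/0000:02:00.3/pch_mac
*   dd if=oprom.bin of=/sys/bus/pci/devices/0000:02:00.3/pch_firmware
*/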
static int pch_phub_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
int ret;
struct pch_phub_reg *chip;
chip = kzalloc(sizeof(struct pch_phub_reg), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev,
"%s : pci_enable_device FAILED(ret=%d)", __func__, ret);
goto err_pci_enable_dev;
}
dev_dbg(&pdev->dev, "%s : pci_enable_device returns %d\n", __func__,
ret);
ret = pci_request_regions(pdev, KBUILD_MODNAME);
if (ret) {
dev_err(&pdev->dev,
"%s : pci_request_regions FAILED(ret=%d)", __func__, ret);
goto err_req_regions;
}
dev_dbg(&pdev->dev, "%s : "
"pci_request_regions returns %d\n", __func__, ret);
chip->pch_phub_base_address = pci_iomap(pdev, 1, 0);
if (chip->pch_phub_base_address == NULL) {
dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
ret = -ENOMEM;
goto err_pci_iomap;
}
dev_dbg(&pdev->dev, "%s : pci_iomap SUCCESS and value "
"in pch_phub_base_address variable is %p\n", __func__,
chip->pch_phub_base_address);
chip->pdev = pdev; /* Save pci device struct */
if (id->driver_data == 1) { /* EG20T PCH */
const char *board_name;
unsigned int prefetch = 0x000affaa;
if (pdev->dev.of_node)
of_property_read_u32(pdev->dev.of_node,
"intel,eg20t-prefetch",
&prefetch);
ret = sysfs_create_file(&pdev->dev.kobj,
&dev_attr_pch_mac.attr);
if (ret)
goto err_sysfs_create;
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
if (ret)
goto exit_bin_attr;
pch_phub_read_modify_write_reg(chip,
(unsigned int)CLKCFG_REG_OFFSET,
CLKCFG_CAN_50MHZ,
CLKCFG_CANCLK_MASK);
/* quirk for CM-iTC board */
board_name = dmi_get_system_info(DMI_BOARD_NAME);
if (board_name && strstr(board_name, "CM-iTC"))
pch_phub_read_modify_write_reg(chip,
(unsigned int)CLKCFG_REG_OFFSET,
CLKCFG_UART_48MHZ | CLKCFG_BAUDDIV |
CLKCFG_PLL2VCO | CLKCFG_UARTCLKSEL,
CLKCFG_UART_MASK);
/* set the prefetch value */
iowrite32(prefetch, chip->pch_phub_base_address + 0x14);
/* set the interrupt delay value */
iowrite32(0x25, chip->pch_phub_base_address + 0x44);
chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
/* quirk for MIPS Boston platform */
if (pdev->dev.of_node) {
if (of_machine_is_compatible("img,boston")) {
pch_phub_read_modify_write_reg(chip,
(unsigned int)CLKCFG_REG_OFFSET,
CLKCFG_UART_25MHZ,
CLKCFG_UART_MASK);
}
}
} else if (id->driver_data == 2) { /* ML7213 IOH */
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
if (ret)
goto err_sysfs_create;
/* set the prefetch value
* Device2(USB OHCI #1/ USB EHCI #1/ USB Device):a
* Device4(SDIO #0,1,2):f
* Device6(SATA 2):f
* Device8(USB OHCI #0/ USB EHCI #0):a
*/
iowrite32(0x000affa0, chip->pch_phub_base_address + 0x14);
chip->pch_opt_rom_start_address =
PCH_PHUB_ROM_START_ADDR_ML7213;
} else if (id->driver_data == 3) { /* ML7223 IOH Bus-m*/
/* set the prefetch value
* Device8(GbE)
*/
iowrite32(0x000a0000, chip->pch_phub_base_address + 0x14);
/* set the interrupt delay value */
iowrite32(0x25, chip->pch_phub_base_address + 0x140);
chip->pch_opt_rom_start_address =
PCH_PHUB_ROM_START_ADDR_ML7223;
chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
} else if (id->driver_data == 4) { /* ML7223 IOH Bus-n*/
ret = sysfs_create_file(&pdev->dev.kobj,
&dev_attr_pch_mac.attr);
if (ret)
goto err_sysfs_create;
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
if (ret)
goto exit_bin_attr;
/* set the prefetch value
* Device2(USB OHCI #0,1,2,3/ USB EHCI #0):a
* Device4(SDIO #0,1):f
* Device6(SATA 2):f
*/
iowrite32(0x0000ffa0, chip->pch_phub_base_address + 0x14);
chip->pch_opt_rom_start_address =
PCH_PHUB_ROM_START_ADDR_ML7223;
chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_ML7223;
} else if (id->driver_data == 5) { /* ML7831 */
ret = sysfs_create_file(&pdev->dev.kobj,
&dev_attr_pch_mac.attr);
if (ret)
goto err_sysfs_create;
ret = sysfs_create_bin_file(&pdev->dev.kobj, &pch_bin_attr);
if (ret)
goto exit_bin_attr;
/* set the prefetch value */
iowrite32(0x000affaa, chip->pch_phub_base_address + 0x14);
/* set the interrupt delay value */
iowrite32(0x25, chip->pch_phub_base_address + 0x44);
chip->pch_opt_rom_start_address = PCH_PHUB_ROM_START_ADDR_EG20T;
chip->pch_mac_start_address = PCH_PHUB_MAC_START_ADDR_EG20T;
}
chip->ioh_type = id->driver_data;
pci_set_drvdata(pdev, chip);
return 0;
exit_bin_attr:
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
err_sysfs_create:
pci_iounmap(pdev, chip->pch_phub_base_address);
err_pci_iomap:
pci_release_regions(pdev);
err_req_regions:
pci_disable_device(pdev);
err_pci_enable_dev:
kfree(chip);
dev_err(&pdev->dev, "%s returns %d\n", __func__, ret);
return ret;
}
static void pch_phub_remove(struct pci_dev *pdev)
{
struct pch_phub_reg *chip = pci_get_drvdata(pdev);
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_pch_mac.attr);
sysfs_remove_bin_file(&pdev->dev.kobj, &pch_bin_attr);
pci_iounmap(pdev, chip->pch_phub_base_address);
pci_release_regions(pdev);
pci_disable_device(pdev);
kfree(chip);
}
static int __maybe_unused pch_phub_suspend(struct device *dev_d)
{
device_wakeup_disable(dev_d);
return 0;
}
static int __maybe_unused pch_phub_resume(struct device *dev_d)
{
device_wakeup_disable(dev_d);
return 0;
}
static const struct pci_device_id pch_phub_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH1_PHUB), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7213_PHUB), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_mPHUB), 3, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7223_nPHUB), 4, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ROHM_ML7831_PHUB), 5, },
{ }
};
MODULE_DEVICE_TABLE(pci, pch_phub_pcidev_id);
static SIMPLE_DEV_PM_OPS(pch_phub_pm_ops, pch_phub_suspend, pch_phub_resume);
static struct pci_driver pch_phub_driver = {
.name = "pch_phub",
.id_table = pch_phub_pcidev_id,
.probe = pch_phub_probe,
.remove = pch_phub_remove,
.driver.pm = &pch_phub_pm_ops,
};
module_pci_driver(pch_phub_driver);
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor IOH(ML7213/ML7223) PHUB");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/pch_phub.c |
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
* Synopsys DesignWare xData driver
*
* Author: Gustavo Pimentel <[email protected]>
*/
#include <linux/miscdevice.h>
#include <linux/bitfield.h>
#include <linux/pci-epf.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/pci.h>
#define DW_XDATA_DRIVER_NAME "dw-xdata-pcie"
#define DW_XDATA_EP_MEM_OFFSET 0x8000000
static DEFINE_IDA(xdata_ida);
#define STATUS_DONE BIT(0)
#define CONTROL_DOORBELL BIT(0)
#define CONTROL_IS_WRITE BIT(1)
#define CONTROL_LENGTH(a) FIELD_PREP(GENMASK(13, 2), a)
#define CONTROL_PATTERN_INC BIT(16)
#define CONTROL_NO_ADDR_INC BIT(18)
#define XPERF_CONTROL_ENABLE BIT(5)
#define BURST_REPEAT BIT(31)
#define BURST_VALUE 0x1001
#define PATTERN_VALUE 0x0
struct dw_xdata_regs {
u32 addr_lsb; /* 0x000 */
u32 addr_msb; /* 0x004 */
u32 burst_cnt; /* 0x008 */
u32 control; /* 0x00c */
u32 pattern; /* 0x010 */
u32 status; /* 0x014 */
u32 RAM_addr; /* 0x018 */
u32 RAM_port; /* 0x01c */
u32 _reserved0[14]; /* 0x020..0x054 */
u32 perf_control; /* 0x058 */
u32 _reserved1[41]; /* 0x05c..0x0fc */
u32 wr_cnt_lsb; /* 0x100 */
u32 wr_cnt_msb; /* 0x104 */
u32 rd_cnt_lsb; /* 0x108 */
u32 rd_cnt_msb; /* 0x10c */
} __packed;
struct dw_xdata_region {
phys_addr_t paddr; /* physical address */
void __iomem *vaddr; /* virtual address */
};
struct dw_xdata {
struct dw_xdata_region rg_region; /* registers */
size_t max_wr_len; /* max wr xfer len */
size_t max_rd_len; /* max rd xfer len */
struct mutex mutex;
struct pci_dev *pdev;
struct miscdevice misc_dev;
};
static inline struct dw_xdata_regs __iomem *__dw_regs(struct dw_xdata *dw)
{
return dw->rg_region.vaddr;
}
static void dw_xdata_stop(struct dw_xdata *dw)
{
u32 burst;
mutex_lock(&dw->mutex);
burst = readl(&(__dw_regs(dw)->burst_cnt));
if (burst & BURST_REPEAT) {
burst &= ~(u32)BURST_REPEAT;
writel(burst, &(__dw_regs(dw)->burst_cnt));
}
mutex_unlock(&dw->mutex);
}
static void dw_xdata_start(struct dw_xdata *dw, bool write)
{
struct device *dev = &dw->pdev->dev;
u32 control, status;
/* Stop first if xfer in progress */
dw_xdata_stop(dw);
mutex_lock(&dw->mutex);
/* Clear status register */
writel(0x0, &(__dw_regs(dw)->status));
/* Burst count register set for continuous until stopped */
writel(BURST_REPEAT | BURST_VALUE, &(__dw_regs(dw)->burst_cnt));
/* Pattern register */
writel(PATTERN_VALUE, &(__dw_regs(dw)->pattern));
/* Control register */
control = CONTROL_DOORBELL | CONTROL_PATTERN_INC | CONTROL_NO_ADDR_INC;
if (write) {
control |= CONTROL_IS_WRITE;
control |= CONTROL_LENGTH(dw->max_wr_len);
} else {
control |= CONTROL_LENGTH(dw->max_rd_len);
}
writel(control, &(__dw_regs(dw)->control));
/*
* The xData HW block needs about 100 ms to initiate the traffic
* generation, according to this HW block's datasheet.
*/
usleep_range(100, 150);
status = readl(&(__dw_regs(dw)->status));
mutex_unlock(&dw->mutex);
if (!(status & STATUS_DONE))
dev_dbg(dev, "xData: started %s direction\n",
write ? "write" : "read");
}
static void dw_xdata_perf_meas(struct dw_xdata *dw, u64 *data, bool write)
{
if (write) {
*data = readl(&(__dw_regs(dw)->wr_cnt_msb));
*data <<= 32;
*data |= readl(&(__dw_regs(dw)->wr_cnt_lsb));
} else {
*data = readl(&(__dw_regs(dw)->rd_cnt_msb));
*data <<= 32;
*data |= readl(&(__dw_regs(dw)->rd_cnt_lsb));
}
}
static u64 dw_xdata_perf_diff(u64 *m1, u64 *m2, u64 time)
{
u64 rate = (*m1 - *m2);
rate *= (1000 * 1000 * 1000);
rate >>= 20;
rate = DIV_ROUND_CLOSEST_ULL(rate, time);
return rate;
}
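/*
* Worked example added for clarity (assuming the counters count bytes):
* with *m1 - *m2 = 104857600 (100 * 2^20) and time = 100000000 ns (100 ms),
* rate = 104857600 * 10^9 >> 20 / 10^8 = 1000, i.e. about 1000 MB/s as
* reported by dw_xdata_perf() below.
*/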
static void dw_xdata_perf(struct dw_xdata *dw, u64 *rate, bool write)
{
struct device *dev = &dw->pdev->dev;
u64 data[2], time[2], diff;
mutex_lock(&dw->mutex);
/* First acquisition of current count frames */
writel(0x0, &(__dw_regs(dw)->perf_control));
dw_xdata_perf_meas(dw, &data[0], write);
time[0] = jiffies;
writel((u32)XPERF_CONTROL_ENABLE, &(__dw_regs(dw)->perf_control));
/*
* Wait 100ms between the 1st count frame acquisition and the 2nd
* count frame acquisition, in order to calculate the speed later
*/
mdelay(100);
/* Second acquisition of current count frames */
writel(0x0, &(__dw_regs(dw)->perf_control));
dw_xdata_perf_meas(dw, &data[1], write);
time[1] = jiffies;
writel((u32)XPERF_CONTROL_ENABLE, &(__dw_regs(dw)->perf_control));
/*
* Speed calculation
*
* rate = (2nd count frames - 1st count frames) / (time elapsed)
*/
diff = jiffies_to_nsecs(time[1] - time[0]);
*rate = dw_xdata_perf_diff(&data[1], &data[0], diff);
mutex_unlock(&dw->mutex);
dev_dbg(dev, "xData: time=%llu us, %s=%llu MB/s\n",
diff, write ? "write" : "read", *rate);
}
static struct dw_xdata *misc_dev_to_dw(struct miscdevice *misc_dev)
{
return container_of(misc_dev, struct dw_xdata, misc_dev);
}
static ssize_t write_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct miscdevice *misc_dev = dev_get_drvdata(dev);
struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
u64 rate;
dw_xdata_perf(dw, &rate, true);
return sysfs_emit(buf, "%llu\n", rate);
}
static ssize_t write_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct miscdevice *misc_dev = dev_get_drvdata(dev);
struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
bool enabled;
int ret;
ret = kstrtobool(buf, &enabled);
if (ret < 0)
return ret;
if (enabled) {
dev_dbg(dev, "xData: requested write transfer\n");
dw_xdata_start(dw, true);
} else {
dev_dbg(dev, "xData: requested stop transfer\n");
dw_xdata_stop(dw);
}
return size;
}
static DEVICE_ATTR_RW(write);
static ssize_t read_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct miscdevice *misc_dev = dev_get_drvdata(dev);
struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
u64 rate;
dw_xdata_perf(dw, &rate, false);
return sysfs_emit(buf, "%llu\n", rate);
}
static ssize_t read_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t size)
{
struct miscdevice *misc_dev = dev_get_drvdata(dev);
struct dw_xdata *dw = misc_dev_to_dw(misc_dev);
bool enabled;
int ret;
ret = kstrtobool(buf, &enabled);
if (ret < 0)
return ret;
if (enabled) {
dev_dbg(dev, "xData: requested read transfer\n");
dw_xdata_start(dw, false);
} else {
dev_dbg(dev, "xData: requested stop transfer\n");
dw_xdata_stop(dw);
}
return size;
}
static DEVICE_ATTR_RW(read);
static struct attribute *xdata_attrs[] = {
&dev_attr_write.attr,
&dev_attr_read.attr,
NULL,
};
ATTRIBUTE_GROUPS(xdata);
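/*
* Usage sketch added for clarity (the instance number is an example only):
* the attributes are exposed through the misc device in sysfs, e.g.
*   echo 1 > /sys/class/misc/dw-xdata-pcie.0/write   (start write traffic)
*   cat /sys/class/misc/dw-xdata-pcie.0/write        (current rate, MB/s)
*   echo 0 > /sys/class/misc/dw-xdata-pcie.0/write   (stop traffic)
* and likewise for the read attribute.
*/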
static int dw_xdata_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
struct device *dev = &pdev->dev;
struct dw_xdata *dw;
char name[24];
u64 addr;
int err;
int id;
/* Enable PCI device */
err = pcim_enable_device(pdev);
if (err) {
dev_err(dev, "enabling device failed\n");
return err;
}
/* Mapping PCI BAR regions */
err = pcim_iomap_regions(pdev, BIT(BAR_0), pci_name(pdev));
if (err) {
dev_err(dev, "xData BAR I/O remapping failed\n");
return err;
}
pci_set_master(pdev);
/* Allocate memory */
dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
if (!dw)
return -ENOMEM;
/* Data structure initialization */
mutex_init(&dw->mutex);
dw->rg_region.vaddr = pcim_iomap_table(pdev)[BAR_0];
if (!dw->rg_region.vaddr)
return -ENOMEM;
dw->rg_region.paddr = pdev->resource[BAR_0].start;
dw->max_wr_len = pcie_get_mps(pdev);
dw->max_wr_len >>= 2;
dw->max_rd_len = pcie_get_readrq(pdev);
dw->max_rd_len >>= 2;
dw->pdev = pdev;
id = ida_simple_get(&xdata_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
dev_err(dev, "xData: unable to get id\n");
return id;
}
snprintf(name, sizeof(name), DW_XDATA_DRIVER_NAME ".%d", id);
dw->misc_dev.name = kstrdup(name, GFP_KERNEL);
if (!dw->misc_dev.name) {
err = -ENOMEM;
goto err_ida_remove;
}
dw->misc_dev.minor = MISC_DYNAMIC_MINOR;
dw->misc_dev.parent = dev;
dw->misc_dev.groups = xdata_groups;
writel(0x0, &(__dw_regs(dw)->RAM_addr));
writel(0x0, &(__dw_regs(dw)->RAM_port));
addr = dw->rg_region.paddr + DW_XDATA_EP_MEM_OFFSET;
writel(lower_32_bits(addr), &(__dw_regs(dw)->addr_lsb));
writel(upper_32_bits(addr), &(__dw_regs(dw)->addr_msb));
dev_dbg(dev, "xData: target address = 0x%.16llx\n", addr);
dev_dbg(dev, "xData: wr_len = %zu, rd_len = %zu\n",
dw->max_wr_len * 4, dw->max_rd_len * 4);
/* Saving data structure reference */
pci_set_drvdata(pdev, dw);
/* Register misc device */
err = misc_register(&dw->misc_dev);
if (err) {
dev_err(dev, "xData: failed to register device\n");
goto err_kfree_name;
}
return 0;
err_kfree_name:
kfree(dw->misc_dev.name);
err_ida_remove:
ida_simple_remove(&xdata_ida, id);
return err;
}
static void dw_xdata_pcie_remove(struct pci_dev *pdev)
{
struct dw_xdata *dw = pci_get_drvdata(pdev);
int id;
if (sscanf(dw->misc_dev.name, DW_XDATA_DRIVER_NAME ".%d", &id) != 1)
return;
if (id < 0)
return;
dw_xdata_stop(dw);
misc_deregister(&dw->misc_dev);
kfree(dw->misc_dev.name);
ida_simple_remove(&xdata_ida, id);
}
static const struct pci_device_id dw_xdata_pcie_id_table[] = {
{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
{ }
};
MODULE_DEVICE_TABLE(pci, dw_xdata_pcie_id_table);
static struct pci_driver dw_xdata_pcie_driver = {
.name = DW_XDATA_DRIVER_NAME,
.id_table = dw_xdata_pcie_id_table,
.probe = dw_xdata_pcie_probe,
.remove = dw_xdata_pcie_remove,
};
module_pci_driver(dw_xdata_pcie_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare xData PCIe driver");
MODULE_AUTHOR("Gustavo Pimentel <[email protected]>");
| linux-master | drivers/misc/dw-xdata-pcie.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2005-2007 Jiri Slaby <[email protected]>
*
* You need a userspace library to cooperate with this driver. It (and other
* info) may be obtained here:
* http://www.fi.muni.cz/~xslaby/phantom.html
* or alternatively, you might use OpenHaptics provided by Sensable.
*/
#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/interrupt.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/phantom.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <asm/io.h>
#define PHANTOM_VERSION "n0.9.8"
#define PHANTOM_MAX_MINORS 8
#define PHN_IRQCTL 0x4c /* irq control in caddr space */
#define PHB_RUNNING 1
#define PHB_NOT_OH 2
static DEFINE_MUTEX(phantom_mutex);
static struct class *phantom_class;
static int phantom_major;
struct phantom_device {
unsigned int opened;
void __iomem *caddr;
u32 __iomem *iaddr;
u32 __iomem *oaddr;
unsigned long status;
atomic_t counter;
wait_queue_head_t wait;
struct cdev cdev;
struct mutex open_lock;
spinlock_t regs_lock;
/* used in NOT_OH mode */
struct phm_regs oregs;
u32 ctl_reg;
};
static unsigned char phantom_devices[PHANTOM_MAX_MINORS];
static int phantom_status(struct phantom_device *dev, unsigned long newstat)
{
pr_debug("phantom_status %lx %lx\n", dev->status, newstat);
if (!(dev->status & PHB_RUNNING) && (newstat & PHB_RUNNING)) {
atomic_set(&dev->counter, 0);
iowrite32(PHN_CTL_IRQ, dev->iaddr + PHN_CONTROL);
iowrite32(0x43, dev->caddr + PHN_IRQCTL);
ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
} else if ((dev->status & PHB_RUNNING) && !(newstat & PHB_RUNNING)) {
iowrite32(0, dev->caddr + PHN_IRQCTL);
ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
}
dev->status = newstat;
return 0;
}
/*
* File ops
*/
static long phantom_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct phantom_device *dev = file->private_data;
struct phm_regs rs;
struct phm_reg r;
void __user *argp = (void __user *)arg;
unsigned long flags;
unsigned int i;
switch (cmd) {
case PHN_SETREG:
case PHN_SET_REG:
if (copy_from_user(&r, argp, sizeof(r)))
return -EFAULT;
if (r.reg > 7)
return -EINVAL;
spin_lock_irqsave(&dev->regs_lock, flags);
if (r.reg == PHN_CONTROL && (r.value & PHN_CTL_IRQ) &&
phantom_status(dev, dev->status | PHB_RUNNING)){
spin_unlock_irqrestore(&dev->regs_lock, flags);
return -ENODEV;
}
pr_debug("phantom: writing %x to %u\n", r.value, r.reg);
/* preserve amp bit (don't allow to change it when in NOT_OH) */
if (r.reg == PHN_CONTROL && (dev->status & PHB_NOT_OH)) {
r.value &= ~PHN_CTL_AMP;
r.value |= dev->ctl_reg & PHN_CTL_AMP;
dev->ctl_reg = r.value;
}
iowrite32(r.value, dev->iaddr + r.reg);
ioread32(dev->iaddr); /* PCI posting */
if (r.reg == PHN_CONTROL && !(r.value & PHN_CTL_IRQ))
phantom_status(dev, dev->status & ~PHB_RUNNING);
spin_unlock_irqrestore(&dev->regs_lock, flags);
break;
case PHN_SETREGS:
case PHN_SET_REGS:
if (copy_from_user(&rs, argp, sizeof(rs)))
return -EFAULT;
pr_debug("phantom: SRS %u regs %x\n", rs.count, rs.mask);
spin_lock_irqsave(&dev->regs_lock, flags);
if (dev->status & PHB_NOT_OH)
memcpy(&dev->oregs, &rs, sizeof(rs));
else {
u32 m = min(rs.count, 8U);
for (i = 0; i < m; i++)
if (rs.mask & BIT(i))
iowrite32(rs.values[i], dev->oaddr + i);
ioread32(dev->iaddr); /* PCI posting */
}
spin_unlock_irqrestore(&dev->regs_lock, flags);
break;
case PHN_GETREG:
case PHN_GET_REG:
if (copy_from_user(&r, argp, sizeof(r)))
return -EFAULT;
if (r.reg > 7)
return -EINVAL;
r.value = ioread32(dev->iaddr + r.reg);
if (copy_to_user(argp, &r, sizeof(r)))
return -EFAULT;
break;
case PHN_GETREGS:
case PHN_GET_REGS: {
u32 m;
if (copy_from_user(&rs, argp, sizeof(rs)))
return -EFAULT;
m = min(rs.count, 8U);
pr_debug("phantom: GRS %u regs %x\n", rs.count, rs.mask);
spin_lock_irqsave(&dev->regs_lock, flags);
for (i = 0; i < m; i++)
if (rs.mask & BIT(i))
rs.values[i] = ioread32(dev->iaddr + i);
atomic_set(&dev->counter, 0);
spin_unlock_irqrestore(&dev->regs_lock, flags);
if (copy_to_user(argp, &rs, sizeof(rs)))
return -EFAULT;
break;
} case PHN_NOT_OH:
spin_lock_irqsave(&dev->regs_lock, flags);
if (dev->status & PHB_RUNNING) {
printk(KERN_ERR "phantom: you need to set NOT_OH "
"before you start the device!\n");
spin_unlock_irqrestore(&dev->regs_lock, flags);
return -EINVAL;
}
dev->status |= PHB_NOT_OH;
spin_unlock_irqrestore(&dev->regs_lock, flags);
break;
default:
return -ENOTTY;
}
return 0;
}
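/*
* Note added for clarity (not from the original author): the compat handler
* below only rewrites the _IOC_SIZE field of commands whose encoded size is
* that of a 32-bit user pointer (ioctl numbers 0..3), substituting the
* native pointer size so the command matches the numbers handled by
* phantom_ioctl() above.
*/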
#ifdef CONFIG_COMPAT
static long phantom_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
if (_IOC_NR(cmd) <= 3 && _IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT);
cmd |= sizeof(void *) << _IOC_SIZESHIFT;
}
return phantom_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define phantom_compat_ioctl NULL
#endif
static int phantom_open(struct inode *inode, struct file *file)
{
struct phantom_device *dev = container_of(inode->i_cdev,
struct phantom_device, cdev);
mutex_lock(&phantom_mutex);
nonseekable_open(inode, file);
if (mutex_lock_interruptible(&dev->open_lock)) {
mutex_unlock(&phantom_mutex);
return -ERESTARTSYS;
}
if (dev->opened) {
mutex_unlock(&dev->open_lock);
mutex_unlock(&phantom_mutex);
return -EINVAL;
}
WARN_ON(dev->status & PHB_NOT_OH);
file->private_data = dev;
atomic_set(&dev->counter, 0);
dev->opened++;
mutex_unlock(&dev->open_lock);
mutex_unlock(&phantom_mutex);
return 0;
}
static int phantom_release(struct inode *inode, struct file *file)
{
struct phantom_device *dev = file->private_data;
mutex_lock(&dev->open_lock);
dev->opened = 0;
phantom_status(dev, dev->status & ~PHB_RUNNING);
dev->status &= ~PHB_NOT_OH;
mutex_unlock(&dev->open_lock);
return 0;
}
static __poll_t phantom_poll(struct file *file, poll_table *wait)
{
struct phantom_device *dev = file->private_data;
__poll_t mask = 0;
pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter));
poll_wait(file, &dev->wait, wait);
if (!(dev->status & PHB_RUNNING))
mask = EPOLLERR;
else if (atomic_read(&dev->counter))
mask = EPOLLIN | EPOLLRDNORM;
pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter));
return mask;
}
static const struct file_operations phantom_file_ops = {
.open = phantom_open,
.release = phantom_release,
.unlocked_ioctl = phantom_ioctl,
.compat_ioctl = phantom_compat_ioctl,
.poll = phantom_poll,
.llseek = no_llseek,
};
static irqreturn_t phantom_isr(int irq, void *data)
{
struct phantom_device *dev = data;
unsigned int i;
u32 ctl;
spin_lock(&dev->regs_lock);
ctl = ioread32(dev->iaddr + PHN_CONTROL);
if (!(ctl & PHN_CTL_IRQ)) {
spin_unlock(&dev->regs_lock);
return IRQ_NONE;
}
iowrite32(0, dev->iaddr);
iowrite32(0xc0, dev->iaddr);
if (dev->status & PHB_NOT_OH) {
struct phm_regs *r = &dev->oregs;
u32 m = min(r->count, 8U);
for (i = 0; i < m; i++)
if (r->mask & BIT(i))
iowrite32(r->values[i], dev->oaddr + i);
dev->ctl_reg ^= PHN_CTL_AMP;
iowrite32(dev->ctl_reg, dev->iaddr + PHN_CONTROL);
}
spin_unlock(&dev->regs_lock);
ioread32(dev->iaddr); /* PCI posting */
atomic_inc(&dev->counter);
wake_up_interruptible(&dev->wait);
return IRQ_HANDLED;
}
/*
* Init and deinit driver
*/
static unsigned int phantom_get_free(void)
{
unsigned int i;
for (i = 0; i < PHANTOM_MAX_MINORS; i++)
if (phantom_devices[i] == 0)
break;
return i;
}
static int phantom_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_id)
{
struct phantom_device *pht;
unsigned int minor;
int retval;
retval = pci_enable_device(pdev);
if (retval) {
dev_err(&pdev->dev, "pci_enable_device failed!\n");
goto err;
}
minor = phantom_get_free();
if (minor == PHANTOM_MAX_MINORS) {
dev_err(&pdev->dev, "too many devices found!\n");
retval = -EIO;
goto err_dis;
}
phantom_devices[minor] = 1;
retval = pci_request_regions(pdev, "phantom");
if (retval) {
dev_err(&pdev->dev, "pci_request_regions failed!\n");
goto err_null;
}
retval = -ENOMEM;
pht = kzalloc(sizeof(*pht), GFP_KERNEL);
if (pht == NULL) {
dev_err(&pdev->dev, "unable to allocate device\n");
goto err_reg;
}
pht->caddr = pci_iomap(pdev, 0, 0);
if (pht->caddr == NULL) {
dev_err(&pdev->dev, "can't remap conf space\n");
goto err_fr;
}
pht->iaddr = pci_iomap(pdev, 2, 0);
if (pht->iaddr == NULL) {
dev_err(&pdev->dev, "can't remap input space\n");
goto err_unmc;
}
pht->oaddr = pci_iomap(pdev, 3, 0);
if (pht->oaddr == NULL) {
dev_err(&pdev->dev, "can't remap output space\n");
goto err_unmi;
}
mutex_init(&pht->open_lock);
spin_lock_init(&pht->regs_lock);
init_waitqueue_head(&pht->wait);
cdev_init(&pht->cdev, &phantom_file_ops);
pht->cdev.owner = THIS_MODULE;
iowrite32(0, pht->caddr + PHN_IRQCTL);
ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */
retval = request_irq(pdev->irq, phantom_isr,
IRQF_SHARED, "phantom", pht);
if (retval) {
dev_err(&pdev->dev, "can't establish ISR\n");
goto err_unmo;
}
retval = cdev_add(&pht->cdev, MKDEV(phantom_major, minor), 1);
if (retval) {
dev_err(&pdev->dev, "chardev registration failed\n");
goto err_irq;
}
if (IS_ERR(device_create(phantom_class, &pdev->dev,
MKDEV(phantom_major, minor), NULL,
"phantom%u", minor)))
dev_err(&pdev->dev, "can't create device\n");
pci_set_drvdata(pdev, pht);
return 0;
err_irq:
free_irq(pdev->irq, pht);
err_unmo:
pci_iounmap(pdev, pht->oaddr);
err_unmi:
pci_iounmap(pdev, pht->iaddr);
err_unmc:
pci_iounmap(pdev, pht->caddr);
err_fr:
kfree(pht);
err_reg:
pci_release_regions(pdev);
err_null:
phantom_devices[minor] = 0;
err_dis:
pci_disable_device(pdev);
err:
return retval;
}
static void phantom_remove(struct pci_dev *pdev)
{
struct phantom_device *pht = pci_get_drvdata(pdev);
unsigned int minor = MINOR(pht->cdev.dev);
device_destroy(phantom_class, MKDEV(phantom_major, minor));
cdev_del(&pht->cdev);
iowrite32(0, pht->caddr + PHN_IRQCTL);
ioread32(pht->caddr + PHN_IRQCTL); /* PCI posting */
free_irq(pdev->irq, pht);
pci_iounmap(pdev, pht->oaddr);
pci_iounmap(pdev, pht->iaddr);
pci_iounmap(pdev, pht->caddr);
kfree(pht);
pci_release_regions(pdev);
phantom_devices[minor] = 0;
pci_disable_device(pdev);
}
static int __maybe_unused phantom_suspend(struct device *dev_d)
{
struct phantom_device *dev = dev_get_drvdata(dev_d);
iowrite32(0, dev->caddr + PHN_IRQCTL);
ioread32(dev->caddr + PHN_IRQCTL); /* PCI posting */
synchronize_irq(to_pci_dev(dev_d)->irq);
return 0;
}
static int __maybe_unused phantom_resume(struct device *dev_d)
{
struct phantom_device *dev = dev_get_drvdata(dev_d);
iowrite32(0, dev->caddr + PHN_IRQCTL);
return 0;
}
static struct pci_device_id phantom_pci_tbl[] = {
{ .vendor = PCI_VENDOR_ID_PLX, .device = PCI_DEVICE_ID_PLX_9050,
.subvendor = PCI_VENDOR_ID_PLX, .subdevice = PCI_DEVICE_ID_PLX_9050,
.class = PCI_CLASS_BRIDGE_OTHER << 8, .class_mask = 0xffff00 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, phantom_pci_tbl);
static SIMPLE_DEV_PM_OPS(phantom_pm_ops, phantom_suspend, phantom_resume);
static struct pci_driver phantom_pci_driver = {
.name = "phantom",
.id_table = phantom_pci_tbl,
.probe = phantom_probe,
.remove = phantom_remove,
.driver.pm = &phantom_pm_ops,
};
static CLASS_ATTR_STRING(version, 0444, PHANTOM_VERSION);
static int __init phantom_init(void)
{
int retval;
dev_t dev;
phantom_class = class_create("phantom");
if (IS_ERR(phantom_class)) {
retval = PTR_ERR(phantom_class);
printk(KERN_ERR "phantom: can't register phantom class\n");
goto err;
}
retval = class_create_file(phantom_class, &class_attr_version.attr);
if (retval) {
printk(KERN_ERR "phantom: can't create sysfs version file\n");
goto err_class;
}
retval = alloc_chrdev_region(&dev, 0, PHANTOM_MAX_MINORS, "phantom");
if (retval) {
printk(KERN_ERR "phantom: can't register character device\n");
goto err_attr;
}
phantom_major = MAJOR(dev);
retval = pci_register_driver(&phantom_pci_driver);
if (retval) {
printk(KERN_ERR "phantom: can't register pci driver\n");
goto err_unchr;
}
printk(KERN_INFO "Phantom Linux Driver, version " PHANTOM_VERSION ", "
"init OK\n");
return 0;
err_unchr:
unregister_chrdev_region(dev, PHANTOM_MAX_MINORS);
err_attr:
class_remove_file(phantom_class, &class_attr_version.attr);
err_class:
class_destroy(phantom_class);
err:
return retval;
}
static void __exit phantom_exit(void)
{
pci_unregister_driver(&phantom_pci_driver);
unregister_chrdev_region(MKDEV(phantom_major, 0), PHANTOM_MAX_MINORS);
class_remove_file(phantom_class, &class_attr_version.attr);
class_destroy(phantom_class);
pr_debug("phantom: module successfully removed\n");
}
module_init(phantom_init);
module_exit(phantom_exit);
MODULE_AUTHOR("Jiri Slaby <[email protected]>");
MODULE_DESCRIPTION("Sensable Phantom driver (PCI devices)");
MODULE_LICENSE("GPL");
MODULE_VERSION(PHANTOM_VERSION);
| linux-master | drivers/misc/phantom.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Driver for the Analog Devices digital potentiometers (I2C bus)
*
* Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
*/
#include <linux/i2c.h>
#include <linux/module.h>
#include "ad525x_dpot.h"
/* I2C bus functions */
static int write_d8(void *client, u8 val)
{
return i2c_smbus_write_byte(client, val);
}
static int write_r8d8(void *client, u8 reg, u8 val)
{
return i2c_smbus_write_byte_data(client, reg, val);
}
static int write_r8d16(void *client, u8 reg, u16 val)
{
return i2c_smbus_write_word_data(client, reg, val);
}
static int read_d8(void *client)
{
return i2c_smbus_read_byte(client);
}
static int read_r8d8(void *client, u8 reg)
{
return i2c_smbus_read_byte_data(client, reg);
}
static int read_r8d16(void *client, u8 reg)
{
return i2c_smbus_read_word_data(client, reg);
}
static const struct ad_dpot_bus_ops bops = {
.read_d8 = read_d8,
.read_r8d8 = read_r8d8,
.read_r8d16 = read_r8d16,
.write_d8 = write_d8,
.write_r8d8 = write_r8d8,
.write_r8d16 = write_r8d16,
};
static int ad_dpot_i2c_probe(struct i2c_client *client)
{
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct ad_dpot_bus_data bdata = {
.client = client,
.bops = &bops,
};
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
dev_err(&client->dev, "SMBUS Word Data not Supported\n");
return -EIO;
}
return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name);
}
static void ad_dpot_i2c_remove(struct i2c_client *client)
{
ad_dpot_remove(&client->dev);
}
static const struct i2c_device_id ad_dpot_id[] = {
{"ad5258", AD5258_ID},
{"ad5259", AD5259_ID},
{"ad5251", AD5251_ID},
{"ad5252", AD5252_ID},
{"ad5253", AD5253_ID},
{"ad5254", AD5254_ID},
{"ad5255", AD5255_ID},
{"ad5241", AD5241_ID},
{"ad5242", AD5242_ID},
{"ad5243", AD5243_ID},
{"ad5245", AD5245_ID},
{"ad5246", AD5246_ID},
{"ad5247", AD5247_ID},
{"ad5248", AD5248_ID},
{"ad5280", AD5280_ID},
{"ad5282", AD5282_ID},
{"adn2860", ADN2860_ID},
{"ad5273", AD5273_ID},
{"ad5161", AD5161_ID},
{"ad5171", AD5171_ID},
{"ad5170", AD5170_ID},
{"ad5172", AD5172_ID},
{"ad5173", AD5173_ID},
{"ad5272", AD5272_ID},
{"ad5274", AD5274_ID},
{}
};
MODULE_DEVICE_TABLE(i2c, ad_dpot_id);
static struct i2c_driver ad_dpot_i2c_driver = {
.driver = {
.name = "ad_dpot",
},
.probe = ad_dpot_i2c_probe,
.remove = ad_dpot_i2c_remove,
.id_table = ad_dpot_id,
};
module_i2c_driver(ad_dpot_i2c_driver);
MODULE_AUTHOR("Michael Hennerich <[email protected]>");
MODULE_DESCRIPTION("digital potentiometer I2C bus driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/ad525x_dpot-i2c.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Dallas Semiconductor DS1682 Elapsed Time Recorder device driver
*
* Written by: Grant Likely <[email protected]>
*
* Copyright (C) 2007 Secret Lab Technologies Ltd.
*/
/*
* The DS1682 elapsed timer recorder is a simple device that implements
* one elapsed time counter, one event counter, an alarm signal and 10
* bytes of general purpose EEPROM.
*
* This driver provides access to the DS1682 counters and user data via
* the sysfs. The following attributes are added to the device node:
* elapsed_time (u32): Total elapsed event time in ms resolution
* alarm_time (u32): When elapsed time exceeds the value in alarm_time,
* then the alarm pin is asserted.
* event_count (u16): number of times the event pin has gone low.
* eeprom (u8[10]): general purpose EEPROM
*
* Counter registers and user data are both read/write unless the device
* has been write protected. This driver does not support enabling write
* protection: once it is turned on it can never be turned off again, so the
* feature has been left out of this driver to avoid accidental enabling,
* although it would be trivial to add.
*
*/
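/*
* Usage sketch added for clarity (the sysfs path depends on the I2C bus
* number and device address; 1-006b is only an example):
*   cat /sys/bus/i2c/devices/1-006b/elapsed_time     (milliseconds)
*   echo 3600000 > /sys/bus/i2c/devices/1-006b/alarm_time
*   cat /sys/bus/i2c/devices/1-006b/event_count
* The eeprom attribute is a 10-byte binary file, readable with dd or
* hexdump.
*/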
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/hwmon-sysfs.h>
/* Device registers */
#define DS1682_REG_CONFIG 0x00
#define DS1682_REG_ALARM 0x01
#define DS1682_REG_ELAPSED 0x05
#define DS1682_REG_EVT_CNTR 0x09
#define DS1682_REG_EEPROM 0x0b
#define DS1682_REG_RESET 0x1d
#define DS1682_REG_WRITE_DISABLE 0x1e
#define DS1682_REG_WRITE_MEM_DISABLE 0x1f
#define DS1682_EEPROM_SIZE 10
/*
* Generic counter attributes
*/
static ssize_t ds1682_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
struct i2c_client *client = to_i2c_client(dev);
unsigned long long val, check;
__le32 val_le = 0;
int rc;
dev_dbg(dev, "ds1682_show() called on %s\n", attr->attr.name);
/* Read the register */
rc = i2c_smbus_read_i2c_block_data(client, sattr->index, sattr->nr,
(u8 *)&val_le);
if (rc < 0)
return -EIO;
val = le32_to_cpu(val_le);
if (sattr->index == DS1682_REG_ELAPSED) {
int retries = 5;
/* Detect and retry when a tick occurs mid-read */
do {
rc = i2c_smbus_read_i2c_block_data(client, sattr->index,
sattr->nr,
(u8 *)&val_le);
if (rc < 0 || retries <= 0)
return -EIO;
check = val;
val = le32_to_cpu(val_le);
retries--;
} while (val != check && val != (check + 1));
}
/* Format the output string and return # of bytes
* Special case: the 32 bit regs are time values with 1/4s
* resolution, scale them up to milliseconds
*/
return sprintf(buf, "%llu\n", (sattr->nr == 4) ? (val * 250) : val);
}
static ssize_t ds1682_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
struct i2c_client *client = to_i2c_client(dev);
u64 val;
__le32 val_le;
int rc;
dev_dbg(dev, "ds1682_store() called on %s\n", attr->attr.name);
/* Decode input */
rc = kstrtoull(buf, 0, &val);
if (rc < 0) {
dev_dbg(dev, "input string not a number\n");
return -EINVAL;
}
/* Special case: the 32 bit regs are time values with 1/4s
* resolution, scale input down to quarter-seconds */
if (sattr->nr == 4)
do_div(val, 250);
/* write out the value */
val_le = cpu_to_le32(val);
rc = i2c_smbus_write_i2c_block_data(client, sattr->index, sattr->nr,
(u8 *) & val_le);
if (rc < 0) {
dev_err(dev, "register write failed; reg=0x%x, size=%i\n",
sattr->index, sattr->nr);
return -EIO;
}
return count;
}
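/*
* Note added for clarity (not from the original author): the 32-bit
* elapsed/alarm registers tick in quarter seconds, so ds1682_show() scales
* the raw value up by 250 to report milliseconds and ds1682_store() divides
* the user value by 250 before writing it back; e.g. a raw count of 4000
* (1000 seconds) reads back as 1000000 ms.
*/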
/*
* Simple register attributes
*/
static SENSOR_DEVICE_ATTR_2(elapsed_time, S_IRUGO | S_IWUSR, ds1682_show,
ds1682_store, 4, DS1682_REG_ELAPSED);
static SENSOR_DEVICE_ATTR_2(alarm_time, S_IRUGO | S_IWUSR, ds1682_show,
ds1682_store, 4, DS1682_REG_ALARM);
static SENSOR_DEVICE_ATTR_2(event_count, S_IRUGO | S_IWUSR, ds1682_show,
ds1682_store, 2, DS1682_REG_EVT_CNTR);
static const struct attribute_group ds1682_group = {
.attrs = (struct attribute *[]) {
&sensor_dev_attr_elapsed_time.dev_attr.attr,
&sensor_dev_attr_alarm_time.dev_attr.attr,
&sensor_dev_attr_event_count.dev_attr.attr,
NULL,
},
};
/*
* User data attribute
*/
static ssize_t ds1682_eeprom_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
int rc;
dev_dbg(&client->dev, "ds1682_eeprom_read(p=%p, off=%lli, c=%zi)\n",
buf, off, count);
rc = i2c_smbus_read_i2c_block_data(client, DS1682_REG_EEPROM + off,
count, buf);
if (rc < 0)
return -EIO;
return count;
}
static ssize_t ds1682_eeprom_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
struct i2c_client *client = kobj_to_i2c_client(kobj);
dev_dbg(&client->dev, "ds1682_eeprom_write(p=%p, off=%lli, c=%zi)\n",
buf, off, count);
/* Write out to the device */
if (i2c_smbus_write_i2c_block_data(client, DS1682_REG_EEPROM + off,
count, buf) < 0)
return -EIO;
return count;
}
static const struct bin_attribute ds1682_eeprom_attr = {
.attr = {
.name = "eeprom",
.mode = S_IRUGO | S_IWUSR,
},
.size = DS1682_EEPROM_SIZE,
.read = ds1682_eeprom_read,
.write = ds1682_eeprom_write,
};
/*
* Called when a ds1682 device is matched with this driver
*/
static int ds1682_probe(struct i2c_client *client)
{
int rc;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_I2C_BLOCK)) {
dev_err(&client->dev, "i2c bus does not support the ds1682\n");
rc = -ENODEV;
goto exit;
}
rc = sysfs_create_group(&client->dev.kobj, &ds1682_group);
if (rc)
goto exit;
rc = sysfs_create_bin_file(&client->dev.kobj, &ds1682_eeprom_attr);
if (rc)
goto exit_bin_attr;
return 0;
exit_bin_attr:
sysfs_remove_group(&client->dev.kobj, &ds1682_group);
exit:
return rc;
}
static void ds1682_remove(struct i2c_client *client)
{
sysfs_remove_bin_file(&client->dev.kobj, &ds1682_eeprom_attr);
sysfs_remove_group(&client->dev.kobj, &ds1682_group);
}
static const struct i2c_device_id ds1682_id[] = {
{ "ds1682", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ds1682_id);
static const struct of_device_id ds1682_of_match[] = {
{ .compatible = "dallas,ds1682", },
{}
};
MODULE_DEVICE_TABLE(of, ds1682_of_match);
static struct i2c_driver ds1682_driver = {
.driver = {
.name = "ds1682",
.of_match_table = ds1682_of_match,
},
.probe = ds1682_probe,
.remove = ds1682_remove,
.id_table = ds1682_id,
};
module_i2c_driver(ds1682_driver);
MODULE_AUTHOR("Grant Likely <[email protected]>");
MODULE_DESCRIPTION("DS1682 Elapsed Time Indicator driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/ds1682.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* IBM Power Systems Virtual Management Channel Support.
*
* Copyright (c) 2004, 2018 IBM Corp.
* Dave Engebretsen [email protected]
* Steven Royer [email protected]
* Adam Reznechek [email protected]
* Bryant G. Ly <[email protected]>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/miscdevice.h>
#include <linux/sched/signal.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/vio.h>
#include "ibmvmc.h"
#define IBMVMC_DRIVER_VERSION "1.0"
/*
* Static global variables
*/
static DECLARE_WAIT_QUEUE_HEAD(ibmvmc_read_wait);
static const char ibmvmc_driver_name[] = "ibmvmc";
static struct ibmvmc_struct ibmvmc;
static struct ibmvmc_hmc hmcs[MAX_HMCS];
static struct crq_server_adapter ibmvmc_adapter;
static int ibmvmc_max_buf_pool_size = DEFAULT_BUF_POOL_SIZE;
static int ibmvmc_max_hmcs = DEFAULT_HMCS;
static int ibmvmc_max_mtu = DEFAULT_MTU;
static inline long h_copy_rdma(s64 length, u64 sliobn, u64 slioba,
u64 dliobn, u64 dlioba)
{
long rc = 0;
/* Ensure all writes to source memory are visible before hcall */
dma_wmb();
pr_debug("ibmvmc: h_copy_rdma(0x%llx, 0x%llx, 0x%llx, 0x%llx, 0x%llx\n",
length, sliobn, slioba, dliobn, dlioba);
rc = plpar_hcall_norets(H_COPY_RDMA, length, sliobn, slioba,
dliobn, dlioba);
pr_debug("ibmvmc: h_copy_rdma rc = 0x%lx\n", rc);
return rc;
}
static inline void h_free_crq(uint32_t unit_address)
{
long rc = 0;
do {
if (H_IS_LONG_BUSY(rc))
msleep(get_longbusy_msecs(rc));
rc = plpar_hcall_norets(H_FREE_CRQ, unit_address);
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
}
/**
* h_request_vmc: - request a hypervisor virtual management channel device
* @vmc_index: drc index of the vmc device created
*
* Requests the hypervisor create a new virtual management channel device,
* allowing this partition to send hypervisor virtualization control
* commands.
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static inline long h_request_vmc(u32 *vmc_index)
{
long rc = 0;
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
do {
if (H_IS_LONG_BUSY(rc))
msleep(get_longbusy_msecs(rc));
/* Call to request the VMC device from phyp */
rc = plpar_hcall(H_REQUEST_VMC, retbuf);
pr_debug("ibmvmc: %s rc = 0x%lx\n", __func__, rc);
*vmc_index = retbuf[0];
} while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
return rc;
}
/* routines for managing a command/response queue */
/**
* ibmvmc_handle_event: - Interrupt handler for crq events
* @irq: number of irq to handle, not used
* @dev_instance: crq_server_adapter that received interrupt
*
* Disables interrupts and schedules ibmvmc_task
*
* Always returns IRQ_HANDLED
*/
static irqreturn_t ibmvmc_handle_event(int irq, void *dev_instance)
{
struct crq_server_adapter *adapter =
(struct crq_server_adapter *)dev_instance;
vio_disable_interrupts(to_vio_dev(adapter->dev));
tasklet_schedule(&adapter->work_task);
return IRQ_HANDLED;
}
/**
* ibmvmc_release_crq_queue - Release CRQ Queue
*
* @adapter: crq_server_adapter struct
 *
 * Frees the IRQ, kills the work tasklet, stops the reset task, releases the
 * CRQ with the hypervisor, and unmaps and frees the queue page.
 */
static void ibmvmc_release_crq_queue(struct crq_server_adapter *adapter)
{
struct vio_dev *vdev = to_vio_dev(adapter->dev);
struct crq_queue *queue = &adapter->queue;
free_irq(vdev->irq, (void *)adapter);
tasklet_kill(&adapter->work_task);
if (adapter->reset_task)
kthread_stop(adapter->reset_task);
h_free_crq(vdev->unit_address);
dma_unmap_single(adapter->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
free_page((unsigned long)queue->msgs);
}
/**
* ibmvmc_reset_crq_queue - Reset CRQ Queue
*
* @adapter: crq_server_adapter struct
*
* This function calls h_free_crq and then calls H_REG_CRQ and does all the
* bookkeeping to get us back to where we can communicate.
*
* Return:
* 0 - Success
* Non-Zero - Failure
*/
static int ibmvmc_reset_crq_queue(struct crq_server_adapter *adapter)
{
struct vio_dev *vdev = to_vio_dev(adapter->dev);
struct crq_queue *queue = &adapter->queue;
int rc = 0;
/* Close the CRQ */
h_free_crq(vdev->unit_address);
/* Clean out the queue */
memset(queue->msgs, 0x00, PAGE_SIZE);
queue->cur = 0;
/* And re-open it again */
rc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
if (rc == 2)
/* Adapter is good, but other end is not ready */
dev_warn(adapter->dev, "Partner adapter not ready\n");
else if (rc != 0)
dev_err(adapter->dev, "couldn't register crq--rc 0x%x\n", rc);
return rc;
}
/**
* crq_queue_next_crq: - Returns the next entry in message queue
* @queue: crq_queue to use
*
 * Returns a pointer to the next entry in the queue, or NULL if there are no
 * new entries in the CRQ.
*/
static struct ibmvmc_crq_msg *crq_queue_next_crq(struct crq_queue *queue)
{
struct ibmvmc_crq_msg *crq;
unsigned long flags;
spin_lock_irqsave(&queue->lock, flags);
crq = &queue->msgs[queue->cur];
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
/* Ensure the read of the valid bit occurs before reading any
* other bits of the CRQ entry
*/
dma_rmb();
} else {
crq = NULL;
}
spin_unlock_irqrestore(&queue->lock, flags);
return crq;
}
/**
* ibmvmc_send_crq - Send CRQ
*
* @adapter: crq_server_adapter struct
* @word1: Word1 Data field
* @word2: Word2 Data field
*
* Return:
* 0 - Success
* Non-Zero - Failure
*/
static long ibmvmc_send_crq(struct crq_server_adapter *adapter,
u64 word1, u64 word2)
{
struct vio_dev *vdev = to_vio_dev(adapter->dev);
long rc = 0;
dev_dbg(adapter->dev, "(0x%x, 0x%016llx, 0x%016llx)\n",
vdev->unit_address, word1, word2);
/*
* Ensure the command buffer is flushed to memory before handing it
* over to the other side to prevent it from fetching any stale data.
*/
dma_wmb();
rc = plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
dev_dbg(adapter->dev, "rc = 0x%lx\n", rc);
return rc;
}
/**
* alloc_dma_buffer - Create DMA Buffer
*
* @vdev: vio_dev struct
* @size: Size field
* @dma_handle: DMA address field
*
 * Allocates memory for a buffer and maps it into the partition's I/O address
 * space (ioba).
*
* Returns a pointer to the buffer
*/
static void *alloc_dma_buffer(struct vio_dev *vdev, size_t size,
dma_addr_t *dma_handle)
{
/* allocate memory */
void *buffer = kzalloc(size, GFP_ATOMIC);
if (!buffer) {
*dma_handle = 0;
return NULL;
}
/* DMA map */
*dma_handle = dma_map_single(&vdev->dev, buffer, size,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(&vdev->dev, *dma_handle)) {
*dma_handle = 0;
kfree_sensitive(buffer);
return NULL;
}
return buffer;
}
/**
* free_dma_buffer - Free DMA Buffer
*
* @vdev: vio_dev struct
* @size: Size field
* @vaddr: Address field
* @dma_handle: DMA address field
*
 * Unmaps the buffer from the I/O address space and frees its memory.
*/
static void free_dma_buffer(struct vio_dev *vdev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
/* DMA unmap */
dma_unmap_single(&vdev->dev, dma_handle, size, DMA_BIDIRECTIONAL);
/* deallocate memory */
kfree_sensitive(vaddr);
}
/**
* ibmvmc_get_valid_hmc_buffer - Retrieve Valid HMC Buffer
*
* @hmc_index: HMC Index Field
*
* Return:
* Pointer to ibmvmc_buffer
*/
static struct ibmvmc_buffer *ibmvmc_get_valid_hmc_buffer(u8 hmc_index)
{
struct ibmvmc_buffer *buffer;
struct ibmvmc_buffer *ret_buf = NULL;
unsigned long i;
if (hmc_index > ibmvmc.max_hmc_index)
return NULL;
buffer = hmcs[hmc_index].buffer;
for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
if (buffer[i].valid && buffer[i].free &&
buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
buffer[i].free = 0;
ret_buf = &buffer[i];
break;
}
}
return ret_buf;
}
/**
* ibmvmc_get_free_hmc_buffer - Get Free HMC Buffer
*
* @adapter: crq_server_adapter struct
* @hmc_index: Hmc Index field
*
* Return:
* Pointer to ibmvmc_buffer
*/
static struct ibmvmc_buffer *ibmvmc_get_free_hmc_buffer(struct crq_server_adapter *adapter,
u8 hmc_index)
{
struct ibmvmc_buffer *buffer;
struct ibmvmc_buffer *ret_buf = NULL;
unsigned long i;
if (hmc_index > ibmvmc.max_hmc_index) {
dev_info(adapter->dev, "get_free_hmc_buffer: invalid hmc_index=0x%x\n",
hmc_index);
return NULL;
}
buffer = hmcs[hmc_index].buffer;
for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
if (buffer[i].free &&
buffer[i].owner == VMC_BUF_OWNER_ALPHA) {
buffer[i].free = 0;
ret_buf = &buffer[i];
break;
}
}
return ret_buf;
}
/**
* ibmvmc_free_hmc_buffer - Free an HMC Buffer
*
* @hmc: ibmvmc_hmc struct
* @buffer: ibmvmc_buffer struct
*
*/
static void ibmvmc_free_hmc_buffer(struct ibmvmc_hmc *hmc,
struct ibmvmc_buffer *buffer)
{
unsigned long flags;
spin_lock_irqsave(&hmc->lock, flags);
buffer->free = 1;
spin_unlock_irqrestore(&hmc->lock, flags);
}
/**
* ibmvmc_count_hmc_buffers - Count HMC Buffers
*
* @hmc_index: HMC Index field
 * @valid: output; set to the number of valid buffers
 * @free: output; set to the number of free buffers
*
*/
static void ibmvmc_count_hmc_buffers(u8 hmc_index, unsigned int *valid,
unsigned int *free)
{
struct ibmvmc_buffer *buffer;
unsigned long i;
unsigned long flags;
if (hmc_index > ibmvmc.max_hmc_index)
return;
if (!valid || !free)
return;
*valid = 0; *free = 0;
buffer = hmcs[hmc_index].buffer;
spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
if (buffer[i].valid) {
*valid = *valid + 1;
if (buffer[i].free)
*free = *free + 1;
}
}
spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
}
/**
* ibmvmc_get_free_hmc - Get Free HMC
*
* Return:
* Pointer to an available HMC Connection
* Null otherwise
*/
static struct ibmvmc_hmc *ibmvmc_get_free_hmc(void)
{
unsigned long i;
unsigned long flags;
/*
* Find an available HMC connection.
*/
for (i = 0; i <= ibmvmc.max_hmc_index; i++) {
spin_lock_irqsave(&hmcs[i].lock, flags);
if (hmcs[i].state == ibmhmc_state_free) {
hmcs[i].index = i;
hmcs[i].state = ibmhmc_state_initial;
spin_unlock_irqrestore(&hmcs[i].lock, flags);
return &hmcs[i];
}
spin_unlock_irqrestore(&hmcs[i].lock, flags);
}
return NULL;
}
/**
* ibmvmc_return_hmc - Return an HMC Connection
*
* @hmc: ibmvmc_hmc struct
 * @release_readers: if true, wake up and invalidate any reader blocked on this session
*
* This function releases the HMC connections back into the pool.
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static int ibmvmc_return_hmc(struct ibmvmc_hmc *hmc, bool release_readers)
{
struct ibmvmc_buffer *buffer;
struct crq_server_adapter *adapter;
struct vio_dev *vdev;
unsigned long i;
unsigned long flags;
if (!hmc || !hmc->adapter)
return -EIO;
if (release_readers) {
if (hmc->file_session) {
struct ibmvmc_file_session *session = hmc->file_session;
session->valid = 0;
wake_up_interruptible(&ibmvmc_read_wait);
}
}
adapter = hmc->adapter;
vdev = to_vio_dev(adapter->dev);
spin_lock_irqsave(&hmc->lock, flags);
hmc->index = 0;
hmc->state = ibmhmc_state_free;
hmc->queue_head = 0;
hmc->queue_tail = 0;
buffer = hmc->buffer;
for (i = 0; i < ibmvmc_max_buf_pool_size; i++) {
if (buffer[i].valid) {
free_dma_buffer(vdev,
ibmvmc.max_mtu,
buffer[i].real_addr_local,
buffer[i].dma_addr_local);
dev_dbg(adapter->dev, "Forgot buffer id 0x%lx\n", i);
}
memset(&buffer[i], 0, sizeof(struct ibmvmc_buffer));
hmc->queue_outbound_msgs[i] = VMC_INVALID_BUFFER_ID;
}
spin_unlock_irqrestore(&hmc->lock, flags);
return 0;
}
/**
* ibmvmc_send_open - Interface Open
* @buffer: Pointer to ibmvmc_buffer struct
* @hmc: Pointer to ibmvmc_hmc struct
*
* This command is sent by the management partition as the result of a
* management partition device request. It causes the hypervisor to
 * prepare a set of data buffers for the management application connection
 * indicated by the HMC index. A unique HMC index would be used if multiple
 * management applications running concurrently were desired. Before
 * responding to this command, the hypervisor must provide the management
 * partition with at least one of these new buffers via the Add Buffer
 * message. This indicates whether the messages are inbound or outbound
 * from the hypervisor.
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static int ibmvmc_send_open(struct ibmvmc_buffer *buffer,
struct ibmvmc_hmc *hmc)
{
struct ibmvmc_crq_msg crq_msg;
struct crq_server_adapter *adapter;
__be64 *crq_as_u64 = (__be64 *)&crq_msg;
int rc = 0;
if (!hmc || !hmc->adapter)
return -EIO;
adapter = hmc->adapter;
dev_dbg(adapter->dev, "send_open: 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
(unsigned long)buffer->size, (unsigned long)adapter->liobn,
(unsigned long)buffer->dma_addr_local,
(unsigned long)adapter->riobn,
(unsigned long)buffer->dma_addr_remote);
rc = h_copy_rdma(buffer->size,
adapter->liobn,
buffer->dma_addr_local,
adapter->riobn,
buffer->dma_addr_remote);
if (rc) {
dev_err(adapter->dev, "Error: In send_open, h_copy_rdma rc 0x%x\n",
rc);
return -EIO;
}
hmc->state = ibmhmc_state_opening;
crq_msg.valid = 0x80;
crq_msg.type = VMC_MSG_OPEN;
crq_msg.status = 0;
crq_msg.var1.rsvd = 0;
crq_msg.hmc_session = hmc->session;
crq_msg.hmc_index = hmc->index;
crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
crq_msg.rsvd = 0;
crq_msg.var3.rsvd = 0;
ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
be64_to_cpu(crq_as_u64[1]));
return rc;
}
/**
* ibmvmc_send_close - Interface Close
* @hmc: Pointer to ibmvmc_hmc struct
*
* This command is sent by the management partition to terminate a
* management application to hypervisor connection. When this command is
* sent, the management partition has quiesced all I/O operations to all
* buffers associated with this management application connection, and
* has freed any storage for these buffers.
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static int ibmvmc_send_close(struct ibmvmc_hmc *hmc)
{
struct ibmvmc_crq_msg crq_msg;
struct crq_server_adapter *adapter;
__be64 *crq_as_u64 = (__be64 *)&crq_msg;
int rc = 0;
if (!hmc || !hmc->adapter)
return -EIO;
adapter = hmc->adapter;
dev_info(adapter->dev, "CRQ send: close\n");
crq_msg.valid = 0x80;
crq_msg.type = VMC_MSG_CLOSE;
crq_msg.status = 0;
crq_msg.var1.rsvd = 0;
crq_msg.hmc_session = hmc->session;
crq_msg.hmc_index = hmc->index;
crq_msg.var2.rsvd = 0;
crq_msg.rsvd = 0;
crq_msg.var3.rsvd = 0;
ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
be64_to_cpu(crq_as_u64[1]));
return rc;
}
/**
* ibmvmc_send_capabilities - Send VMC Capabilities
*
* @adapter: crq_server_adapter struct
*
* The capabilities message is an administrative message sent after the CRQ
* initialization sequence of messages and is used to exchange VMC capabilities
* between the management partition and the hypervisor. The management
 * partition must send this message and the hypervisor must respond with the
 * VMC Capabilities Response message before HMC interface messages can begin.
 * Any HMC interface messages received before the exchange of capabilities has
 * completed are dropped.
*
* Return:
* 0 - Success
*/
static int ibmvmc_send_capabilities(struct crq_server_adapter *adapter)
{
struct ibmvmc_admin_crq_msg crq_msg;
__be64 *crq_as_u64 = (__be64 *)&crq_msg;
dev_dbg(adapter->dev, "ibmvmc: CRQ send: capabilities\n");
crq_msg.valid = 0x80;
crq_msg.type = VMC_MSG_CAP;
crq_msg.status = 0;
crq_msg.rsvd[0] = 0;
crq_msg.rsvd[1] = 0;
crq_msg.max_hmc = ibmvmc_max_hmcs;
crq_msg.max_mtu = cpu_to_be32(ibmvmc_max_mtu);
crq_msg.pool_size = cpu_to_be16(ibmvmc_max_buf_pool_size);
crq_msg.crq_size = cpu_to_be16(adapter->queue.size);
crq_msg.version = cpu_to_be16(IBMVMC_PROTOCOL_VERSION);
ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
be64_to_cpu(crq_as_u64[1]));
ibmvmc.state = ibmvmc_state_capabilities;
return 0;
}
/**
* ibmvmc_send_add_buffer_resp - Add Buffer Response
*
* @adapter: crq_server_adapter struct
* @status: Status field
* @hmc_session: HMC Session field
* @hmc_index: HMC Index field
* @buffer_id: Buffer Id field
*
* This command is sent by the management partition to the hypervisor in
* response to the Add Buffer message. The Status field indicates the result of
* the command.
*
* Return:
* 0 - Success
*/
static int ibmvmc_send_add_buffer_resp(struct crq_server_adapter *adapter,
u8 status, u8 hmc_session,
u8 hmc_index, u16 buffer_id)
{
struct ibmvmc_crq_msg crq_msg;
__be64 *crq_as_u64 = (__be64 *)&crq_msg;
dev_dbg(adapter->dev, "CRQ send: add_buffer_resp\n");
crq_msg.valid = 0x80;
crq_msg.type = VMC_MSG_ADD_BUF_RESP;
crq_msg.status = status;
crq_msg.var1.rsvd = 0;
crq_msg.hmc_session = hmc_session;
crq_msg.hmc_index = hmc_index;
crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
crq_msg.rsvd = 0;
crq_msg.var3.rsvd = 0;
ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
be64_to_cpu(crq_as_u64[1]));
return 0;
}
/**
* ibmvmc_send_rem_buffer_resp - Remove Buffer Response
*
* @adapter: crq_server_adapter struct
* @status: Status field
* @hmc_session: HMC Session field
* @hmc_index: HMC Index field
* @buffer_id: Buffer Id field
*
* This command is sent by the management partition to the hypervisor in
* response to the Remove Buffer message. The Buffer ID field indicates
* which buffer the management partition selected to remove. The Status
* field indicates the result of the command.
*
* Return:
* 0 - Success
*/
static int ibmvmc_send_rem_buffer_resp(struct crq_server_adapter *adapter,
u8 status, u8 hmc_session,
u8 hmc_index, u16 buffer_id)
{
struct ibmvmc_crq_msg crq_msg;
__be64 *crq_as_u64 = (__be64 *)&crq_msg;
dev_dbg(adapter->dev, "CRQ send: rem_buffer_resp\n");
crq_msg.valid = 0x80;
crq_msg.type = VMC_MSG_REM_BUF_RESP;
crq_msg.status = status;
crq_msg.var1.rsvd = 0;
crq_msg.hmc_session = hmc_session;
crq_msg.hmc_index = hmc_index;
crq_msg.var2.buffer_id = cpu_to_be16(buffer_id);
crq_msg.rsvd = 0;
crq_msg.var3.rsvd = 0;
ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
be64_to_cpu(crq_as_u64[1]));
return 0;
}
/**
* ibmvmc_send_msg - Signal Message
*
* @adapter: crq_server_adapter struct
* @buffer: ibmvmc_buffer struct
* @hmc: ibmvmc_hmc struct
* @msg_len: message length field
*
* This command is sent between the management partition and the hypervisor
* in order to signal the arrival of an HMC protocol message. The command
* can be sent by both the management partition and the hypervisor. It is
* used for all traffic between the management application and the hypervisor,
* regardless of who initiated the communication.
*
* There is no response to this message.
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static int ibmvmc_send_msg(struct crq_server_adapter *adapter,
struct ibmvmc_buffer *buffer,
struct ibmvmc_hmc *hmc, int msg_len)
{
struct ibmvmc_crq_msg crq_msg;
__be64 *crq_as_u64 = (__be64 *)&crq_msg;
int rc = 0;
dev_dbg(adapter->dev, "CRQ send: rdma to HV\n");
rc = h_copy_rdma(msg_len,
adapter->liobn,
buffer->dma_addr_local,
adapter->riobn,
buffer->dma_addr_remote);
if (rc) {
dev_err(adapter->dev, "Error in send_msg, h_copy_rdma rc 0x%x\n",
rc);
return rc;
}
crq_msg.valid = 0x80;
crq_msg.type = VMC_MSG_SIGNAL;
crq_msg.status = 0;
crq_msg.var1.rsvd = 0;
crq_msg.hmc_session = hmc->session;
crq_msg.hmc_index = hmc->index;
crq_msg.var2.buffer_id = cpu_to_be16(buffer->id);
crq_msg.var3.msg_len = cpu_to_be32(msg_len);
dev_dbg(adapter->dev, "CRQ send: msg to HV 0x%llx 0x%llx\n",
be64_to_cpu(crq_as_u64[0]), be64_to_cpu(crq_as_u64[1]));
buffer->owner = VMC_BUF_OWNER_HV;
ibmvmc_send_crq(adapter, be64_to_cpu(crq_as_u64[0]),
be64_to_cpu(crq_as_u64[1]));
return rc;
}
/**
* ibmvmc_open - Open Session
*
* @inode: inode struct
* @file: file struct
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static int ibmvmc_open(struct inode *inode, struct file *file)
{
struct ibmvmc_file_session *session;
pr_debug("%s: inode = 0x%lx, file = 0x%lx, state = 0x%x\n", __func__,
(unsigned long)inode, (unsigned long)file,
ibmvmc.state);
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
return -ENOMEM;
session->file = file;
file->private_data = session;
return 0;
}
/**
* ibmvmc_close - Close Session
*
* @inode: inode struct
* @file: file struct
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static int ibmvmc_close(struct inode *inode, struct file *file)
{
struct ibmvmc_file_session *session;
struct ibmvmc_hmc *hmc;
int rc = 0;
unsigned long flags;
pr_debug("%s: file = 0x%lx, state = 0x%x\n", __func__,
(unsigned long)file, ibmvmc.state);
session = file->private_data;
if (!session)
return -EIO;
hmc = session->hmc;
if (hmc) {
if (!hmc->adapter)
return -EIO;
if (ibmvmc.state == ibmvmc_state_failed) {
dev_warn(hmc->adapter->dev, "close: state_failed\n");
return -EIO;
}
spin_lock_irqsave(&hmc->lock, flags);
if (hmc->state >= ibmhmc_state_opening) {
rc = ibmvmc_send_close(hmc);
if (rc)
dev_warn(hmc->adapter->dev, "close: send_close failed.\n");
}
spin_unlock_irqrestore(&hmc->lock, flags);
}
kfree_sensitive(session);
return rc;
}
/**
* ibmvmc_read - Read
*
* @file: file struct
* @buf: Character buffer
* @nbytes: Size in bytes
* @ppos: Offset
*
* Return:
 *	Number of bytes read on success
 *	Negative errno on failure
*/
static ssize_t ibmvmc_read(struct file *file, char *buf, size_t nbytes,
loff_t *ppos)
{
struct ibmvmc_file_session *session;
struct ibmvmc_hmc *hmc;
struct crq_server_adapter *adapter;
struct ibmvmc_buffer *buffer;
ssize_t n;
ssize_t retval = 0;
unsigned long flags;
DEFINE_WAIT(wait);
pr_debug("ibmvmc: read: file = 0x%lx, buf = 0x%lx, nbytes = 0x%lx\n",
(unsigned long)file, (unsigned long)buf,
(unsigned long)nbytes);
if (nbytes == 0)
return 0;
if (nbytes > ibmvmc.max_mtu) {
pr_warn("ibmvmc: read: nbytes invalid 0x%x\n",
(unsigned int)nbytes);
return -EINVAL;
}
session = file->private_data;
if (!session) {
pr_warn("ibmvmc: read: no session\n");
return -EIO;
}
hmc = session->hmc;
if (!hmc) {
pr_warn("ibmvmc: read: no hmc\n");
return -EIO;
}
adapter = hmc->adapter;
if (!adapter) {
pr_warn("ibmvmc: read: no adapter\n");
return -EIO;
}
do {
prepare_to_wait(&ibmvmc_read_wait, &wait, TASK_INTERRUPTIBLE);
spin_lock_irqsave(&hmc->lock, flags);
if (hmc->queue_tail != hmc->queue_head)
/* Data is available */
break;
spin_unlock_irqrestore(&hmc->lock, flags);
if (!session->valid) {
retval = -EBADFD;
goto out;
}
if (file->f_flags & O_NONBLOCK) {
retval = -EAGAIN;
goto out;
}
schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
goto out;
}
} while (1);
buffer = &(hmc->buffer[hmc->queue_outbound_msgs[hmc->queue_tail]]);
hmc->queue_tail++;
if (hmc->queue_tail == ibmvmc_max_buf_pool_size)
hmc->queue_tail = 0;
spin_unlock_irqrestore(&hmc->lock, flags);
nbytes = min_t(size_t, nbytes, buffer->msg_len);
n = copy_to_user((void *)buf, buffer->real_addr_local, nbytes);
dev_dbg(adapter->dev, "read: copy to user nbytes = 0x%lx.\n", nbytes);
ibmvmc_free_hmc_buffer(hmc, buffer);
retval = nbytes;
if (n) {
dev_warn(adapter->dev, "read: copy to user failed.\n");
retval = -EFAULT;
}
out:
finish_wait(&ibmvmc_read_wait, &wait);
dev_dbg(adapter->dev, "read: out %ld\n", retval);
return retval;
}
/**
* ibmvmc_poll - Poll
*
* @file: file struct
* @wait: Poll Table
*
* Return:
* poll.h return values
*/
static unsigned int ibmvmc_poll(struct file *file, poll_table *wait)
{
struct ibmvmc_file_session *session;
struct ibmvmc_hmc *hmc;
unsigned int mask = 0;
session = file->private_data;
if (!session)
return 0;
hmc = session->hmc;
if (!hmc)
return 0;
poll_wait(file, &ibmvmc_read_wait, wait);
if (hmc->queue_head != hmc->queue_tail)
mask |= POLLIN | POLLRDNORM;
return mask;
}
/**
* ibmvmc_write - Write
*
* @file: file struct
* @buffer: Character buffer
* @count: Count field
* @ppos: Offset
*
* Return:
 *	Number of bytes written on success
 *	Negative errno on failure
*/
static ssize_t ibmvmc_write(struct file *file, const char *buffer,
size_t count, loff_t *ppos)
{
struct inode *inode;
struct ibmvmc_buffer *vmc_buffer;
struct ibmvmc_file_session *session;
struct crq_server_adapter *adapter;
struct ibmvmc_hmc *hmc;
unsigned char *buf;
unsigned long flags;
size_t bytes;
const char *p = buffer;
size_t c = count;
int ret = 0;
session = file->private_data;
if (!session)
return -EIO;
hmc = session->hmc;
if (!hmc)
return -EIO;
spin_lock_irqsave(&hmc->lock, flags);
if (hmc->state == ibmhmc_state_free) {
/* HMC connection is not valid (possibly was reset under us). */
ret = -EIO;
goto out;
}
adapter = hmc->adapter;
if (!adapter) {
ret = -EIO;
goto out;
}
if (count > ibmvmc.max_mtu) {
dev_warn(adapter->dev, "invalid buffer size 0x%lx\n",
(unsigned long)count);
ret = -EIO;
goto out;
}
	/* Still waiting for the open response to the sethmcid ioctl - retry */
if (hmc->state == ibmhmc_state_opening) {
ret = -EBUSY;
goto out;
}
/* Make sure the ioctl() was called & the open msg sent, and that
* the HMC connection has not failed.
*/
if (hmc->state != ibmhmc_state_ready) {
ret = -EIO;
goto out;
}
vmc_buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
if (!vmc_buffer) {
/* No buffer available for the msg send, or we have not yet
* completed the open/open_resp sequence. Retry until this is
* complete.
*/
ret = -EBUSY;
goto out;
}
if (!vmc_buffer->real_addr_local) {
dev_err(adapter->dev, "no buffer storage assigned\n");
ret = -EIO;
goto out;
}
buf = vmc_buffer->real_addr_local;
while (c > 0) {
bytes = min_t(size_t, c, vmc_buffer->size);
bytes -= copy_from_user(buf, p, bytes);
if (!bytes) {
ret = -EFAULT;
goto out;
}
c -= bytes;
p += bytes;
}
if (p == buffer)
goto out;
inode = file_inode(file);
inode->i_mtime = inode_set_ctime_current(inode);
mark_inode_dirty(inode);
dev_dbg(adapter->dev, "write: file = 0x%lx, count = 0x%lx\n",
(unsigned long)file, (unsigned long)count);
ibmvmc_send_msg(adapter, vmc_buffer, hmc, count);
ret = p - buffer;
out:
spin_unlock_irqrestore(&hmc->lock, flags);
return (ssize_t)(ret);
}
/**
* ibmvmc_setup_hmc - Setup the HMC
*
* @session: ibmvmc_file_session struct
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static long ibmvmc_setup_hmc(struct ibmvmc_file_session *session)
{
struct ibmvmc_hmc *hmc;
unsigned int valid, free, index;
if (ibmvmc.state == ibmvmc_state_failed) {
pr_warn("ibmvmc: Reserve HMC: state_failed\n");
return -EIO;
}
if (ibmvmc.state < ibmvmc_state_ready) {
pr_warn("ibmvmc: Reserve HMC: not state_ready\n");
return -EAGAIN;
}
/* Device is busy until capabilities have been exchanged and we
* have a generic buffer for each possible HMC connection.
*/
for (index = 0; index <= ibmvmc.max_hmc_index; index++) {
valid = 0;
ibmvmc_count_hmc_buffers(index, &valid, &free);
if (valid == 0) {
pr_warn("ibmvmc: buffers not ready for index %d\n",
index);
return -ENOBUFS;
}
}
/* Get an hmc object, and transition to ibmhmc_state_initial */
hmc = ibmvmc_get_free_hmc();
if (!hmc) {
pr_warn("%s: free hmc not found\n", __func__);
return -EBUSY;
}
hmc->session = hmc->session + 1;
if (hmc->session == 0xff)
hmc->session = 1;
session->hmc = hmc;
hmc->adapter = &ibmvmc_adapter;
hmc->file_session = session;
session->valid = 1;
return 0;
}
/**
* ibmvmc_ioctl_sethmcid - IOCTL Set HMC ID
*
* @session: ibmvmc_file_session struct
* @new_hmc_id: HMC id field
*
* IOCTL command to setup the hmc id
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static long ibmvmc_ioctl_sethmcid(struct ibmvmc_file_session *session,
unsigned char __user *new_hmc_id)
{
struct ibmvmc_hmc *hmc;
struct ibmvmc_buffer *buffer;
size_t bytes;
char print_buffer[HMC_ID_LEN + 1];
unsigned long flags;
long rc = 0;
/* Reserve HMC session */
hmc = session->hmc;
if (!hmc) {
rc = ibmvmc_setup_hmc(session);
if (rc)
return rc;
hmc = session->hmc;
if (!hmc) {
pr_err("ibmvmc: setup_hmc success but no hmc\n");
return -EIO;
}
}
if (hmc->state != ibmhmc_state_initial) {
pr_warn("ibmvmc: sethmcid: invalid state to send open 0x%x\n",
hmc->state);
return -EIO;
}
bytes = copy_from_user(hmc->hmc_id, new_hmc_id, HMC_ID_LEN);
if (bytes)
return -EFAULT;
/* Send Open Session command */
spin_lock_irqsave(&hmc->lock, flags);
buffer = ibmvmc_get_valid_hmc_buffer(hmc->index);
spin_unlock_irqrestore(&hmc->lock, flags);
if (!buffer || !buffer->real_addr_local) {
pr_warn("ibmvmc: sethmcid: no buffer available\n");
return -EIO;
}
/* Make sure buffer is NULL terminated before trying to print it */
memset(print_buffer, 0, HMC_ID_LEN + 1);
strncpy(print_buffer, hmc->hmc_id, HMC_ID_LEN);
pr_info("ibmvmc: sethmcid: Set HMC ID: \"%s\"\n", print_buffer);
memcpy(buffer->real_addr_local, hmc->hmc_id, HMC_ID_LEN);
/* RDMA over ID, send open msg, change state to ibmhmc_state_opening */
rc = ibmvmc_send_open(buffer, hmc);
return rc;
}
/**
* ibmvmc_ioctl_query - IOCTL Query
*
* @session: ibmvmc_file_session struct
* @ret_struct: ibmvmc_query_struct
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static long ibmvmc_ioctl_query(struct ibmvmc_file_session *session,
struct ibmvmc_query_struct __user *ret_struct)
{
struct ibmvmc_query_struct query_struct;
size_t bytes;
memset(&query_struct, 0, sizeof(query_struct));
query_struct.have_vmc = (ibmvmc.state > ibmvmc_state_initial);
query_struct.state = ibmvmc.state;
query_struct.vmc_drc_index = ibmvmc.vmc_drc_index;
bytes = copy_to_user(ret_struct, &query_struct,
sizeof(query_struct));
if (bytes)
return -EFAULT;
return 0;
}
/**
* ibmvmc_ioctl_requestvmc - IOCTL Request VMC
*
* @session: ibmvmc_file_session struct
* @ret_vmc_index: VMC Index
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static long ibmvmc_ioctl_requestvmc(struct ibmvmc_file_session *session,
u32 __user *ret_vmc_index)
{
/* TODO: (adreznec) Add locking to control multiple process access */
size_t bytes;
long rc;
u32 vmc_drc_index;
	/* Call to request the VMC device from phyp */
rc = h_request_vmc(&vmc_drc_index);
pr_debug("ibmvmc: requestvmc: H_REQUEST_VMC rc = 0x%lx\n", rc);
if (rc == H_SUCCESS) {
rc = 0;
} else if (rc == H_FUNCTION) {
pr_err("ibmvmc: requestvmc: h_request_vmc not supported\n");
return -EPERM;
} else if (rc == H_AUTHORITY) {
pr_err("ibmvmc: requestvmc: hypervisor denied vmc request\n");
return -EPERM;
} else if (rc == H_HARDWARE) {
pr_err("ibmvmc: requestvmc: hypervisor hardware fault\n");
return -EIO;
} else if (rc == H_RESOURCE) {
pr_err("ibmvmc: requestvmc: vmc resource unavailable\n");
return -ENODEV;
} else if (rc == H_NOT_AVAILABLE) {
pr_err("ibmvmc: requestvmc: system cannot be vmc managed\n");
return -EPERM;
} else if (rc == H_PARAMETER) {
pr_err("ibmvmc: requestvmc: invalid parameter\n");
return -EINVAL;
}
/* Success, set the vmc index in global struct */
ibmvmc.vmc_drc_index = vmc_drc_index;
bytes = copy_to_user(ret_vmc_index, &vmc_drc_index,
sizeof(*ret_vmc_index));
if (bytes) {
pr_warn("ibmvmc: requestvmc: copy to user failed.\n");
return -EFAULT;
}
return rc;
}
/**
* ibmvmc_ioctl - IOCTL
*
* @file: file information
* @cmd: cmd field
* @arg: Argument field
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static long ibmvmc_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct ibmvmc_file_session *session = file->private_data;
pr_debug("ibmvmc: ioctl file=0x%lx, cmd=0x%x, arg=0x%lx, ses=0x%lx\n",
(unsigned long)file, cmd, arg,
(unsigned long)session);
if (!session) {
pr_warn("ibmvmc: ioctl: no session\n");
return -EIO;
}
switch (cmd) {
case VMC_IOCTL_SETHMCID:
return ibmvmc_ioctl_sethmcid(session,
(unsigned char __user *)arg);
case VMC_IOCTL_QUERY:
return ibmvmc_ioctl_query(session,
(struct ibmvmc_query_struct __user *)arg);
case VMC_IOCTL_REQUESTVMC:
return ibmvmc_ioctl_requestvmc(session,
(unsigned int __user *)arg);
default:
pr_warn("ibmvmc: unknown ioctl 0x%x\n", cmd);
return -EINVAL;
}
}
static const struct file_operations ibmvmc_fops = {
.owner = THIS_MODULE,
.read = ibmvmc_read,
.write = ibmvmc_write,
.poll = ibmvmc_poll,
.unlocked_ioctl = ibmvmc_ioctl,
.open = ibmvmc_open,
.release = ibmvmc_close,
};
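/*
 * Usage note (not part of the original driver): the /dev/ibmvmc character
 * device registered with these file operations is driven from user space
 * roughly as below. A minimal sketch, assuming the VMC_IOCTL_* constants,
 * HMC_ID_LEN and struct ibmvmc_query_struct are available from a copy of
 * ibmvmc.h; error handling and a real HMC protocol payload are omitted.
 */
#if 0	/* illustrative user-space example, not kernel code */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ibmvmc.h"

static int hmc_session_example(void)
{
	struct ibmvmc_query_struct query;
	unsigned char hmc_id[HMC_ID_LEN] = "example-hmc";
	unsigned char msg[4096];
	int fd = open("/dev/ibmvmc", O_RDWR);

	/* 1. Check that a VMC device exists and the driver is ready */
	ioctl(fd, VMC_IOCTL_QUERY, &query);

	/* 2. Bind this descriptor to an HMC connection and send Interface Open */
	ioctl(fd, VMC_IOCTL_SETHMCID, hmc_id);

	/*
	 * 3. Exchange HMC protocol messages. write() fails with EBUSY until
	 *    the open response arrives, so callers retry; read() blocks until
	 *    a message is queued (poll() may be used with O_NONBLOCK).
	 */
	memset(msg, 0, sizeof(msg));
	write(fd, msg, sizeof(msg));
	read(fd, msg, sizeof(msg));

	close(fd);
	return 0;
}
#endif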
/**
* ibmvmc_add_buffer - Add Buffer
*
* @adapter: crq_server_adapter struct
* @crq: ibmvmc_crq_msg struct
*
* This message transfers a buffer from hypervisor ownership to management
* partition ownership. The LIOBA is obtained from the virtual TCE table
* associated with the hypervisor side of the VMC device, and points to a
* buffer of size MTU (as established in the capabilities exchange).
*
 * Typical flow for adding buffers:
* 1. A new management application connection is opened by the management
* partition.
* 2. The hypervisor assigns new buffers for the traffic associated with
* that connection.
* 3. The hypervisor sends VMC Add Buffer messages to the management
* partition, informing it of the new buffers.
* 4. The hypervisor sends an HMC protocol message (to the management
* application) notifying it of the new buffers. This informs the
* application that it has buffers available for sending HMC
* commands.
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static int ibmvmc_add_buffer(struct crq_server_adapter *adapter,
struct ibmvmc_crq_msg *crq)
{
struct ibmvmc_buffer *buffer;
u8 hmc_index;
u8 hmc_session;
u16 buffer_id;
unsigned long flags;
int rc = 0;
if (!crq)
return -1;
hmc_session = crq->hmc_session;
hmc_index = crq->hmc_index;
buffer_id = be16_to_cpu(crq->var2.buffer_id);
if (hmc_index > ibmvmc.max_hmc_index) {
dev_err(adapter->dev, "add_buffer: invalid hmc_index = 0x%x\n",
hmc_index);
ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
hmc_session, hmc_index, buffer_id);
return -1;
}
if (buffer_id >= ibmvmc.max_buffer_pool_size) {
dev_err(adapter->dev, "add_buffer: invalid buffer_id = 0x%x\n",
buffer_id);
ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
hmc_session, hmc_index, buffer_id);
return -1;
}
spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
buffer = &hmcs[hmc_index].buffer[buffer_id];
if (buffer->real_addr_local || buffer->dma_addr_local) {
dev_warn(adapter->dev, "add_buffer: already allocated id = 0x%lx\n",
(unsigned long)buffer_id);
spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
hmc_session, hmc_index, buffer_id);
return -1;
}
buffer->real_addr_local = alloc_dma_buffer(to_vio_dev(adapter->dev),
ibmvmc.max_mtu,
&buffer->dma_addr_local);
if (!buffer->real_addr_local) {
dev_err(adapter->dev, "add_buffer: alloc_dma_buffer failed.\n");
spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INTERFACE_FAILURE,
hmc_session, hmc_index, buffer_id);
return -1;
}
buffer->dma_addr_remote = be32_to_cpu(crq->var3.lioba);
buffer->size = ibmvmc.max_mtu;
buffer->owner = crq->var1.owner;
buffer->free = 1;
/* Must ensure valid==1 is observable only after all other fields are */
dma_wmb();
buffer->valid = 1;
buffer->id = buffer_id;
dev_dbg(adapter->dev, "add_buffer: successfully added a buffer:\n");
dev_dbg(adapter->dev, " index: %d, session: %d, buffer: 0x%x, owner: %d\n",
hmc_index, hmc_session, buffer_id, buffer->owner);
dev_dbg(adapter->dev, " local: 0x%x, remote: 0x%x\n",
(u32)buffer->dma_addr_local,
(u32)buffer->dma_addr_remote);
spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
hmc_index, buffer_id);
return rc;
}
/**
* ibmvmc_rem_buffer - Remove Buffer
*
* @adapter: crq_server_adapter struct
* @crq: ibmvmc_crq_msg struct
*
* This message requests an HMC buffer to be transferred from management
* partition ownership to hypervisor ownership. The management partition may
* not be able to satisfy the request at a particular point in time if all its
* buffers are in use. The management partition requires a depth of at least
* one inbound buffer to allow management application commands to flow to the
* hypervisor. It is, therefore, an interface error for the hypervisor to
* attempt to remove the management partition's last buffer.
*
* The hypervisor is expected to manage buffer usage with the management
* application directly and inform the management partition when buffers may be
* removed. The typical flow for removing buffers:
*
* 1. The management application no longer needs a communication path to a
* particular hypervisor function. That function is closed.
* 2. The hypervisor and the management application quiesce all traffic to that
* function. The hypervisor requests a reduction in buffer pool size.
* 3. The management application acknowledges the reduction in buffer pool size.
* 4. The hypervisor sends a Remove Buffer message to the management partition,
* informing it of the reduction in buffers.
* 5. The management partition verifies it can remove the buffer. This is
* possible if buffers have been quiesced.
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
/*
* The hypervisor requested that we pick an unused buffer, and return it.
* Before sending the buffer back, we free any storage associated with the
* buffer.
*/
static int ibmvmc_rem_buffer(struct crq_server_adapter *adapter,
struct ibmvmc_crq_msg *crq)
{
struct ibmvmc_buffer *buffer;
u8 hmc_index;
u8 hmc_session;
u16 buffer_id = 0;
unsigned long flags;
int rc = 0;
if (!crq)
return -1;
hmc_session = crq->hmc_session;
hmc_index = crq->hmc_index;
if (hmc_index > ibmvmc.max_hmc_index) {
dev_warn(adapter->dev, "rem_buffer: invalid hmc_index = 0x%x\n",
hmc_index);
ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
hmc_session, hmc_index, buffer_id);
return -1;
}
spin_lock_irqsave(&hmcs[hmc_index].lock, flags);
buffer = ibmvmc_get_free_hmc_buffer(adapter, hmc_index);
if (!buffer) {
dev_info(adapter->dev, "rem_buffer: no buffer to remove\n");
spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_NO_BUFFER,
hmc_session, hmc_index,
VMC_INVALID_BUFFER_ID);
return -1;
}
buffer_id = buffer->id;
if (buffer->valid)
free_dma_buffer(to_vio_dev(adapter->dev),
ibmvmc.max_mtu,
buffer->real_addr_local,
buffer->dma_addr_local);
memset(buffer, 0, sizeof(struct ibmvmc_buffer));
spin_unlock_irqrestore(&hmcs[hmc_index].lock, flags);
dev_dbg(adapter->dev, "rem_buffer: removed buffer 0x%x.\n", buffer_id);
ibmvmc_send_rem_buffer_resp(adapter, VMC_MSG_SUCCESS, hmc_session,
hmc_index, buffer_id);
return rc;
}
static int ibmvmc_recv_msg(struct crq_server_adapter *adapter,
struct ibmvmc_crq_msg *crq)
{
struct ibmvmc_buffer *buffer;
struct ibmvmc_hmc *hmc;
unsigned long msg_len;
u8 hmc_index;
u8 hmc_session;
u16 buffer_id;
unsigned long flags;
int rc = 0;
if (!crq)
return -1;
/* Hypervisor writes CRQs directly into our memory in big endian */
dev_dbg(adapter->dev, "Recv_msg: msg from HV 0x%016llx 0x%016llx\n",
be64_to_cpu(*((unsigned long *)crq)),
be64_to_cpu(*(((unsigned long *)crq) + 1)));
hmc_session = crq->hmc_session;
hmc_index = crq->hmc_index;
buffer_id = be16_to_cpu(crq->var2.buffer_id);
msg_len = be32_to_cpu(crq->var3.msg_len);
if (hmc_index > ibmvmc.max_hmc_index) {
dev_err(adapter->dev, "Recv_msg: invalid hmc_index = 0x%x\n",
hmc_index);
ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_HMC_INDEX,
hmc_session, hmc_index, buffer_id);
return -1;
}
if (buffer_id >= ibmvmc.max_buffer_pool_size) {
dev_err(adapter->dev, "Recv_msg: invalid buffer_id = 0x%x\n",
buffer_id);
ibmvmc_send_add_buffer_resp(adapter, VMC_MSG_INVALID_BUFFER_ID,
hmc_session, hmc_index, buffer_id);
return -1;
}
hmc = &hmcs[hmc_index];
spin_lock_irqsave(&hmc->lock, flags);
if (hmc->state == ibmhmc_state_free) {
dev_err(adapter->dev, "Recv_msg: invalid hmc state = 0x%x\n",
hmc->state);
/* HMC connection is not valid (possibly was reset under us). */
spin_unlock_irqrestore(&hmc->lock, flags);
return -1;
}
buffer = &hmc->buffer[buffer_id];
if (buffer->valid == 0 || buffer->owner == VMC_BUF_OWNER_ALPHA) {
dev_err(adapter->dev, "Recv_msg: not valid, or not HV. 0x%x 0x%x\n",
buffer->valid, buffer->owner);
spin_unlock_irqrestore(&hmc->lock, flags);
return -1;
}
/* RDMA the data into the partition. */
rc = h_copy_rdma(msg_len,
adapter->riobn,
buffer->dma_addr_remote,
adapter->liobn,
buffer->dma_addr_local);
dev_dbg(adapter->dev, "Recv_msg: msg_len = 0x%x, buffer_id = 0x%x, queue_head = 0x%x, hmc_idx = 0x%x\n",
(unsigned int)msg_len, (unsigned int)buffer_id,
(unsigned int)hmc->queue_head, (unsigned int)hmc_index);
buffer->msg_len = msg_len;
buffer->free = 0;
buffer->owner = VMC_BUF_OWNER_ALPHA;
if (rc) {
dev_err(adapter->dev, "Failure in recv_msg: h_copy_rdma = 0x%x\n",
rc);
spin_unlock_irqrestore(&hmc->lock, flags);
return -1;
}
/* Must be locked because read operates on the same data */
hmc->queue_outbound_msgs[hmc->queue_head] = buffer_id;
hmc->queue_head++;
if (hmc->queue_head == ibmvmc_max_buf_pool_size)
hmc->queue_head = 0;
if (hmc->queue_head == hmc->queue_tail)
dev_err(adapter->dev, "outbound buffer queue wrapped.\n");
spin_unlock_irqrestore(&hmc->lock, flags);
wake_up_interruptible(&ibmvmc_read_wait);
return 0;
}
/**
* ibmvmc_process_capabilities - Process Capabilities
*
* @adapter: crq_server_adapter struct
* @crqp: ibmvmc_crq_msg struct
*
*/
static void ibmvmc_process_capabilities(struct crq_server_adapter *adapter,
struct ibmvmc_crq_msg *crqp)
{
struct ibmvmc_admin_crq_msg *crq = (struct ibmvmc_admin_crq_msg *)crqp;
if ((be16_to_cpu(crq->version) >> 8) !=
(IBMVMC_PROTOCOL_VERSION >> 8)) {
dev_err(adapter->dev, "init failed, incompatible versions 0x%x 0x%x\n",
be16_to_cpu(crq->version),
IBMVMC_PROTOCOL_VERSION);
ibmvmc.state = ibmvmc_state_failed;
return;
}
ibmvmc.max_mtu = min_t(u32, ibmvmc_max_mtu, be32_to_cpu(crq->max_mtu));
ibmvmc.max_buffer_pool_size = min_t(u16, ibmvmc_max_buf_pool_size,
be16_to_cpu(crq->pool_size));
ibmvmc.max_hmc_index = min_t(u8, ibmvmc_max_hmcs, crq->max_hmc) - 1;
ibmvmc.state = ibmvmc_state_ready;
dev_info(adapter->dev, "Capabilities: mtu=0x%x, pool_size=0x%x, max_hmc=0x%x\n",
ibmvmc.max_mtu, ibmvmc.max_buffer_pool_size,
ibmvmc.max_hmc_index);
}
/**
* ibmvmc_validate_hmc_session - Validate HMC Session
*
* @adapter: crq_server_adapter struct
* @crq: ibmvmc_crq_msg struct
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static int ibmvmc_validate_hmc_session(struct crq_server_adapter *adapter,
struct ibmvmc_crq_msg *crq)
{
unsigned char hmc_index;
hmc_index = crq->hmc_index;
if (crq->hmc_session == 0)
return 0;
if (hmc_index > ibmvmc.max_hmc_index)
return -1;
if (hmcs[hmc_index].session != crq->hmc_session) {
dev_warn(adapter->dev, "Drop, bad session: expected 0x%x, recv 0x%x\n",
hmcs[hmc_index].session, crq->hmc_session);
return -1;
}
return 0;
}
/**
* ibmvmc_reset - Reset
*
* @adapter: crq_server_adapter struct
 * @xport_event: if true, the partner closed their CRQ and no CRQ reset is
 *               needed; if false, a CRQ reset must be scheduled
 *
 * Closes all HMC sessions and conditionally schedules a CRQ reset.
*/
static void ibmvmc_reset(struct crq_server_adapter *adapter, bool xport_event)
{
int i;
if (ibmvmc.state != ibmvmc_state_sched_reset) {
dev_info(adapter->dev, "*** Reset to initial state.\n");
for (i = 0; i < ibmvmc_max_hmcs; i++)
ibmvmc_return_hmc(&hmcs[i], xport_event);
if (xport_event) {
/* CRQ was closed by the partner. We don't need to do
* anything except set ourself to the correct state to
* handle init msgs.
*/
ibmvmc.state = ibmvmc_state_crqinit;
} else {
/* The partner did not close their CRQ - instead, we're
* closing the CRQ on our end. Need to schedule this
* for process context, because CRQ reset may require a
* sleep.
*
* Setting ibmvmc.state here immediately prevents
* ibmvmc_open from completing until the reset
* completes in process context.
*/
ibmvmc.state = ibmvmc_state_sched_reset;
dev_dbg(adapter->dev, "Device reset scheduled");
wake_up_interruptible(&adapter->reset_wait_queue);
}
}
}
/**
* ibmvmc_reset_task - Reset Task
*
* @data: Data field
*
* Performs a CRQ reset of the VMC device in process context.
* NOTE: This function should not be called directly, use ibmvmc_reset.
*/
static int ibmvmc_reset_task(void *data)
{
struct crq_server_adapter *adapter = data;
int rc;
set_user_nice(current, -20);
while (!kthread_should_stop()) {
wait_event_interruptible(adapter->reset_wait_queue,
(ibmvmc.state == ibmvmc_state_sched_reset) ||
kthread_should_stop());
if (kthread_should_stop())
break;
dev_dbg(adapter->dev, "CRQ resetting in process context");
tasklet_disable(&adapter->work_task);
rc = ibmvmc_reset_crq_queue(adapter);
if (rc != H_SUCCESS && rc != H_RESOURCE) {
dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
rc);
ibmvmc.state = ibmvmc_state_failed;
} else {
ibmvmc.state = ibmvmc_state_crqinit;
if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0)
!= 0 && rc != H_RESOURCE)
dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
}
vio_enable_interrupts(to_vio_dev(adapter->dev));
tasklet_enable(&adapter->work_task);
}
return 0;
}
/**
* ibmvmc_process_open_resp - Process Open Response
*
* @crq: ibmvmc_crq_msg struct
* @adapter: crq_server_adapter struct
*
* This command is sent by the hypervisor in response to the Interface
* Open message. When this message is received, the indicated buffer is
* again available for management partition use.
*/
static void ibmvmc_process_open_resp(struct ibmvmc_crq_msg *crq,
struct crq_server_adapter *adapter)
{
unsigned char hmc_index;
unsigned short buffer_id;
hmc_index = crq->hmc_index;
if (hmc_index > ibmvmc.max_hmc_index) {
/* Why would PHYP give an index > max negotiated? */
ibmvmc_reset(adapter, false);
return;
}
if (crq->status) {
dev_warn(adapter->dev, "open_resp: failed - status 0x%x\n",
crq->status);
ibmvmc_return_hmc(&hmcs[hmc_index], false);
return;
}
if (hmcs[hmc_index].state == ibmhmc_state_opening) {
buffer_id = be16_to_cpu(crq->var2.buffer_id);
if (buffer_id >= ibmvmc.max_buffer_pool_size) {
dev_err(adapter->dev, "open_resp: invalid buffer_id = 0x%x\n",
buffer_id);
hmcs[hmc_index].state = ibmhmc_state_failed;
} else {
ibmvmc_free_hmc_buffer(&hmcs[hmc_index],
&hmcs[hmc_index].buffer[buffer_id]);
hmcs[hmc_index].state = ibmhmc_state_ready;
dev_dbg(adapter->dev, "open_resp: set hmc state = ready\n");
}
} else {
dev_warn(adapter->dev, "open_resp: invalid hmc state (0x%x)\n",
hmcs[hmc_index].state);
}
}
/**
* ibmvmc_process_close_resp - Process Close Response
*
* @crq: ibmvmc_crq_msg struct
* @adapter: crq_server_adapter struct
*
 * This command is sent by the hypervisor in response to the management
* application Interface Close message.
*
* If the close fails, simply reset the entire driver as the state of the VMC
* must be in tough shape.
*/
static void ibmvmc_process_close_resp(struct ibmvmc_crq_msg *crq,
struct crq_server_adapter *adapter)
{
unsigned char hmc_index;
hmc_index = crq->hmc_index;
if (hmc_index > ibmvmc.max_hmc_index) {
ibmvmc_reset(adapter, false);
return;
}
if (crq->status) {
dev_warn(adapter->dev, "close_resp: failed - status 0x%x\n",
crq->status);
ibmvmc_reset(adapter, false);
return;
}
ibmvmc_return_hmc(&hmcs[hmc_index], false);
}
/**
* ibmvmc_crq_process - Process CRQ
*
* @adapter: crq_server_adapter struct
* @crq: ibmvmc_crq_msg struct
*
* Process the CRQ message based upon the type of message received.
*
*/
static void ibmvmc_crq_process(struct crq_server_adapter *adapter,
struct ibmvmc_crq_msg *crq)
{
switch (crq->type) {
case VMC_MSG_CAP_RESP:
dev_dbg(adapter->dev, "CRQ recv: capabilities resp (0x%x)\n",
crq->type);
if (ibmvmc.state == ibmvmc_state_capabilities)
ibmvmc_process_capabilities(adapter, crq);
else
dev_warn(adapter->dev, "caps msg invalid in state 0x%x\n",
ibmvmc.state);
break;
case VMC_MSG_OPEN_RESP:
dev_dbg(adapter->dev, "CRQ recv: open resp (0x%x)\n",
crq->type);
if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
ibmvmc_process_open_resp(crq, adapter);
break;
case VMC_MSG_ADD_BUF:
dev_dbg(adapter->dev, "CRQ recv: add buf (0x%x)\n",
crq->type);
if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
ibmvmc_add_buffer(adapter, crq);
break;
case VMC_MSG_REM_BUF:
dev_dbg(adapter->dev, "CRQ recv: rem buf (0x%x)\n",
crq->type);
if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
ibmvmc_rem_buffer(adapter, crq);
break;
case VMC_MSG_SIGNAL:
dev_dbg(adapter->dev, "CRQ recv: signal msg (0x%x)\n",
crq->type);
if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
ibmvmc_recv_msg(adapter, crq);
break;
case VMC_MSG_CLOSE_RESP:
dev_dbg(adapter->dev, "CRQ recv: close resp (0x%x)\n",
crq->type);
if (ibmvmc_validate_hmc_session(adapter, crq) == 0)
ibmvmc_process_close_resp(crq, adapter);
break;
case VMC_MSG_CAP:
case VMC_MSG_OPEN:
case VMC_MSG_CLOSE:
case VMC_MSG_ADD_BUF_RESP:
case VMC_MSG_REM_BUF_RESP:
dev_warn(adapter->dev, "CRQ recv: unexpected msg (0x%x)\n",
crq->type);
break;
default:
dev_warn(adapter->dev, "CRQ recv: unknown msg (0x%x)\n",
crq->type);
break;
}
}
/**
* ibmvmc_handle_crq_init - Handle CRQ Init
*
* @crq: ibmvmc_crq_msg struct
* @adapter: crq_server_adapter struct
*
* Handle the type of crq initialization based on whether
* it is a message or a response.
*
*/
static void ibmvmc_handle_crq_init(struct ibmvmc_crq_msg *crq,
struct crq_server_adapter *adapter)
{
switch (crq->type) {
case 0x01: /* Initialization message */
dev_dbg(adapter->dev, "CRQ recv: CRQ init msg - state 0x%x\n",
ibmvmc.state);
if (ibmvmc.state == ibmvmc_state_crqinit) {
/* Send back a response */
if (ibmvmc_send_crq(adapter, 0xC002000000000000,
0) == 0)
ibmvmc_send_capabilities(adapter);
else
dev_err(adapter->dev, " Unable to send init rsp\n");
} else {
dev_err(adapter->dev, "Invalid state 0x%x mtu = 0x%x\n",
ibmvmc.state, ibmvmc.max_mtu);
}
break;
case 0x02: /* Initialization response */
dev_dbg(adapter->dev, "CRQ recv: initialization resp msg - state 0x%x\n",
ibmvmc.state);
if (ibmvmc.state == ibmvmc_state_crqinit)
ibmvmc_send_capabilities(adapter);
break;
default:
dev_warn(adapter->dev, "Unknown crq message type 0x%lx\n",
(unsigned long)crq->type);
}
}
/**
* ibmvmc_handle_crq - Handle CRQ
*
* @crq: ibmvmc_crq_msg struct
* @adapter: crq_server_adapter struct
*
* Read the command elements from the command queue and execute the
* requests based upon the type of crq message.
*
*/
static void ibmvmc_handle_crq(struct ibmvmc_crq_msg *crq,
struct crq_server_adapter *adapter)
{
switch (crq->valid) {
case 0xC0: /* initialization */
ibmvmc_handle_crq_init(crq, adapter);
break;
case 0xFF: /* Hypervisor telling us the connection is closed */
dev_warn(adapter->dev, "CRQ recv: virtual adapter failed - resetting.\n");
ibmvmc_reset(adapter, true);
break;
case 0x80: /* real payload */
ibmvmc_crq_process(adapter, crq);
break;
default:
dev_warn(adapter->dev, "CRQ recv: unknown msg 0x%02x.\n",
crq->valid);
break;
}
}
static void ibmvmc_task(unsigned long data)
{
struct crq_server_adapter *adapter =
(struct crq_server_adapter *)data;
struct vio_dev *vdev = to_vio_dev(adapter->dev);
struct ibmvmc_crq_msg *crq;
int done = 0;
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = crq_queue_next_crq(&adapter->queue)) != NULL) {
ibmvmc_handle_crq(crq, adapter);
crq->valid = 0x00;
/* CRQ reset was requested, stop processing CRQs.
* Interrupts will be re-enabled by the reset task.
*/
if (ibmvmc.state == ibmvmc_state_sched_reset)
return;
}
vio_enable_interrupts(vdev);
crq = crq_queue_next_crq(&adapter->queue);
if (crq) {
vio_disable_interrupts(vdev);
ibmvmc_handle_crq(crq, adapter);
crq->valid = 0x00;
/* CRQ reset was requested, stop processing CRQs.
* Interrupts will be re-enabled by the reset task.
*/
if (ibmvmc.state == ibmvmc_state_sched_reset)
return;
} else {
done = 1;
}
}
}
/**
* ibmvmc_init_crq_queue - Init CRQ Queue
*
* @adapter: crq_server_adapter struct
*
* Return:
* 0 - Success
* Non-zero - Failure
*/
static int ibmvmc_init_crq_queue(struct crq_server_adapter *adapter)
{
struct vio_dev *vdev = to_vio_dev(adapter->dev);
struct crq_queue *queue = &adapter->queue;
int rc = 0;
int retrc = 0;
queue->msgs = (struct ibmvmc_crq_msg *)get_zeroed_page(GFP_KERNEL);
if (!queue->msgs)
goto malloc_failed;
queue->size = PAGE_SIZE / sizeof(*queue->msgs);
queue->msg_token = dma_map_single(adapter->dev, queue->msgs,
queue->size * sizeof(*queue->msgs),
DMA_BIDIRECTIONAL);
if (dma_mapping_error(adapter->dev, queue->msg_token))
goto map_failed;
retrc = plpar_hcall_norets(H_REG_CRQ,
vdev->unit_address,
queue->msg_token, PAGE_SIZE);
rc = retrc;
if (rc == H_RESOURCE)
rc = ibmvmc_reset_crq_queue(adapter);
if (rc == 2) {
dev_warn(adapter->dev, "Partner adapter not ready\n");
retrc = 0;
} else if (rc != 0) {
dev_err(adapter->dev, "Error %d opening adapter\n", rc);
goto reg_crq_failed;
}
queue->cur = 0;
spin_lock_init(&queue->lock);
tasklet_init(&adapter->work_task, ibmvmc_task, (unsigned long)adapter);
if (request_irq(vdev->irq,
ibmvmc_handle_event,
0, "ibmvmc", (void *)adapter) != 0) {
dev_err(adapter->dev, "couldn't register irq 0x%x\n",
vdev->irq);
goto req_irq_failed;
}
rc = vio_enable_interrupts(vdev);
if (rc != 0) {
dev_err(adapter->dev, "Error %d enabling interrupts!!!\n", rc);
goto req_irq_failed;
}
return retrc;
req_irq_failed:
/* Cannot have any work since we either never got our IRQ registered,
* or never got interrupts enabled
*/
tasklet_kill(&adapter->work_task);
h_free_crq(vdev->unit_address);
reg_crq_failed:
dma_unmap_single(adapter->dev,
queue->msg_token,
queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
map_failed:
free_page((unsigned long)queue->msgs);
malloc_failed:
return -ENOMEM;
}
/* Fill in the liobn and riobn fields on the adapter */
static int read_dma_window(struct vio_dev *vdev,
struct crq_server_adapter *adapter)
{
const __be32 *dma_window;
const __be32 *prop;
/* TODO Using of_parse_dma_window would be better, but it doesn't give
* a way to read multiple windows without already knowing the size of
* a window or the number of windows
*/
dma_window =
(const __be32 *)vio_get_attribute(vdev, "ibm,my-dma-window",
NULL);
if (!dma_window) {
dev_warn(adapter->dev, "Couldn't find ibm,my-dma-window property\n");
return -1;
}
adapter->liobn = be32_to_cpu(*dma_window);
dma_window++;
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
NULL);
if (!prop) {
dev_warn(adapter->dev, "Couldn't find ibm,#dma-address-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
}
prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
NULL);
if (!prop) {
dev_warn(adapter->dev, "Couldn't find ibm,#dma-size-cells property\n");
dma_window++;
} else {
dma_window += be32_to_cpu(*prop);
}
/* dma_window should point to the second window now */
adapter->riobn = be32_to_cpu(*dma_window);
return 0;
}
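/*
 * Illustration (not part of the original driver): the walk above assumes the
 * property holds two windows, each encoded as a liobn cell followed by
 * "ibm,#dma-address-cells" address cells and "ibm,#dma-size-cells" size
 * cells. With one cell each (the fallback used when those properties are
 * absent), a hypothetical device-tree node might contain:
 *
 *	ibm,#dma-address-cells = <1>;
 *	ibm,#dma-size-cells = <1>;
 *	ibm,my-dma-window = <0x10000003 0x0 0x10000000
 *			     0x20000003 0x0 0x10000000>;
 *
 * where the first triple yields the local window (liobn) and the second the
 * remote window (riobn).
 */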
static int ibmvmc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
{
struct crq_server_adapter *adapter = &ibmvmc_adapter;
int rc;
dev_set_drvdata(&vdev->dev, NULL);
memset(adapter, 0, sizeof(*adapter));
adapter->dev = &vdev->dev;
dev_info(adapter->dev, "Probe for UA 0x%x\n", vdev->unit_address);
rc = read_dma_window(vdev, adapter);
if (rc != 0) {
ibmvmc.state = ibmvmc_state_failed;
return -1;
}
dev_dbg(adapter->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
adapter->liobn, adapter->riobn);
init_waitqueue_head(&adapter->reset_wait_queue);
adapter->reset_task = kthread_run(ibmvmc_reset_task, adapter, "ibmvmc");
if (IS_ERR(adapter->reset_task)) {
dev_err(adapter->dev, "Failed to start reset thread\n");
ibmvmc.state = ibmvmc_state_failed;
rc = PTR_ERR(adapter->reset_task);
adapter->reset_task = NULL;
return rc;
}
rc = ibmvmc_init_crq_queue(adapter);
if (rc != 0 && rc != H_RESOURCE) {
dev_err(adapter->dev, "Error initializing CRQ. rc = 0x%x\n",
rc);
ibmvmc.state = ibmvmc_state_failed;
goto crq_failed;
}
ibmvmc.state = ibmvmc_state_crqinit;
/* Try to send an initialization message. Note that this is allowed
	 * to fail if the other end is not active. In that case we just wait
* for the other side to initialize.
*/
if (ibmvmc_send_crq(adapter, 0xC001000000000000LL, 0) != 0 &&
rc != H_RESOURCE)
dev_warn(adapter->dev, "Failed to send initialize CRQ message\n");
dev_set_drvdata(&vdev->dev, adapter);
return 0;
crq_failed:
kthread_stop(adapter->reset_task);
adapter->reset_task = NULL;
return -EPERM;
}
static void ibmvmc_remove(struct vio_dev *vdev)
{
struct crq_server_adapter *adapter = dev_get_drvdata(&vdev->dev);
dev_info(adapter->dev, "Entering remove for UA 0x%x\n",
vdev->unit_address);
ibmvmc_release_crq_queue(adapter);
}
static struct vio_device_id ibmvmc_device_table[] = {
{ "ibm,vmc", "IBM,vmc" },
{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvmc_device_table);
static struct vio_driver ibmvmc_driver = {
.name = ibmvmc_driver_name,
.id_table = ibmvmc_device_table,
.probe = ibmvmc_probe,
.remove = ibmvmc_remove,
};
static void __init ibmvmc_scrub_module_parms(void)
{
if (ibmvmc_max_mtu > MAX_MTU) {
pr_warn("ibmvmc: Max MTU reduced to %d\n", MAX_MTU);
ibmvmc_max_mtu = MAX_MTU;
} else if (ibmvmc_max_mtu < MIN_MTU) {
pr_warn("ibmvmc: Max MTU increased to %d\n", MIN_MTU);
ibmvmc_max_mtu = MIN_MTU;
}
if (ibmvmc_max_buf_pool_size > MAX_BUF_POOL_SIZE) {
pr_warn("ibmvmc: Max buffer pool size reduced to %d\n",
MAX_BUF_POOL_SIZE);
ibmvmc_max_buf_pool_size = MAX_BUF_POOL_SIZE;
} else if (ibmvmc_max_buf_pool_size < MIN_BUF_POOL_SIZE) {
pr_warn("ibmvmc: Max buffer pool size increased to %d\n",
MIN_BUF_POOL_SIZE);
ibmvmc_max_buf_pool_size = MIN_BUF_POOL_SIZE;
}
if (ibmvmc_max_hmcs > MAX_HMCS) {
pr_warn("ibmvmc: Max HMCs reduced to %d\n", MAX_HMCS);
ibmvmc_max_hmcs = MAX_HMCS;
} else if (ibmvmc_max_hmcs < MIN_HMCS) {
pr_warn("ibmvmc: Max HMCs increased to %d\n", MIN_HMCS);
ibmvmc_max_hmcs = MIN_HMCS;
}
}
static struct miscdevice ibmvmc_miscdev = {
.name = ibmvmc_driver_name,
.minor = MISC_DYNAMIC_MINOR,
.fops = &ibmvmc_fops,
};
static int __init ibmvmc_module_init(void)
{
int rc, i, j;
ibmvmc.state = ibmvmc_state_initial;
pr_info("ibmvmc: version %s\n", IBMVMC_DRIVER_VERSION);
rc = misc_register(&ibmvmc_miscdev);
if (rc) {
pr_err("ibmvmc: misc registration failed\n");
goto misc_register_failed;
}
pr_info("ibmvmc: node %d:%d\n", MISC_MAJOR,
ibmvmc_miscdev.minor);
/* Initialize data structures */
memset(hmcs, 0, sizeof(struct ibmvmc_hmc) * MAX_HMCS);
for (i = 0; i < MAX_HMCS; i++) {
spin_lock_init(&hmcs[i].lock);
hmcs[i].state = ibmhmc_state_free;
for (j = 0; j < MAX_BUF_POOL_SIZE; j++)
hmcs[i].queue_outbound_msgs[j] = VMC_INVALID_BUFFER_ID;
}
/* Sanity check module parms */
ibmvmc_scrub_module_parms();
/*
* Initialize some reasonable values. Might be negotiated smaller
* values during the capabilities exchange.
*/
ibmvmc.max_mtu = ibmvmc_max_mtu;
ibmvmc.max_buffer_pool_size = ibmvmc_max_buf_pool_size;
ibmvmc.max_hmc_index = ibmvmc_max_hmcs - 1;
rc = vio_register_driver(&ibmvmc_driver);
if (rc) {
pr_err("ibmvmc: rc %d from vio_register_driver\n", rc);
goto vio_reg_failed;
}
return 0;
vio_reg_failed:
misc_deregister(&ibmvmc_miscdev);
misc_register_failed:
return rc;
}
static void __exit ibmvmc_module_exit(void)
{
pr_info("ibmvmc: module exit\n");
vio_unregister_driver(&ibmvmc_driver);
misc_deregister(&ibmvmc_miscdev);
}
module_init(ibmvmc_module_init);
module_exit(ibmvmc_module_exit);
module_param_named(buf_pool_size, ibmvmc_max_buf_pool_size,
int, 0644);
MODULE_PARM_DESC(buf_pool_size, "Buffer pool size");
module_param_named(max_hmcs, ibmvmc_max_hmcs, int, 0644);
MODULE_PARM_DESC(max_hmcs, "Max HMCs");
module_param_named(max_mtu, ibmvmc_max_mtu, int, 0644);
MODULE_PARM_DESC(max_mtu, "Max MTU");
MODULE_AUTHOR("Steven Royer <[email protected]>");
MODULE_DESCRIPTION("IBM VMC");
MODULE_VERSION(IBMVMC_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/ibmvmc.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* Host side test driver to test endpoint functionality
*
* Copyright (C) 2017 Texas Instruments
* Author: Kishon Vijay Abraham I <[email protected]>
*/
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/pci_regs.h>
#include <uapi/linux/pcitest.h>
#define DRV_MODULE_NAME "pci-endpoint-test"
#define IRQ_TYPE_UNDEFINED -1
#define IRQ_TYPE_LEGACY 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2
#define PCI_ENDPOINT_TEST_MAGIC 0x0
#define PCI_ENDPOINT_TEST_COMMAND 0x4
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)
#define PCI_ENDPOINT_TEST_STATUS 0x8
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
#define STATUS_WRITE_SUCCESS BIT(2)
#define STATUS_WRITE_FAIL BIT(3)
#define STATUS_COPY_SUCCESS BIT(4)
#define STATUS_COPY_FAIL BIT(5)
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)
#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10
#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18
#define PCI_ENDPOINT_TEST_SIZE 0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM 0x20
#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28
#define PCI_ENDPOINT_TEST_FLAGS 0x2c
#define FLAG_USE_DMA BIT(0)
#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define PCI_DEVICE_ID_IMX8 0x0808
#define is_am654_pci_dev(pdev) \
((pdev)->device == PCI_DEVICE_ID_TI_AM654)
#define PCI_DEVICE_ID_RENESAS_R8A774A1 0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
static DEFINE_IDA(pci_endpoint_test_ida);
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
miscdev)
static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");
static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
enum pci_barno {
BAR_0,
BAR_1,
BAR_2,
BAR_3,
BAR_4,
BAR_5,
};
struct pci_endpoint_test {
struct pci_dev *pdev;
void __iomem *base;
void __iomem *bar[PCI_STD_NUM_BARS];
struct completion irq_raised;
int last_irq;
int num_irqs;
int irq_type;
/* mutex to protect the ioctls */
struct mutex mutex;
struct miscdevice miscdev;
enum pci_barno test_reg_bar;
size_t alignment;
const char *name;
};
struct pci_endpoint_test_data {
enum pci_barno test_reg_bar;
size_t alignment;
int irq_type;
};
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
u32 offset)
{
return readl(test->base + offset);
}
static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
u32 offset, u32 value)
{
writel(value, test->base + offset);
}
static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
int bar, int offset)
{
return readl(test->bar[bar] + offset);
}
static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
int bar, u32 offset, u32 value)
{
writel(value, test->bar[bar] + offset);
}
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
struct pci_endpoint_test *test = dev_id;
u32 reg;
reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
if (reg & STATUS_IRQ_RAISED) {
test->last_irq = irq;
complete(&test->irq_raised);
}
return IRQ_HANDLED;
}
static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
struct pci_dev *pdev = test->pdev;
pci_free_irq_vectors(pdev);
test->irq_type = IRQ_TYPE_UNDEFINED;
}
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
int type)
{
int irq = -1;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
bool res = true;
switch (type) {
case IRQ_TYPE_LEGACY:
irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
if (irq < 0)
dev_err(dev, "Failed to get Legacy interrupt\n");
break;
case IRQ_TYPE_MSI:
irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
if (irq < 0)
dev_err(dev, "Failed to get MSI interrupts\n");
break;
case IRQ_TYPE_MSIX:
irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
if (irq < 0)
dev_err(dev, "Failed to get MSI-X interrupts\n");
break;
default:
dev_err(dev, "Invalid IRQ type selected\n");
}
if (irq < 0) {
irq = 0;
res = false;
}
test->irq_type = type;
test->num_irqs = irq;
return res;
}
static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
int i;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
for (i = 0; i < test->num_irqs; i++)
devm_free_irq(dev, pci_irq_vector(pdev, i), test);
test->num_irqs = 0;
}
static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
int i;
int err;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
for (i = 0; i < test->num_irqs; i++) {
err = devm_request_irq(dev, pci_irq_vector(pdev, i),
pci_endpoint_test_irqhandler,
IRQF_SHARED, test->name, test);
if (err)
goto fail;
}
return true;
fail:
switch (irq_type) {
case IRQ_TYPE_LEGACY:
dev_err(dev, "Failed to request IRQ %d for Legacy\n",
pci_irq_vector(pdev, i));
break;
case IRQ_TYPE_MSI:
dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
pci_irq_vector(pdev, i),
i + 1);
break;
case IRQ_TYPE_MSIX:
dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
pci_irq_vector(pdev, i),
i + 1);
break;
}
return false;
}
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
enum pci_barno barno)
{
int j;
u32 val;
int size;
struct pci_dev *pdev = test->pdev;
if (!test->bar[barno])
return false;
size = pci_resource_len(pdev, barno);
if (barno == test->test_reg_bar)
size = 0x4;
for (j = 0; j < size; j += 4)
pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
for (j = 0; j < size; j += 4) {
val = pci_endpoint_test_bar_readl(test, barno, j);
if (val != 0xA0A0A0A0)
return false;
}
return true;
}
static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
u32 val;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
IRQ_TYPE_LEGACY);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
COMMAND_RAISE_LEGACY_IRQ);
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
return false;
return true;
}
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
u16 msi_num, bool msix)
{
u32 val;
struct pci_dev *pdev = test->pdev;
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
msix ? COMMAND_RAISE_MSIX_IRQ :
COMMAND_RAISE_MSI_IRQ);
val = wait_for_completion_timeout(&test->irq_raised,
msecs_to_jiffies(1000));
if (!val)
return false;
return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
}
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
if (!param->size) {
dev_dbg(dev, "Data size is zero\n");
return -EINVAL;
}
if (param->size > SIZE_MAX - alignment) {
dev_dbg(dev, "Maximum transfer data size exceeded\n");
return -EINVAL;
}
return 0;
}
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
bool ret = false;
void *src_addr;
void *dst_addr;
u32 flags = 0;
bool use_dma;
size_t size;
dma_addr_t src_phys_addr;
dma_addr_t dst_phys_addr;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
void *orig_src_addr;
dma_addr_t orig_src_phys_addr;
void *orig_dst_addr;
dma_addr_t orig_dst_phys_addr;
size_t offset;
size_t alignment = test->alignment;
int irq_type = test->irq_type;
u32 src_crc32;
u32 dst_crc32;
int err;
err = copy_from_user(&param, (void __user *)arg, sizeof(param));
if (err) {
dev_err(dev, "Failed to get transfer param\n");
return false;
}
err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
if (err)
return false;
size = param.size;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
flags |= FLAG_USE_DMA;
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_src_addr) {
dev_err(dev, "Failed to allocate source buffer\n");
ret = false;
goto err;
}
get_random_bytes(orig_src_addr, size + alignment);
orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
size + alignment, DMA_TO_DEVICE);
if (dma_mapping_error(dev, orig_src_phys_addr)) {
dev_err(dev, "failed to map source buffer address\n");
ret = false;
goto err_src_phys_addr;
}
if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
offset = src_phys_addr - orig_src_phys_addr;
src_addr = orig_src_addr + offset;
} else {
src_phys_addr = orig_src_phys_addr;
src_addr = orig_src_addr;
}
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
lower_32_bits(src_phys_addr));
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
upper_32_bits(src_phys_addr));
src_crc32 = crc32_le(~0, src_addr, size);
orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_dst_addr) {
dev_err(dev, "Failed to allocate destination address\n");
ret = false;
goto err_dst_addr;
}
orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
size + alignment, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, orig_dst_phys_addr)) {
dev_err(dev, "failed to map destination buffer address\n");
ret = false;
goto err_dst_phys_addr;
}
if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
offset = dst_phys_addr - orig_dst_phys_addr;
dst_addr = orig_dst_addr + offset;
} else {
dst_phys_addr = orig_dst_phys_addr;
dst_addr = orig_dst_addr;
}
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
lower_32_bits(dst_phys_addr));
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
upper_32_bits(dst_phys_addr));
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
size);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
COMMAND_COPY);
wait_for_completion(&test->irq_raised);
dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
DMA_FROM_DEVICE);
dst_crc32 = crc32_le(~0, dst_addr, size);
if (dst_crc32 == src_crc32)
ret = true;
err_dst_phys_addr:
kfree(orig_dst_addr);
err_dst_addr:
dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
DMA_TO_DEVICE);
err_src_phys_addr:
kfree(orig_src_addr);
err:
return ret;
}
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
bool ret = false;
u32 flags = 0;
bool use_dma;
u32 reg;
void *addr;
dma_addr_t phys_addr;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
void *orig_addr;
dma_addr_t orig_phys_addr;
size_t offset;
size_t alignment = test->alignment;
int irq_type = test->irq_type;
size_t size;
u32 crc32;
int err;
err = copy_from_user(&param, (void __user *)arg, sizeof(param));
if (err != 0) {
dev_err(dev, "Failed to get transfer param\n");
return false;
}
err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
if (err)
return false;
size = param.size;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
flags |= FLAG_USE_DMA;
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate address\n");
ret = false;
goto err;
}
get_random_bytes(orig_addr, size + alignment);
orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, orig_phys_addr)) {
dev_err(dev, "failed to map source buffer address\n");
ret = false;
goto err_phys_addr;
}
if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
offset = phys_addr - orig_phys_addr;
addr = orig_addr + offset;
} else {
phys_addr = orig_phys_addr;
addr = orig_addr;
}
crc32 = crc32_le(~0, addr, size);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
crc32);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
lower_32_bits(phys_addr));
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
upper_32_bits(phys_addr));
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
COMMAND_READ);
wait_for_completion(&test->irq_raised);
reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
if (reg & STATUS_READ_SUCCESS)
ret = true;
dma_unmap_single(dev, orig_phys_addr, size + alignment,
DMA_TO_DEVICE);
err_phys_addr:
kfree(orig_addr);
err:
return ret;
}
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
unsigned long arg)
{
struct pci_endpoint_test_xfer_param param;
bool ret = false;
u32 flags = 0;
bool use_dma;
size_t size;
void *addr;
dma_addr_t phys_addr;
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
void *orig_addr;
dma_addr_t orig_phys_addr;
size_t offset;
size_t alignment = test->alignment;
int irq_type = test->irq_type;
u32 crc32;
int err;
err = copy_from_user(&param, (void __user *)arg, sizeof(param));
if (err) {
dev_err(dev, "Failed to get transfer param\n");
return false;
}
err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
if (err)
return false;
size = param.size;
use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
if (use_dma)
flags |= FLAG_USE_DMA;
if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
goto err;
}
orig_addr = kzalloc(size + alignment, GFP_KERNEL);
if (!orig_addr) {
dev_err(dev, "Failed to allocate destination address\n");
ret = false;
goto err;
}
orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, orig_phys_addr)) {
dev_err(dev, "failed to map source buffer address\n");
ret = false;
goto err_phys_addr;
}
if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
offset = phys_addr - orig_phys_addr;
addr = orig_addr + offset;
} else {
phys_addr = orig_phys_addr;
addr = orig_addr;
}
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
lower_32_bits(phys_addr));
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
upper_32_bits(phys_addr));
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
COMMAND_WRITE);
wait_for_completion(&test->irq_raised);
dma_unmap_single(dev, orig_phys_addr, size + alignment,
DMA_FROM_DEVICE);
crc32 = crc32_le(~0, addr, size);
if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
ret = true;
err_phys_addr:
kfree(orig_addr);
err:
return ret;
}
static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
return true;
}
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
int req_irq_type)
{
struct pci_dev *pdev = test->pdev;
struct device *dev = &pdev->dev;
if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
dev_err(dev, "Invalid IRQ type option\n");
return false;
}
if (test->irq_type == req_irq_type)
return true;
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
goto err;
if (!pci_endpoint_test_request_irq(test))
goto err;
return true;
err:
pci_endpoint_test_free_irq_vectors(test);
return false;
}
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
int ret = -EINVAL;
enum pci_barno bar;
struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
struct pci_dev *pdev = test->pdev;
mutex_lock(&test->mutex);
reinit_completion(&test->irq_raised);
test->last_irq = -ENODATA;
switch (cmd) {
case PCITEST_BAR:
bar = arg;
if (bar > BAR_5)
goto ret;
if (is_am654_pci_dev(pdev) && bar == BAR_0)
goto ret;
ret = pci_endpoint_test_bar(test, bar);
break;
case PCITEST_LEGACY_IRQ:
ret = pci_endpoint_test_legacy_irq(test);
break;
case PCITEST_MSI:
case PCITEST_MSIX:
ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
break;
case PCITEST_WRITE:
ret = pci_endpoint_test_write(test, arg);
break;
case PCITEST_READ:
ret = pci_endpoint_test_read(test, arg);
break;
case PCITEST_COPY:
ret = pci_endpoint_test_copy(test, arg);
break;
case PCITEST_SET_IRQTYPE:
ret = pci_endpoint_test_set_irq(test, arg);
break;
case PCITEST_GET_IRQTYPE:
ret = irq_type;
break;
case PCITEST_CLEAR_IRQ:
ret = pci_endpoint_test_clear_irq(test);
break;
}
ret:
mutex_unlock(&test->mutex);
return ret;
}
static const struct file_operations pci_endpoint_test_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = pci_endpoint_test_ioctl,
};
static int pci_endpoint_test_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int err;
int id;
char name[24];
enum pci_barno bar;
void __iomem *base;
struct device *dev = &pdev->dev;
struct pci_endpoint_test *test;
struct pci_endpoint_test_data *data;
enum pci_barno test_reg_bar = BAR_0;
struct miscdevice *misc_device;
if (pci_is_bridge(pdev))
return -ENODEV;
test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
if (!test)
return -ENOMEM;
test->test_reg_bar = 0;
test->alignment = 0;
test->pdev = pdev;
test->irq_type = IRQ_TYPE_UNDEFINED;
if (no_msi)
irq_type = IRQ_TYPE_LEGACY;
data = (struct pci_endpoint_test_data *)ent->driver_data;
if (data) {
test_reg_bar = data->test_reg_bar;
test->test_reg_bar = test_reg_bar;
test->alignment = data->alignment;
irq_type = data->irq_type;
}
init_completion(&test->irq_raised);
mutex_init(&test->mutex);
if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
dev_err(dev, "Cannot set DMA mask\n");
return -EINVAL;
}
err = pci_enable_device(pdev);
if (err) {
dev_err(dev, "Cannot enable PCI device\n");
return err;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
dev_err(dev, "Cannot obtain PCI resources\n");
goto err_disable_pdev;
}
pci_set_master(pdev);
if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
err = -EINVAL;
goto err_disable_irq;
}
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
base = pci_ioremap_bar(pdev, bar);
if (!base) {
dev_err(dev, "Failed to read BAR%d\n", bar);
WARN_ON(bar == test_reg_bar);
}
test->bar[bar] = base;
}
}
test->base = test->bar[test_reg_bar];
if (!test->base) {
err = -ENOMEM;
dev_err(dev, "Cannot perform PCI test without BAR%d\n",
test_reg_bar);
goto err_iounmap;
}
pci_set_drvdata(pdev, test);
id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
err = id;
dev_err(dev, "Unable to get id\n");
goto err_iounmap;
}
snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
test->name = kstrdup(name, GFP_KERNEL);
if (!test->name) {
err = -ENOMEM;
goto err_ida_remove;
}
if (!pci_endpoint_test_request_irq(test)) {
err = -EINVAL;
goto err_kfree_test_name;
}
misc_device = &test->miscdev;
misc_device->minor = MISC_DYNAMIC_MINOR;
misc_device->name = kstrdup(name, GFP_KERNEL);
if (!misc_device->name) {
err = -ENOMEM;
goto err_release_irq;
}
misc_device->parent = &pdev->dev;
misc_device->fops = &pci_endpoint_test_fops;
err = misc_register(misc_device);
if (err) {
dev_err(dev, "Failed to register device\n");
goto err_kfree_name;
}
return 0;
err_kfree_name:
kfree(misc_device->name);
err_release_irq:
pci_endpoint_test_release_irq(test);
err_kfree_test_name:
kfree(test->name);
err_ida_remove:
ida_simple_remove(&pci_endpoint_test_ida, id);
err_iounmap:
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
err_disable_irq:
pci_endpoint_test_free_irq_vectors(test);
pci_release_regions(pdev);
err_disable_pdev:
pci_disable_device(pdev);
return err;
}
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
int id;
enum pci_barno bar;
struct pci_endpoint_test *test = pci_get_drvdata(pdev);
struct miscdevice *misc_device = &test->miscdev;
if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
return;
if (id < 0)
return;
pci_endpoint_test_release_irq(test);
pci_endpoint_test_free_irq_vectors(test);
misc_deregister(&test->miscdev);
kfree(misc_device->name);
kfree(test->name);
ida_simple_remove(&pci_endpoint_test_ida, id);
for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
if (test->bar[bar])
pci_iounmap(pdev, test->bar[bar]);
}
pci_release_regions(pdev);
pci_disable_device(pdev);
}
static const struct pci_endpoint_test_data default_data = {
.test_reg_bar = BAR_0,
.alignment = SZ_4K,
.irq_type = IRQ_TYPE_MSI,
};
static const struct pci_endpoint_test_data am654_data = {
.test_reg_bar = BAR_2,
.alignment = SZ_64K,
.irq_type = IRQ_TYPE_MSI,
};
static const struct pci_endpoint_test_data j721e_data = {
.alignment = 256,
.irq_type = IRQ_TYPE_MSI,
};
static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
.driver_data = (kernel_ulong_t)&default_data,
},
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
.driver_data = (kernel_ulong_t)&default_data,
},
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
.driver_data = (kernel_ulong_t)&default_data,
},
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
.driver_data = (kernel_ulong_t)&default_data,
},
{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
.driver_data = (kernel_ulong_t)&am654_data
},
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
.driver_data = (kernel_ulong_t)&j721e_data,
},
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
.driver_data = (kernel_ulong_t)&j721e_data,
},
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
.driver_data = (kernel_ulong_t)&j721e_data,
},
{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
static struct pci_driver pci_endpoint_test_driver = {
.name = DRV_MODULE_NAME,
.id_table = pci_endpoint_test_tbl,
.probe = pci_endpoint_test_probe,
.remove = pci_endpoint_test_remove,
.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);
MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/pci_endpoint_test.c |
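/*
 * Editorial note -- hedged user-space sketch, not part of the driver above:
 * a minimal client for the misc device it registers, assuming the uapi
 * ioctls and struct pci_endpoint_test_xfer_param from <linux/pcitest.h>
 * and a node named /dev/pci-endpoint-test.0 produced by the
 * DRV_MODULE_NAME ".%d" naming scheme. The pcitest tool in tools/pci/ is
 * the reference client; this only illustrates the ioctl flow.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pcitest.h>

int main(void)
{
	struct pci_endpoint_test_xfer_param param = { .size = 1024, .flags = 0 };
	int fd, ret;

	fd = open("/dev/pci-endpoint-test.0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* BAR sanity test on BAR0 (the driver skips BAR0 on AM654) */
	ret = ioctl(fd, PCITEST_BAR, 0);
	printf("BAR0 test:  %s\n", ret == 1 ? "ok" : "failed");

	/* raise MSI vector 1 on the endpoint and wait for it on the host */
	ret = ioctl(fd, PCITEST_MSI, 1);
	printf("MSI1 test:  %s\n", ret == 1 ? "ok" : "failed");

	/* 1 KiB host-to-endpoint transfer, checksum verified by the endpoint */
	ret = ioctl(fd, PCITEST_WRITE, &param);
	printf("write test: %s\n", ret == 1 ? "ok" : "failed");

	close(fd);
	return 0;
}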
// SPDX-License-Identifier: GPL-2.0-only
/*
* Driver for the CS5535/CS5536 Multi-Function General Purpose Timers (MFGPT)
*
* Copyright (C) 2006, Advanced Micro Devices, Inc.
* Copyright (C) 2007 Andres Salomon <[email protected]>
* Copyright (C) 2009 Andres Salomon <[email protected]>
*
* The MFGPTs are documented in AMD Geode CS5536 Companion Device Data Book.
*/
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/cs5535.h>
#include <linux/slab.h>
#define DRV_NAME "cs5535-mfgpt"
static int mfgpt_reset_timers;
module_param_named(mfgptfix, mfgpt_reset_timers, int, 0644);
MODULE_PARM_DESC(mfgptfix, "Try to reset the MFGPT timers during init; "
"required by some broken BIOSes (ie, TinyBIOS < 0.99) or kexec "
"(1 = reset the MFGPT using an undocumented bit, "
"2 = perform a soft reset by unconfiguring all timers); "
"use what works best for you.");
struct cs5535_mfgpt_timer {
struct cs5535_mfgpt_chip *chip;
int nr;
};
static struct cs5535_mfgpt_chip {
DECLARE_BITMAP(avail, MFGPT_MAX_TIMERS);
resource_size_t base;
struct platform_device *pdev;
spinlock_t lock;
int initialized;
} cs5535_mfgpt_chip;
int cs5535_mfgpt_toggle_event(struct cs5535_mfgpt_timer *timer, int cmp,
int event, int enable)
{
uint32_t msr, mask, value, dummy;
int shift = (cmp == MFGPT_CMP1) ? 0 : 8;
if (!timer) {
WARN_ON(1);
return -EIO;
}
/*
* The register maps for these are described in sections 6.17.1.x of
* the AMD Geode CS5536 Companion Device Data Book.
*/
switch (event) {
case MFGPT_EVENT_RESET:
/*
* XXX: According to the docs, we cannot reset timers above
* 6; that is, resets for 7 and 8 will be ignored. Is this
* a problem? -dilinger
*/
msr = MSR_MFGPT_NR;
mask = 1 << (timer->nr + 24);
break;
case MFGPT_EVENT_NMI:
msr = MSR_MFGPT_NR;
mask = 1 << (timer->nr + shift);
break;
case MFGPT_EVENT_IRQ:
msr = MSR_MFGPT_IRQ;
mask = 1 << (timer->nr + shift);
break;
default:
return -EIO;
}
rdmsr(msr, value, dummy);
if (enable)
value |= mask;
else
value &= ~mask;
wrmsr(msr, value, dummy);
return 0;
}
EXPORT_SYMBOL_GPL(cs5535_mfgpt_toggle_event);
int cs5535_mfgpt_set_irq(struct cs5535_mfgpt_timer *timer, int cmp, int *irq,
int enable)
{
uint32_t zsel, lpc, dummy;
int shift;
if (!timer) {
WARN_ON(1);
return -EIO;
}
/*
* Unfortunately, MFGPTs come in pairs sharing their IRQ lines. If VSA
* is using the same CMP of the timer's Siamese twin, the IRQ is set to
* 2, and we must not use or change it.
* XXX: Likewise, two Linux drivers might clash if the second overwrites
* the IRQ of the first. This can only happen when forcing an IRQ;
* calling this with *irq == 0 is safe. Currently there are no such
* conflicting drivers.
*/
rdmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
shift = ((cmp == MFGPT_CMP1 ? 0 : 4) + timer->nr % 4) * 4;
if (((zsel >> shift) & 0xF) == 2)
return -EIO;
/* Choose IRQ: if none supplied, keep IRQ already set or use default */
if (!*irq)
*irq = (zsel >> shift) & 0xF;
if (!*irq)
*irq = CONFIG_CS5535_MFGPT_DEFAULT_IRQ;
/* Can't use IRQ if it's 0 (=disabled), 2, or routed to LPC */
if (*irq < 1 || *irq == 2 || *irq > 15)
return -EIO;
rdmsr(MSR_PIC_IRQM_LPC, lpc, dummy);
if (lpc & (1 << *irq))
return -EIO;
/* All chosen and checked - go for it */
if (cs5535_mfgpt_toggle_event(timer, cmp, MFGPT_EVENT_IRQ, enable))
return -EIO;
if (enable) {
zsel = (zsel & ~(0xF << shift)) | (*irq << shift);
wrmsr(MSR_PIC_ZSEL_LOW, zsel, dummy);
}
return 0;
}
EXPORT_SYMBOL_GPL(cs5535_mfgpt_set_irq);
struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain)
{
struct cs5535_mfgpt_chip *mfgpt = &cs5535_mfgpt_chip;
struct cs5535_mfgpt_timer *timer = NULL;
unsigned long flags;
int max;
if (!mfgpt->initialized)
goto done;
/* only allocate timers from the working domain if requested */
if (domain == MFGPT_DOMAIN_WORKING)
max = 6;
else
max = MFGPT_MAX_TIMERS;
if (timer_nr >= max) {
/* programmer error. silly programmers! */
WARN_ON(1);
goto done;
}
spin_lock_irqsave(&mfgpt->lock, flags);
if (timer_nr < 0) {
unsigned long t;
/* try to find any available timer */
t = find_first_bit(mfgpt->avail, max);
/* set timer_nr to -1 if no timers available */
timer_nr = t < max ? (int) t : -1;
} else {
/* check if the requested timer's available */
if (!test_bit(timer_nr, mfgpt->avail))
timer_nr = -1;
}
if (timer_nr >= 0)
/* if timer_nr is not -1, it's an available timer */
__clear_bit(timer_nr, mfgpt->avail);
spin_unlock_irqrestore(&mfgpt->lock, flags);
if (timer_nr < 0)
goto done;
timer = kmalloc(sizeof(*timer), GFP_KERNEL);
if (!timer) {
/* aw hell */
spin_lock_irqsave(&mfgpt->lock, flags);
__set_bit(timer_nr, mfgpt->avail);
spin_unlock_irqrestore(&mfgpt->lock, flags);
goto done;
}
timer->chip = mfgpt;
timer->nr = timer_nr;
dev_info(&mfgpt->pdev->dev, "registered timer %d\n", timer_nr);
done:
return timer;
}
EXPORT_SYMBOL_GPL(cs5535_mfgpt_alloc_timer);
/*
* XXX: This frees the timer memory, but never resets the actual hardware
* timer. The old geode_mfgpt code did this; it would be good to figure
* out a way to actually release the hardware timer. See comments below.
*/
void cs5535_mfgpt_free_timer(struct cs5535_mfgpt_timer *timer)
{
unsigned long flags;
uint16_t val;
/* timer can be made available again only if never set up */
val = cs5535_mfgpt_read(timer, MFGPT_REG_SETUP);
if (!(val & MFGPT_SETUP_SETUP)) {
spin_lock_irqsave(&timer->chip->lock, flags);
__set_bit(timer->nr, timer->chip->avail);
spin_unlock_irqrestore(&timer->chip->lock, flags);
}
kfree(timer);
}
EXPORT_SYMBOL_GPL(cs5535_mfgpt_free_timer);
uint16_t cs5535_mfgpt_read(struct cs5535_mfgpt_timer *timer, uint16_t reg)
{
return inw(timer->chip->base + reg + (timer->nr * 8));
}
EXPORT_SYMBOL_GPL(cs5535_mfgpt_read);
void cs5535_mfgpt_write(struct cs5535_mfgpt_timer *timer, uint16_t reg,
uint16_t value)
{
outw(value, timer->chip->base + reg + (timer->nr * 8));
}
EXPORT_SYMBOL_GPL(cs5535_mfgpt_write);
/*
* This is a sledgehammer that resets all MFGPT timers. This is required by
* some broken BIOSes which leave the system in an unstable state
* (TinyBIOS 0.98, for example; fixed in 0.99). It's uncertain whether
* this secret MSR can be used to release individual timers.
* Jordan tells me that he and Mitch once played w/ it, but it's unclear
* what the results of that were (and they experienced some instability).
*/
static void reset_all_timers(void)
{
uint32_t val, dummy;
/* The following undocumented bit resets the MFGPT timers */
val = 0xFF; dummy = 0;
wrmsr(MSR_MFGPT_SETUP, val, dummy);
}
/*
* This is another sledgehammer to reset all MFGPT timers.
* Instead of using the undocumented bit method, it clears
* IRQ, NMI and RESET settings.
*/
static void soft_reset(void)
{
int i;
struct cs5535_mfgpt_timer t;
for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
t.nr = i;
cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_RESET, 0);
cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_RESET, 0);
cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_NMI, 0);
cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_NMI, 0);
cs5535_mfgpt_toggle_event(&t, MFGPT_CMP1, MFGPT_EVENT_IRQ, 0);
cs5535_mfgpt_toggle_event(&t, MFGPT_CMP2, MFGPT_EVENT_IRQ, 0);
}
}
/*
* Check whether any MFGPTs are available for the kernel to use. In most
* cases, firmware that uses AMD's VSA code will claim all timers during
* bootup; we certainly don't want to take them if they're already in use.
* In other cases (such as with VSAless OpenFirmware), the system firmware
* leaves timers available for us to use.
*/
static int scan_timers(struct cs5535_mfgpt_chip *mfgpt)
{
struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
unsigned long flags;
int timers = 0;
uint16_t val;
int i;
/* bios workaround */
if (mfgpt_reset_timers == 1)
reset_all_timers();
else if (mfgpt_reset_timers == 2)
soft_reset();
/* just to be safe, protect this section w/ lock */
spin_lock_irqsave(&mfgpt->lock, flags);
for (i = 0; i < MFGPT_MAX_TIMERS; i++) {
timer.nr = i;
val = cs5535_mfgpt_read(&timer, MFGPT_REG_SETUP);
if (!(val & MFGPT_SETUP_SETUP) || mfgpt_reset_timers == 2) {
__set_bit(i, mfgpt->avail);
timers++;
}
}
spin_unlock_irqrestore(&mfgpt->lock, flags);
return timers;
}
static int cs5535_mfgpt_probe(struct platform_device *pdev)
{
struct resource *res;
int err = -EIO, t;
if (mfgpt_reset_timers < 0 || mfgpt_reset_timers > 2) {
dev_err(&pdev->dev, "Bad mfgpt_reset_timers value: %i\n",
mfgpt_reset_timers);
goto done;
}
/* There are two ways to get the MFGPT base address; one is by
* fetching it from MSR_LBAR_MFGPT, the other is by reading the
* PCI BAR info. The latter method is easier (especially across
* different architectures), so we'll stick with that for now. If
* it turns out to be unreliable in the face of crappy BIOSes, we
* can always go back to using MSRs. */
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!res) {
dev_err(&pdev->dev, "can't fetch device resource info\n");
goto done;
}
if (!request_region(res->start, resource_size(res), pdev->name)) {
dev_err(&pdev->dev, "can't request region\n");
goto done;
}
/* set up the driver-specific struct */
cs5535_mfgpt_chip.base = res->start;
cs5535_mfgpt_chip.pdev = pdev;
spin_lock_init(&cs5535_mfgpt_chip.lock);
dev_info(&pdev->dev, "reserved resource region %pR\n", res);
/* detect the available timers */
t = scan_timers(&cs5535_mfgpt_chip);
dev_info(&pdev->dev, "%d MFGPT timers available\n", t);
cs5535_mfgpt_chip.initialized = 1;
return 0;
done:
return err;
}
static struct platform_driver cs5535_mfgpt_driver = {
.driver = {
.name = DRV_NAME,
},
.probe = cs5535_mfgpt_probe,
};
static int __init cs5535_mfgpt_init(void)
{
return platform_driver_register(&cs5535_mfgpt_driver);
}
module_init(cs5535_mfgpt_init);
MODULE_AUTHOR("Andres Salomon <[email protected]>");
MODULE_DESCRIPTION("CS5535/CS5536 MFGPT timer driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
| linux-master | drivers/misc/cs5535-mfgpt.c |
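/*
 * Editorial note -- hedged consumer sketch, not part of the driver above:
 * how a client (the cs5535 clock-event driver is the in-tree example) might
 * claim and program a timer through the API exported above. The register
 * and bit names (MFGPT_TIMER_ANY, MFGPT_REG_*, MFGPT_SETUP_*) are assumed
 * to come from <linux/cs5535.h>; prescaler setup is omitted for brevity.
 */
#include <linux/cs5535.h>
#include <linux/module.h>

static struct cs5535_mfgpt_timer *example_timer;

static int __init example_mfgpt_init(void)
{
	/* grab any free timer from the working domain (timers 0..5) */
	example_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY,
						 MFGPT_DOMAIN_WORKING);
	if (!example_timer)
		return -ENODEV;

	/* count from 0 up to 1000 against comparator 2, then enable counting */
	cs5535_mfgpt_write(example_timer, MFGPT_REG_CMP2, 1000);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_COUNTER, 0);
	cs5535_mfgpt_write(example_timer, MFGPT_REG_SETUP,
			   MFGPT_SETUP_CNTEN | MFGPT_SETUP_CMP2);
	return 0;
}

static void __exit example_mfgpt_exit(void)
{
	cs5535_mfgpt_free_timer(example_timer);
}

module_init(example_mfgpt_init);
module_exit(example_mfgpt_exit);
MODULE_LICENSE("GPL");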
// SPDX-License-Identifier: GPL-2.0-only
/*
* Ampere Computing SoC's SMpro Error Monitoring Driver
*
* Copyright (c) 2022, Ampere Computing LLC
*
*/
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
/* GPI RAS Error Registers */
#define GPI_RAS_ERR 0x7E
/* Core and L2C Error Registers */
#define CORE_CE_ERR_CNT 0x80
#define CORE_CE_ERR_LEN 0x81
#define CORE_CE_ERR_DATA 0x82
#define CORE_UE_ERR_CNT 0x83
#define CORE_UE_ERR_LEN 0x84
#define CORE_UE_ERR_DATA 0x85
/* Memory Error Registers */
#define MEM_CE_ERR_CNT 0x90
#define MEM_CE_ERR_LEN 0x91
#define MEM_CE_ERR_DATA 0x92
#define MEM_UE_ERR_CNT 0x93
#define MEM_UE_ERR_LEN 0x94
#define MEM_UE_ERR_DATA 0x95
/* RAS Error/Warning Registers */
#define ERR_SMPRO_TYPE 0xA0
#define ERR_PMPRO_TYPE 0xA1
#define ERR_SMPRO_INFO_LO 0xA2
#define ERR_SMPRO_INFO_HI 0xA3
#define ERR_SMPRO_DATA_LO 0xA4
#define ERR_SMPRO_DATA_HI 0xA5
#define WARN_SMPRO_INFO_LO 0xAA
#define WARN_SMPRO_INFO_HI 0xAB
#define ERR_PMPRO_INFO_LO 0xA6
#define ERR_PMPRO_INFO_HI 0xA7
#define ERR_PMPRO_DATA_LO 0xA8
#define ERR_PMPRO_DATA_HI 0xA9
#define WARN_PMPRO_INFO_LO 0xAC
#define WARN_PMPRO_INFO_HI 0xAD
/* Boot Stage Register */
#define BOOTSTAGE 0xB0
#define DIMM_SYNDROME_SEL 0xB4
#define DIMM_SYNDROME_ERR 0xB5
#define DIMM_SYNDROME_STAGE 4
/* PCIE Error Registers */
#define PCIE_CE_ERR_CNT 0xC0
#define PCIE_CE_ERR_LEN 0xC1
#define PCIE_CE_ERR_DATA 0xC2
#define PCIE_UE_ERR_CNT 0xC3
#define PCIE_UE_ERR_LEN 0xC4
#define PCIE_UE_ERR_DATA 0xC5
/* Other Error Registers */
#define OTHER_CE_ERR_CNT 0xD0
#define OTHER_CE_ERR_LEN 0xD1
#define OTHER_CE_ERR_DATA 0xD2
#define OTHER_UE_ERR_CNT 0xD8
#define OTHER_UE_ERR_LEN 0xD9
#define OTHER_UE_ERR_DATA 0xDA
/* Event Data Registers */
#define VRD_WARN_FAULT_EVENT_DATA 0x78
#define VRD_HOT_EVENT_DATA 0x79
#define DIMM_HOT_EVENT_DATA 0x7A
#define DIMM_2X_REFRESH_EVENT_DATA 0x96
#define MAX_READ_BLOCK_LENGTH 48
#define RAS_SMPRO_ERR 0
#define RAS_PMPRO_ERR 1
enum RAS_48BYTES_ERR_TYPES {
CORE_CE_ERR,
CORE_UE_ERR,
MEM_CE_ERR,
MEM_UE_ERR,
PCIE_CE_ERR,
PCIE_UE_ERR,
OTHER_CE_ERR,
OTHER_UE_ERR,
NUM_48BYTES_ERR_TYPE,
};
struct smpro_error_hdr {
u8 count; /* Number of the RAS errors */
u8 len; /* Number of data bytes */
u8 data; /* Start of 48-byte data */
u8 max_cnt; /* Max num of errors */
};
/*
* Addresses of the registers used to read the count, the data length,
* and the data itself for each 48-byte error record.
*/
static struct smpro_error_hdr smpro_error_table[] = {
[CORE_CE_ERR] = {
.count = CORE_CE_ERR_CNT,
.len = CORE_CE_ERR_LEN,
.data = CORE_CE_ERR_DATA,
.max_cnt = 32
},
[CORE_UE_ERR] = {
.count = CORE_UE_ERR_CNT,
.len = CORE_UE_ERR_LEN,
.data = CORE_UE_ERR_DATA,
.max_cnt = 32
},
[MEM_CE_ERR] = {
.count = MEM_CE_ERR_CNT,
.len = MEM_CE_ERR_LEN,
.data = MEM_CE_ERR_DATA,
.max_cnt = 16
},
[MEM_UE_ERR] = {
.count = MEM_UE_ERR_CNT,
.len = MEM_UE_ERR_LEN,
.data = MEM_UE_ERR_DATA,
.max_cnt = 16
},
[PCIE_CE_ERR] = {
.count = PCIE_CE_ERR_CNT,
.len = PCIE_CE_ERR_LEN,
.data = PCIE_CE_ERR_DATA,
.max_cnt = 96
},
[PCIE_UE_ERR] = {
.count = PCIE_UE_ERR_CNT,
.len = PCIE_UE_ERR_LEN,
.data = PCIE_UE_ERR_DATA,
.max_cnt = 96
},
[OTHER_CE_ERR] = {
.count = OTHER_CE_ERR_CNT,
.len = OTHER_CE_ERR_LEN,
.data = OTHER_CE_ERR_DATA,
.max_cnt = 8
},
[OTHER_UE_ERR] = {
.count = OTHER_UE_ERR_CNT,
.len = OTHER_UE_ERR_LEN,
.data = OTHER_UE_ERR_DATA,
.max_cnt = 8
},
};
/*
* List of the SCP registers used to retrieve one type of
* RAS internal error.
*/
struct smpro_int_error_hdr {
u8 type;
u8 info_l;
u8 info_h;
u8 data_l;
u8 data_h;
u8 warn_l;
u8 warn_h;
};
static struct smpro_int_error_hdr list_smpro_int_error_hdr[] = {
[RAS_SMPRO_ERR] = {
.type = ERR_SMPRO_TYPE,
.info_l = ERR_SMPRO_INFO_LO,
.info_h = ERR_SMPRO_INFO_HI,
.data_l = ERR_SMPRO_DATA_LO,
.data_h = ERR_SMPRO_DATA_HI,
.warn_l = WARN_SMPRO_INFO_LO,
.warn_h = WARN_SMPRO_INFO_HI,
},
[RAS_PMPRO_ERR] = {
.type = ERR_PMPRO_TYPE,
.info_l = ERR_PMPRO_INFO_LO,
.info_h = ERR_PMPRO_INFO_HI,
.data_l = ERR_PMPRO_DATA_LO,
.data_h = ERR_PMPRO_DATA_HI,
.warn_l = WARN_PMPRO_INFO_LO,
.warn_h = WARN_PMPRO_INFO_HI,
},
};
struct smpro_errmon {
struct regmap *regmap;
};
enum EVENT_TYPES {
VRD_WARN_FAULT_EVENT,
VRD_HOT_EVENT,
DIMM_HOT_EVENT,
DIMM_2X_REFRESH_EVENT,
NUM_EVENTS_TYPE,
};
/* Addresses of the event data registers */
static u8 smpro_event_table[NUM_EVENTS_TYPE] = {
VRD_WARN_FAULT_EVENT_DATA,
VRD_HOT_EVENT_DATA,
DIMM_HOT_EVENT_DATA,
DIMM_2X_REFRESH_EVENT_DATA,
};
static ssize_t smpro_event_data_read(struct device *dev,
struct device_attribute *da, char *buf,
int channel)
{
struct smpro_errmon *errmon = dev_get_drvdata(dev);
s32 event_data;
int ret;
ret = regmap_read(errmon->regmap, smpro_event_table[channel], &event_data);
if (ret)
return ret;
/* Clear event after read */
if (event_data != 0)
regmap_write(errmon->regmap, smpro_event_table[channel], event_data);
return sysfs_emit(buf, "%04x\n", event_data);
}
static ssize_t smpro_overflow_data_read(struct device *dev, struct device_attribute *da,
char *buf, int channel)
{
struct smpro_errmon *errmon = dev_get_drvdata(dev);
struct smpro_error_hdr *err_info;
s32 err_count;
int ret;
err_info = &smpro_error_table[channel];
ret = regmap_read(errmon->regmap, err_info->count, &err_count);
if (ret)
return ret;
/* Bit 8 indicates the overflow status */
return sysfs_emit(buf, "%d\n", (err_count & BIT(8)) ? 1 : 0);
}
static ssize_t smpro_error_data_read(struct device *dev, struct device_attribute *da,
char *buf, int channel)
{
struct smpro_errmon *errmon = dev_get_drvdata(dev);
unsigned char err_data[MAX_READ_BLOCK_LENGTH];
struct smpro_error_hdr *err_info;
s32 err_count, err_length;
int ret;
err_info = &smpro_error_table[channel];
ret = regmap_read(errmon->regmap, err_info->count, &err_count);
/* Error count is the low byte */
err_count &= 0xff;
if (ret || !err_count || err_count > err_info->max_cnt)
return ret;
ret = regmap_read(errmon->regmap, err_info->len, &err_length);
if (ret || err_length <= 0)
return ret;
if (err_length > MAX_READ_BLOCK_LENGTH)
err_length = MAX_READ_BLOCK_LENGTH;
memset(err_data, 0x00, MAX_READ_BLOCK_LENGTH);
ret = regmap_noinc_read(errmon->regmap, err_info->data, err_data, err_length);
if (ret < 0)
return ret;
/* clear the error */
ret = regmap_write(errmon->regmap, err_info->count, 0x100);
if (ret)
return ret;
/*
* The output of Core/Memory/PCIe/Others UE/CE errors follows the format
* specified in section 5.8.1 CE/UE Error Data record in
* Altra SOC BMC Interface specification.
*/
return sysfs_emit(buf, "%*phN\n", MAX_READ_BLOCK_LENGTH, err_data);
}
/*
* Output format:
* <4-byte hex value of error info><4-byte hex value of error extensive data>
* Where:
* + error info : The error information
* + error data : Extensive data (32 bits)
* Reference to section 5.10 RAS Internal Error Register Definition in
* Altra SOC BMC Interface specification
*/
static ssize_t smpro_internal_err_read(struct device *dev, struct device_attribute *da,
char *buf, int channel)
{
struct smpro_errmon *errmon = dev_get_drvdata(dev);
struct smpro_int_error_hdr *err_info;
unsigned int err[4] = { 0 };
unsigned int err_type;
unsigned int val;
int ret;
/* read error status */
ret = regmap_read(errmon->regmap, GPI_RAS_ERR, &val);
if (ret)
return ret;
if ((channel == RAS_SMPRO_ERR && !(val & BIT(0))) ||
(channel == RAS_PMPRO_ERR && !(val & BIT(1))))
return 0;
err_info = &list_smpro_int_error_hdr[channel];
ret = regmap_read(errmon->regmap, err_info->type, &val);
if (ret)
return ret;
err_type = (val & BIT(1)) ? BIT(1) :
(val & BIT(2)) ? BIT(2) : 0;
if (!err_type)
return 0;
ret = regmap_read(errmon->regmap, err_info->info_l, err + 1);
if (ret)
return ret;
ret = regmap_read(errmon->regmap, err_info->info_h, err);
if (ret)
return ret;
if (err_type & BIT(2)) {
/* Error with data type */
ret = regmap_read(errmon->regmap, err_info->data_l, err + 3);
if (ret)
return ret;
ret = regmap_read(errmon->regmap, err_info->data_h, err + 2);
if (ret)
return ret;
}
/* clear the read errors */
ret = regmap_write(errmon->regmap, err_info->type, err_type);
if (ret)
return ret;
return sysfs_emit(buf, "%*phN\n", (int)sizeof(err), err);
}
/*
* Output format:
* <4-byte hex value of warning info>
* Reference to section 5.10 RAS Internal Error Register Definition in
* Altra SOC BMC Interface specification
*/
static ssize_t smpro_internal_warn_read(struct device *dev, struct device_attribute *da,
char *buf, int channel)
{
struct smpro_errmon *errmon = dev_get_drvdata(dev);
struct smpro_int_error_hdr *err_info;
unsigned int warn[2] = { 0 };
unsigned int val;
int ret;
/* read error status */
ret = regmap_read(errmon->regmap, GPI_RAS_ERR, &val);
if (ret)
return ret;
if ((channel == RAS_SMPRO_ERR && !(val & BIT(0))) ||
(channel == RAS_PMPRO_ERR && !(val & BIT(1))))
return 0;
err_info = &list_smpro_int_error_hdr[channel];
ret = regmap_read(errmon->regmap, err_info->type, &val);
if (ret)
return ret;
if (!(val & BIT(0)))
return 0;
ret = regmap_read(errmon->regmap, err_info->warn_l, warn + 1);
if (ret)
return ret;
ret = regmap_read(errmon->regmap, err_info->warn_h, warn);
if (ret)
return ret;
/* clear the warning */
ret = regmap_write(errmon->regmap, err_info->type, BIT(0));
if (ret)
return ret;
return sysfs_emit(buf, "%*phN\n", (int)sizeof(warn), warn);
}
#define ERROR_OVERFLOW_RO(_error, _index) \
static ssize_t overflow_##_error##_show(struct device *dev, \
struct device_attribute *da, \
char *buf) \
{ \
return smpro_overflow_data_read(dev, da, buf, _index); \
} \
static DEVICE_ATTR_RO(overflow_##_error)
ERROR_OVERFLOW_RO(core_ce, CORE_CE_ERR);
ERROR_OVERFLOW_RO(core_ue, CORE_UE_ERR);
ERROR_OVERFLOW_RO(mem_ce, MEM_CE_ERR);
ERROR_OVERFLOW_RO(mem_ue, MEM_UE_ERR);
ERROR_OVERFLOW_RO(pcie_ce, PCIE_CE_ERR);
ERROR_OVERFLOW_RO(pcie_ue, PCIE_UE_ERR);
ERROR_OVERFLOW_RO(other_ce, OTHER_CE_ERR);
ERROR_OVERFLOW_RO(other_ue, OTHER_UE_ERR);
#define ERROR_RO(_error, _index) \
static ssize_t error_##_error##_show(struct device *dev, \
struct device_attribute *da, \
char *buf) \
{ \
return smpro_error_data_read(dev, da, buf, _index); \
} \
static DEVICE_ATTR_RO(error_##_error)
ERROR_RO(core_ce, CORE_CE_ERR);
ERROR_RO(core_ue, CORE_UE_ERR);
ERROR_RO(mem_ce, MEM_CE_ERR);
ERROR_RO(mem_ue, MEM_UE_ERR);
ERROR_RO(pcie_ce, PCIE_CE_ERR);
ERROR_RO(pcie_ue, PCIE_UE_ERR);
ERROR_RO(other_ce, OTHER_CE_ERR);
ERROR_RO(other_ue, OTHER_UE_ERR);
static ssize_t error_smpro_show(struct device *dev, struct device_attribute *da, char *buf)
{
return smpro_internal_err_read(dev, da, buf, RAS_SMPRO_ERR);
}
static DEVICE_ATTR_RO(error_smpro);
static ssize_t error_pmpro_show(struct device *dev, struct device_attribute *da, char *buf)
{
return smpro_internal_err_read(dev, da, buf, RAS_PMPRO_ERR);
}
static DEVICE_ATTR_RO(error_pmpro);
static ssize_t warn_smpro_show(struct device *dev, struct device_attribute *da, char *buf)
{
return smpro_internal_warn_read(dev, da, buf, RAS_SMPRO_ERR);
}
static DEVICE_ATTR_RO(warn_smpro);
static ssize_t warn_pmpro_show(struct device *dev, struct device_attribute *da, char *buf)
{
return smpro_internal_warn_read(dev, da, buf, RAS_PMPRO_ERR);
}
static DEVICE_ATTR_RO(warn_pmpro);
#define EVENT_RO(_event, _index) \
static ssize_t event_##_event##_show(struct device *dev, \
struct device_attribute *da, \
char *buf) \
{ \
return smpro_event_data_read(dev, da, buf, _index); \
} \
static DEVICE_ATTR_RO(event_##_event)
EVENT_RO(vrd_warn_fault, VRD_WARN_FAULT_EVENT);
EVENT_RO(vrd_hot, VRD_HOT_EVENT);
EVENT_RO(dimm_hot, DIMM_HOT_EVENT);
EVENT_RO(dimm_2x_refresh, DIMM_2X_REFRESH_EVENT);
static ssize_t smpro_dimm_syndrome_read(struct device *dev, struct device_attribute *da,
char *buf, unsigned int slot)
{
struct smpro_errmon *errmon = dev_get_drvdata(dev);
unsigned int data;
int ret;
ret = regmap_read(errmon->regmap, BOOTSTAGE, &data);
if (ret)
return ret;
/* check for valid stage */
data = (data >> 8) & 0xff;
if (data != DIMM_SYNDROME_STAGE)
return ret;
/* Write the slot ID to retrieve Error Syndrome */
ret = regmap_write(errmon->regmap, DIMM_SYNDROME_SEL, slot);
if (ret)
return ret;
/* Read the Syndrome error */
ret = regmap_read(errmon->regmap, DIMM_SYNDROME_ERR, &data);
if (ret || !data)
return ret;
return sysfs_emit(buf, "%04x\n", data);
}
#define EVENT_DIMM_SYNDROME(_slot) \
static ssize_t event_dimm##_slot##_syndrome_show(struct device *dev, \
struct device_attribute *da, \
char *buf) \
{ \
return smpro_dimm_syndrome_read(dev, da, buf, _slot); \
} \
static DEVICE_ATTR_RO(event_dimm##_slot##_syndrome)
EVENT_DIMM_SYNDROME(0);
EVENT_DIMM_SYNDROME(1);
EVENT_DIMM_SYNDROME(2);
EVENT_DIMM_SYNDROME(3);
EVENT_DIMM_SYNDROME(4);
EVENT_DIMM_SYNDROME(5);
EVENT_DIMM_SYNDROME(6);
EVENT_DIMM_SYNDROME(7);
EVENT_DIMM_SYNDROME(8);
EVENT_DIMM_SYNDROME(9);
EVENT_DIMM_SYNDROME(10);
EVENT_DIMM_SYNDROME(11);
EVENT_DIMM_SYNDROME(12);
EVENT_DIMM_SYNDROME(13);
EVENT_DIMM_SYNDROME(14);
EVENT_DIMM_SYNDROME(15);
static struct attribute *smpro_errmon_attrs[] = {
&dev_attr_overflow_core_ce.attr,
&dev_attr_overflow_core_ue.attr,
&dev_attr_overflow_mem_ce.attr,
&dev_attr_overflow_mem_ue.attr,
&dev_attr_overflow_pcie_ce.attr,
&dev_attr_overflow_pcie_ue.attr,
&dev_attr_overflow_other_ce.attr,
&dev_attr_overflow_other_ue.attr,
&dev_attr_error_core_ce.attr,
&dev_attr_error_core_ue.attr,
&dev_attr_error_mem_ce.attr,
&dev_attr_error_mem_ue.attr,
&dev_attr_error_pcie_ce.attr,
&dev_attr_error_pcie_ue.attr,
&dev_attr_error_other_ce.attr,
&dev_attr_error_other_ue.attr,
&dev_attr_error_smpro.attr,
&dev_attr_error_pmpro.attr,
&dev_attr_warn_smpro.attr,
&dev_attr_warn_pmpro.attr,
&dev_attr_event_vrd_warn_fault.attr,
&dev_attr_event_vrd_hot.attr,
&dev_attr_event_dimm_hot.attr,
&dev_attr_event_dimm_2x_refresh.attr,
&dev_attr_event_dimm0_syndrome.attr,
&dev_attr_event_dimm1_syndrome.attr,
&dev_attr_event_dimm2_syndrome.attr,
&dev_attr_event_dimm3_syndrome.attr,
&dev_attr_event_dimm4_syndrome.attr,
&dev_attr_event_dimm5_syndrome.attr,
&dev_attr_event_dimm6_syndrome.attr,
&dev_attr_event_dimm7_syndrome.attr,
&dev_attr_event_dimm8_syndrome.attr,
&dev_attr_event_dimm9_syndrome.attr,
&dev_attr_event_dimm10_syndrome.attr,
&dev_attr_event_dimm11_syndrome.attr,
&dev_attr_event_dimm12_syndrome.attr,
&dev_attr_event_dimm13_syndrome.attr,
&dev_attr_event_dimm14_syndrome.attr,
&dev_attr_event_dimm15_syndrome.attr,
NULL
};
ATTRIBUTE_GROUPS(smpro_errmon);
static int smpro_errmon_probe(struct platform_device *pdev)
{
struct smpro_errmon *errmon;
errmon = devm_kzalloc(&pdev->dev, sizeof(struct smpro_errmon), GFP_KERNEL);
if (!errmon)
return -ENOMEM;
platform_set_drvdata(pdev, errmon);
errmon->regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!errmon->regmap)
return -ENODEV;
return 0;
}
static struct platform_driver smpro_errmon_driver = {
.probe = smpro_errmon_probe,
.driver = {
.name = "smpro-errmon",
.dev_groups = smpro_errmon_groups,
},
};
module_platform_driver(smpro_errmon_driver);
MODULE_AUTHOR("Tung Nguyen <[email protected]>");
MODULE_AUTHOR("Thinh Pham <[email protected]>");
MODULE_AUTHOR("Hoang Nguyen <[email protected]>");
MODULE_AUTHOR("Thu Nguyen <[email protected]>");
MODULE_AUTHOR("Quan Nguyen <[email protected]>");
MODULE_DESCRIPTION("Ampere Altra SMpro driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/smpro-errmon.c |
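/*
 * Editorial note -- hedged user-space sketch, not part of the driver above:
 * reading one of the sysfs attributes registered through
 * smpro_errmon_groups. The device path is an assumption -- it depends on
 * how the parent SMpro MFD instantiates this platform device -- so adjust
 * it to the node that actually appears on the target system.
 */
#include <stdio.h>

int main(void)
{
	/* hypothetical node name; the attribute matches dev_attr_error_core_ce */
	FILE *f = fopen("/sys/bus/platform/devices/smpro-errmon.0.auto/error_core_ce", "r");
	char record[2 * 48 + 2];	/* 96 hex chars + newline + NUL */

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(record, sizeof(record), f))
		printf("core CE record: %s", record);
	fclose(f);
	return 0;
}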
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Xilinx TMR Manager IP.
*
* Copyright (C) 2022 Advanced Micro Devices, Inc.
*
* Description:
* This driver is developed for the TMR Manager. The Triple Modular
* Redundancy (TMR) Manager is responsible for handling the TMR subsystem
* state, including fault detection and error recovery. The core is
* triplicated in each of the sub-blocks in the TMR subsystem and provides
* majority voting of its internal state, which gives soft error detection,
* correction and recovery.
*/
#include <asm/xilinx_mb_manager.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
/* TMR Manager Register offsets */
#define XTMR_MANAGER_CR_OFFSET 0x0
#define XTMR_MANAGER_FFR_OFFSET 0x4
#define XTMR_MANAGER_CMR0_OFFSET 0x8
#define XTMR_MANAGER_CMR1_OFFSET 0xC
#define XTMR_MANAGER_BDIR_OFFSET 0x10
#define XTMR_MANAGER_SEMIMR_OFFSET 0x1C
/* Register Bitmasks/shifts */
#define XTMR_MANAGER_CR_MAGIC1_MASK GENMASK(7, 0)
#define XTMR_MANAGER_CR_MAGIC2_MASK GENMASK(15, 8)
#define XTMR_MANAGER_CR_RIR_MASK BIT(16)
#define XTMR_MANAGER_FFR_LM12_MASK BIT(0)
#define XTMR_MANAGER_FFR_LM13_MASK BIT(1)
#define XTMR_MANAGER_FFR_LM23_MASK BIT(2)
#define XTMR_MANAGER_CR_MAGIC2_SHIFT 4
#define XTMR_MANAGER_CR_RIR_SHIFT 16
#define XTMR_MANAGER_CR_BB_SHIFT 18
#define XTMR_MANAGER_MAGIC1_MAX_VAL 255
/**
* struct xtmr_manager_dev - Driver data for TMR Manager
* @regs: device physical base address
* @cr_val: control register value
* @magic1: Magic 1 hardware configuration value
* @err_cnt: error statistics count
* @phys_baseaddr: Physical base address
*/
struct xtmr_manager_dev {
void __iomem *regs;
u32 cr_val;
u32 magic1;
u32 err_cnt;
resource_size_t phys_baseaddr;
};
/* IO accessors */
static inline void xtmr_manager_write(struct xtmr_manager_dev *xtmr_manager,
u32 addr, u32 value)
{
iowrite32(value, xtmr_manager->regs + addr);
}
static inline u32 xtmr_manager_read(struct xtmr_manager_dev *xtmr_manager,
u32 addr)
{
return ioread32(xtmr_manager->regs + addr);
}
static void xmb_manager_reset_handler(struct xtmr_manager_dev *xtmr_manager)
{
/* Clear the FFR register contents as part of the recovery process. */
xtmr_manager_write(xtmr_manager, XTMR_MANAGER_FFR_OFFSET, 0);
}
static void xmb_manager_update_errcnt(struct xtmr_manager_dev *xtmr_manager)
{
xtmr_manager->err_cnt++;
}
static ssize_t errcnt_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xtmr_manager_dev *xtmr_manager = dev_get_drvdata(dev);
return sysfs_emit(buf, "%x\n", xtmr_manager->err_cnt);
}
static DEVICE_ATTR_RO(errcnt);
static ssize_t dis_block_break_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
{
struct xtmr_manager_dev *xtmr_manager = dev_get_drvdata(dev);
int ret;
long value;
ret = kstrtoul(buf, 16, &value);
if (ret)
return ret;
/* unblock the break signal */
xtmr_manager->cr_val &= ~(1 << XTMR_MANAGER_CR_BB_SHIFT);
xtmr_manager_write(xtmr_manager, XTMR_MANAGER_CR_OFFSET,
xtmr_manager->cr_val);
return size;
}
static DEVICE_ATTR_WO(dis_block_break);
static struct attribute *xtmr_manager_dev_attrs[] = {
&dev_attr_dis_block_break.attr,
&dev_attr_errcnt.attr,
NULL,
};
ATTRIBUTE_GROUPS(xtmr_manager_dev);
static void xtmr_manager_init(struct xtmr_manager_dev *xtmr_manager)
{
/* Clear the SEM interrupt mask register to disable the interrupt */
xtmr_manager_write(xtmr_manager, XTMR_MANAGER_SEMIMR_OFFSET, 0);
/* Allow recovery reset by default */
xtmr_manager->cr_val = (1 << XTMR_MANAGER_CR_RIR_SHIFT) |
xtmr_manager->magic1;
xtmr_manager_write(xtmr_manager, XTMR_MANAGER_CR_OFFSET,
xtmr_manager->cr_val);
/*
* Configure Break Delay Initialization Register to zero so that
* break occurs immediately
*/
xtmr_manager_write(xtmr_manager, XTMR_MANAGER_BDIR_OFFSET, 0);
/*
* To come out of the break handler, the break signal must be blocked
* in the TMR manager; update the cached cr_val accordingly.
*/
xtmr_manager->cr_val |= (1 << XTMR_MANAGER_CR_BB_SHIFT);
/*
* When the break vector gets asserted because of error injection,
* the break signal must be blocked before exiting from the
* break handler. The call below registers the TMR manager base address,
* the control register value and the error-counter callback, which the
* break handler uses to block the break and invoke the callback.
*/
xmb_manager_register(xtmr_manager->phys_baseaddr, xtmr_manager->cr_val,
(void *)xmb_manager_update_errcnt,
xtmr_manager, (void *)xmb_manager_reset_handler);
}
/**
* xtmr_manager_probe - Driver probe function
* @pdev: Pointer to the platform_device structure
*
* This is the driver probe routine. It does all the memory
* allocation for the device.
*
* Return: 0 on success and failure value on error
*/
static int xtmr_manager_probe(struct platform_device *pdev)
{
struct xtmr_manager_dev *xtmr_manager;
struct resource *res;
int err;
xtmr_manager = devm_kzalloc(&pdev->dev, sizeof(*xtmr_manager),
GFP_KERNEL);
if (!xtmr_manager)
return -ENOMEM;
xtmr_manager->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(xtmr_manager->regs))
return PTR_ERR(xtmr_manager->regs);
xtmr_manager->phys_baseaddr = res->start;
err = of_property_read_u32(pdev->dev.of_node, "xlnx,magic1",
&xtmr_manager->magic1);
if (err < 0) {
dev_err(&pdev->dev, "unable to read xlnx,magic1 property");
return err;
}
if (xtmr_manager->magic1 > XTMR_MANAGER_MAGIC1_MAX_VAL) {
dev_err(&pdev->dev, "invalid xlnx,magic1 property value");
return -EINVAL;
}
/* Initialize TMR Manager */
xtmr_manager_init(xtmr_manager);
platform_set_drvdata(pdev, xtmr_manager);
return 0;
}
static const struct of_device_id xtmr_manager_of_match[] = {
{
.compatible = "xlnx,tmr-manager-1.0",
},
{ /* end of table */ }
};
MODULE_DEVICE_TABLE(of, xtmr_manager_of_match);
static struct platform_driver xtmr_manager_driver = {
.driver = {
.name = "xilinx-tmr_manager",
.of_match_table = xtmr_manager_of_match,
.dev_groups = xtmr_manager_dev_groups,
},
.probe = xtmr_manager_probe,
};
module_platform_driver(xtmr_manager_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("Xilinx TMR Manager Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/xilinx_tmr_manager.c |
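/*
 * Editorial note -- hedged user-space sketch, not part of the driver above:
 * exercising the errcnt and dis_block_break attributes created through
 * xtmr_manager_dev_groups. The sysfs path is an assumption -- the unit
 * address "44a10000.tmr_manager" is a placeholder for whatever the device
 * tree provides on the target system.
 */
#include <stdio.h>

int main(void)
{
	const char *node = "/sys/bus/platform/devices/44a10000.tmr_manager";
	char path[128];
	unsigned int errcnt;
	FILE *f;

	/* errcnt_show() prints the error counter in hex */
	snprintf(path, sizeof(path), "%s/errcnt", node);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%x", &errcnt) == 1)
			printf("TMR error count: %u\n", errcnt);
		fclose(f);
	}

	/* any hex value written here unblocks the break signal */
	snprintf(path, sizeof(path), "%s/dis_block_break", node);
	f = fopen(path, "w");
	if (f) {
		fputs("1\n", f);
		fclose(f);
	}
	return 0;
}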
// SPDX-License-Identifier: GPL-2.0-only
/*
* Dummy IRQ handler driver.
*
* This module only registers itself as a handler for the IRQ line that is
* specified to it by the 'irq' parameter.
*
* The sole purpose of this module is to help with debugging of systems on
* which spurious IRQs would happen on a disabled IRQ vector.
*
* Copyright (C) 2013 Jiri Kosina
*/
#include <linux/module.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
static int irq = -1;
static irqreturn_t dummy_interrupt(int irq, void *dev_id)
{
static int count = 0;
if (count == 0) {
printk(KERN_INFO "dummy-irq: interrupt occurred on IRQ %d\n",
irq);
count++;
}
return IRQ_NONE;
}
static int __init dummy_irq_init(void)
{
if (irq < 0) {
printk(KERN_ERR "dummy-irq: no IRQ given. Use irq=N\n");
return -EIO;
}
if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) {
printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq);
return -EIO;
}
printk(KERN_INFO "dummy-irq: registered for IRQ %d\n", irq);
return 0;
}
static void __exit dummy_irq_exit(void)
{
printk(KERN_INFO "dummy-irq unloaded\n");
free_irq(irq, &irq);
}
module_init(dummy_irq_init);
module_exit(dummy_irq_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jiri Kosina");
module_param_hw(irq, uint, irq, 0444);
MODULE_PARM_DESC(irq, "The IRQ to register for");
MODULE_DESCRIPTION("Dummy IRQ handler driver");
| linux-master | drivers/misc/dummy-irq.c |
// SPDX-License-Identifier: GPL-2.0
/*
* ESM (Error Signal Monitor) driver for TI TPS6594/TPS6593/LP8764 PMICs
*
* Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/mfd/tps6594.h>
#define TPS6594_DEV_REV_1 0x08
static irqreturn_t tps6594_esm_isr(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
int i;
for (i = 0 ; i < pdev->num_resources ; i++) {
if (irq == platform_get_irq_byname(pdev, pdev->resource[i].name)) {
dev_err(pdev->dev.parent, "%s error detected\n", pdev->resource[i].name);
return IRQ_HANDLED;
}
}
return IRQ_NONE;
}
static int tps6594_esm_probe(struct platform_device *pdev)
{
struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
unsigned int rev;
int irq;
int ret;
int i;
/*
* Due to a bug in revision 1 of the PMIC, the GPIO3 used for the
* SoC ESM function is used to power the load switch instead.
* As a consequence, ESM cannot be used on those PMICs.
* Check the version and return an error in case of revision 1.
*/
ret = regmap_read(tps->regmap, TPS6594_REG_DEV_REV, &rev);
if (ret)
return dev_err_probe(dev, ret,
"Failed to read PMIC revision\n");
if (rev == TPS6594_DEV_REV_1)
return dev_err_probe(dev, -ENODEV,
"ESM not supported for revision 1 PMIC\n");
for (i = 0; i < pdev->num_resources; i++) {
irq = platform_get_irq_byname(pdev, pdev->resource[i].name);
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(dev, irq, NULL,
tps6594_esm_isr, IRQF_ONESHOT,
pdev->resource[i].name, pdev);
if (ret)
return dev_err_probe(dev, ret, "Failed to request irq\n");
}
ret = regmap_set_bits(tps->regmap, TPS6594_REG_ESM_SOC_MODE_CFG,
TPS6594_BIT_ESM_SOC_EN | TPS6594_BIT_ESM_SOC_ENDRV);
if (ret)
return dev_err_probe(dev, ret, "Failed to configure ESM\n");
ret = regmap_set_bits(tps->regmap, TPS6594_REG_ESM_SOC_START_REG,
TPS6594_BIT_ESM_SOC_START);
if (ret)
return dev_err_probe(dev, ret, "Failed to start ESM\n");
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
return 0;
}
static void tps6594_esm_remove(struct platform_device *pdev)
{
struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
int ret;
ret = regmap_clear_bits(tps->regmap, TPS6594_REG_ESM_SOC_START_REG,
TPS6594_BIT_ESM_SOC_START);
if (ret) {
dev_err(dev, "Failed to stop ESM\n");
goto out;
}
ret = regmap_clear_bits(tps->regmap, TPS6594_REG_ESM_SOC_MODE_CFG,
TPS6594_BIT_ESM_SOC_EN | TPS6594_BIT_ESM_SOC_ENDRV);
if (ret)
dev_err(dev, "Failed to unconfigure ESM\n");
out:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
}
static int tps6594_esm_suspend(struct device *dev)
{
struct tps6594 *tps = dev_get_drvdata(dev->parent);
int ret;
ret = regmap_clear_bits(tps->regmap, TPS6594_REG_ESM_SOC_START_REG,
TPS6594_BIT_ESM_SOC_START);
pm_runtime_put_sync(dev);
return ret;
}
static int tps6594_esm_resume(struct device *dev)
{
struct tps6594 *tps = dev_get_drvdata(dev->parent);
pm_runtime_get_sync(dev);
return regmap_set_bits(tps->regmap, TPS6594_REG_ESM_SOC_START_REG,
TPS6594_BIT_ESM_SOC_START);
}
static DEFINE_SIMPLE_DEV_PM_OPS(tps6594_esm_pm_ops, tps6594_esm_suspend, tps6594_esm_resume);
static struct platform_driver tps6594_esm_driver = {
.driver = {
.name = "tps6594-esm",
.pm = pm_sleep_ptr(&tps6594_esm_pm_ops),
},
.probe = tps6594_esm_probe,
.remove_new = tps6594_esm_remove,
};
module_platform_driver(tps6594_esm_driver);
MODULE_ALIAS("platform:tps6594-esm");
MODULE_AUTHOR("Julien Panis <[email protected]>");
MODULE_DESCRIPTION("TPS6594 Error Signal Monitor Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/tps6594-esm.c |
// SPDX-License-Identifier: GPL-2.0
/*
* PFSM (Pre-configurable Finite State Machine) driver for TI TPS6594/TPS6593/LP8764 PMICs
*
* Copyright (C) 2023 BayLibre Incorporated - https://www.baylibre.com/
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/tps6594.h>
#include <linux/tps6594_pfsm.h>
#define TPS6594_STARTUP_DEST_MCU_ONLY_VAL 2
#define TPS6594_STARTUP_DEST_ACTIVE_VAL 3
#define TPS6594_STARTUP_DEST_SHIFT 5
#define TPS6594_STARTUP_DEST_MCU_ONLY (TPS6594_STARTUP_DEST_MCU_ONLY_VAL \
<< TPS6594_STARTUP_DEST_SHIFT)
#define TPS6594_STARTUP_DEST_ACTIVE (TPS6594_STARTUP_DEST_ACTIVE_VAL \
<< TPS6594_STARTUP_DEST_SHIFT)
/*
* To update the PMIC firmware, the user must be able to access
* page 0 (user registers) and page 1 (NVM control and configuration).
*/
#define TPS6594_PMIC_MAX_POS 0x200
#define TPS6594_FILE_TO_PFSM(f) container_of((f)->private_data, struct tps6594_pfsm, miscdev)
/**
* struct tps6594_pfsm - device private data structure
*
* @miscdev: misc device information
* @regmap: regmap for accessing the device registers
*/
struct tps6594_pfsm {
struct miscdevice miscdev;
struct regmap *regmap;
};
static ssize_t tps6594_pfsm_read(struct file *f, char __user *buf,
size_t count, loff_t *ppos)
{
struct tps6594_pfsm *pfsm = TPS6594_FILE_TO_PFSM(f);
loff_t pos = *ppos;
unsigned int val;
int ret;
int i;
if (pos < 0)
return -EINVAL;
if (pos >= TPS6594_PMIC_MAX_POS)
return 0;
if (count > TPS6594_PMIC_MAX_POS - pos)
count = TPS6594_PMIC_MAX_POS - pos;
for (i = 0 ; i < count ; i++) {
ret = regmap_read(pfsm->regmap, pos + i, &val);
if (ret)
return ret;
if (put_user(val, buf + i))
return -EFAULT;
}
*ppos = pos + count;
return count;
}
static ssize_t tps6594_pfsm_write(struct file *f, const char __user *buf,
size_t count, loff_t *ppos)
{
struct tps6594_pfsm *pfsm = TPS6594_FILE_TO_PFSM(f);
loff_t pos = *ppos;
char val;
int ret;
int i;
if (pos < 0)
return -EINVAL;
if (pos >= TPS6594_PMIC_MAX_POS || !count)
return 0;
if (count > TPS6594_PMIC_MAX_POS - pos)
count = TPS6594_PMIC_MAX_POS - pos;
for (i = 0 ; i < count ; i++) {
if (get_user(val, buf + i))
return -EFAULT;
ret = regmap_write(pfsm->regmap, pos + i, val);
if (ret)
return ret;
}
*ppos = pos + count;
return count;
}
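/*
 * Illustrative userspace sketch for the read/write paths above (the
 * device node name and the register offset are assumptions, not taken
 * from this driver):
 *
 *   int fd = open("/dev/pfsm-0-0x48", O_RDWR);
 *   unsigned char rev;
 *
 *   lseek(fd, 0x01, SEEK_SET);
 *   read(fd, &rev, 1);   // one register per byte, page 0 then page 1
 *   close(fd);
 */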
static int tps6594_pfsm_configure_ret_trig(struct regmap *regmap, u8 gpio_ret, u8 ddr_ret)
{
int ret;
if (gpio_ret)
ret = regmap_set_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(5) | TPS6594_BIT_TRIGGER_I2C(6));
else
ret = regmap_clear_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(5) | TPS6594_BIT_TRIGGER_I2C(6));
if (ret)
return ret;
if (ddr_ret)
ret = regmap_set_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(7));
else
ret = regmap_clear_bits(regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(7));
return ret;
}
static long tps6594_pfsm_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
struct tps6594_pfsm *pfsm = TPS6594_FILE_TO_PFSM(f);
struct pmic_state_opt state_opt;
void __user *argp = (void __user *)arg;
int ret = -ENOIOCTLCMD;
switch (cmd) {
case PMIC_GOTO_STANDBY:
/* Disable LP mode */
ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
TPS6594_BIT_LP_STANDBY_SEL);
if (ret)
return ret;
/* Force trigger */
ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(0), TPS6594_BIT_TRIGGER_I2C(0));
break;
case PMIC_GOTO_LP_STANDBY:
/* Enable LP mode */
ret = regmap_set_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
TPS6594_BIT_LP_STANDBY_SEL);
if (ret)
return ret;
/* Force trigger */
ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(0), TPS6594_BIT_TRIGGER_I2C(0));
break;
case PMIC_UPDATE_PGM:
/* Force trigger */
ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_FSM_I2C_TRIGGERS,
TPS6594_BIT_TRIGGER_I2C(3), TPS6594_BIT_TRIGGER_I2C(3));
break;
case PMIC_SET_ACTIVE_STATE:
/* Modify NSLEEP1-2 bits */
ret = regmap_set_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
TPS6594_BIT_NSLEEP1B | TPS6594_BIT_NSLEEP2B);
break;
case PMIC_SET_MCU_ONLY_STATE:
if (copy_from_user(&state_opt, argp, sizeof(state_opt)))
return -EFAULT;
/* Configure retention triggers */
ret = tps6594_pfsm_configure_ret_trig(pfsm->regmap, state_opt.gpio_retention,
state_opt.ddr_retention);
if (ret)
return ret;
/* Modify NSLEEP1-2 bits */
ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
TPS6594_BIT_NSLEEP1B);
if (ret)
return ret;
ret = regmap_set_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
TPS6594_BIT_NSLEEP2B);
break;
case PMIC_SET_RETENTION_STATE:
if (copy_from_user(&state_opt, argp, sizeof(state_opt)))
return -EFAULT;
/* Configure wake-up destination */
if (state_opt.mcu_only_startup_dest)
ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
TPS6594_MASK_STARTUP_DEST,
TPS6594_STARTUP_DEST_MCU_ONLY);
else
ret = regmap_write_bits(pfsm->regmap, TPS6594_REG_RTC_CTRL_2,
TPS6594_MASK_STARTUP_DEST,
TPS6594_STARTUP_DEST_ACTIVE);
if (ret)
return ret;
/* Configure retention triggers */
ret = tps6594_pfsm_configure_ret_trig(pfsm->regmap, state_opt.gpio_retention,
state_opt.ddr_retention);
if (ret)
return ret;
/* Modify NSLEEP1-2 bits */
ret = regmap_clear_bits(pfsm->regmap, TPS6594_REG_FSM_NSLEEP_TRIGGERS,
TPS6594_BIT_NSLEEP2B);
break;
}
return ret;
}
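/*
 * Illustrative ioctl usage of the handler above (a sketch; the device
 * node name and the chosen retention options are assumptions): request
 * the MCU-only state with DDR retention enabled.
 *
 *   struct pmic_state_opt opt = { .ddr_retention = 1 };
 *   int fd = open("/dev/pfsm-0-0x48", O_RDWR);
 *
 *   ioctl(fd, PMIC_SET_MCU_ONLY_STATE, &opt);
 *   close(fd);
 */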
static const struct file_operations tps6594_pfsm_fops = {
.owner = THIS_MODULE,
.llseek = generic_file_llseek,
.read = tps6594_pfsm_read,
.write = tps6594_pfsm_write,
.unlocked_ioctl = tps6594_pfsm_ioctl,
.compat_ioctl = compat_ptr_ioctl,
};
static irqreturn_t tps6594_pfsm_isr(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
int i;
for (i = 0 ; i < pdev->num_resources ; i++) {
if (irq == platform_get_irq_byname(pdev, pdev->resource[i].name)) {
dev_err(pdev->dev.parent, "%s event detected\n", pdev->resource[i].name);
return IRQ_HANDLED;
}
}
return IRQ_NONE;
}
static int tps6594_pfsm_probe(struct platform_device *pdev)
{
struct tps6594_pfsm *pfsm;
struct tps6594 *tps = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
int irq;
int ret;
int i;
pfsm = devm_kzalloc(dev, sizeof(struct tps6594_pfsm), GFP_KERNEL);
if (!pfsm)
return -ENOMEM;
pfsm->regmap = tps->regmap;
pfsm->miscdev.minor = MISC_DYNAMIC_MINOR;
pfsm->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "pfsm-%ld-0x%02x",
tps->chip_id, tps->reg);
pfsm->miscdev.fops = &tps6594_pfsm_fops;
pfsm->miscdev.parent = dev->parent;
for (i = 0 ; i < pdev->num_resources ; i++) {
irq = platform_get_irq_byname(pdev, pdev->resource[i].name);
if (irq < 0)
return irq;
ret = devm_request_threaded_irq(dev, irq, NULL,
tps6594_pfsm_isr, IRQF_ONESHOT,
pdev->resource[i].name, pdev);
if (ret)
return dev_err_probe(dev, ret, "Failed to request irq\n");
}
platform_set_drvdata(pdev, pfsm);
return misc_register(&pfsm->miscdev);
}
static void tps6594_pfsm_remove(struct platform_device *pdev)
{
struct tps6594_pfsm *pfsm = platform_get_drvdata(pdev);
misc_deregister(&pfsm->miscdev);
}
static struct platform_driver tps6594_pfsm_driver = {
.driver = {
.name = "tps6594-pfsm",
},
.probe = tps6594_pfsm_probe,
.remove_new = tps6594_pfsm_remove,
};
module_platform_driver(tps6594_pfsm_driver);
MODULE_ALIAS("platform:tps6594-pfsm");
MODULE_AUTHOR("Julien Panis <[email protected]>");
MODULE_DESCRIPTION("TPS6594 Pre-configurable Finite State Machine Driver");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/tps6594-pfsm.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* tifm_7xx1.c - TI FlashMedia driver
*
* Copyright (C) 2006 Alex Dubov <[email protected]>
*/
#include <linux/tifm.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#define DRIVER_NAME "tifm_7xx1"
#define DRIVER_VERSION "0.8"
#define TIFM_IRQ_ENABLE 0x80000000
#define TIFM_IRQ_SOCKMASK(x) (x)
#define TIFM_IRQ_CARDMASK(x) ((x) << 8)
#define TIFM_IRQ_FIFOMASK(x) ((x) << 16)
#define TIFM_IRQ_SETALL 0xffffffff
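/*
 * A note on the layout implied by the masks above (an inference from
 * the ISR below, not from a datasheet): the low bits of
 * FM_INTERRUPT_STATUS carry per-socket status, the same bits shifted
 * by 8 carry card events, and the bits shifted by 16 carry FIFO events.
 */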
static void tifm_7xx1_dummy_eject(struct tifm_adapter *fm,
struct tifm_dev *sock)
{
}
static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
{
unsigned long flags;
spin_lock_irqsave(&fm->lock, flags);
fm->socket_change_set |= 1 << sock->socket_id;
tifm_queue_work(&fm->media_switcher);
spin_unlock_irqrestore(&fm->lock, flags);
}
static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
{
struct tifm_adapter *fm = dev_id;
struct tifm_dev *sock;
unsigned int irq_status, cnt;
spin_lock(&fm->lock);
irq_status = readl(fm->addr + FM_INTERRUPT_STATUS);
if (irq_status == 0 || irq_status == (~0)) {
spin_unlock(&fm->lock);
return IRQ_NONE;
}
if (irq_status & TIFM_IRQ_ENABLE) {
writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
for (cnt = 0; cnt < fm->num_sockets; cnt++) {
sock = fm->sockets[cnt];
if (sock) {
if ((irq_status >> cnt) & TIFM_IRQ_FIFOMASK(1))
sock->data_event(sock);
if ((irq_status >> cnt) & TIFM_IRQ_CARDMASK(1))
sock->card_event(sock);
}
}
fm->socket_change_set |= irq_status
& ((1 << fm->num_sockets) - 1);
}
writel(irq_status, fm->addr + FM_INTERRUPT_STATUS);
if (fm->finish_me)
complete_all(fm->finish_me);
else if (!fm->socket_change_set)
writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
else
tifm_queue_work(&fm->media_switcher);
spin_unlock(&fm->lock);
return IRQ_HANDLED;
}
static unsigned char tifm_7xx1_toggle_sock_power(char __iomem *sock_addr)
{
unsigned int s_state;
int cnt;
writel(0x0e00, sock_addr + SOCK_CONTROL);
for (cnt = 16; cnt <= 256; cnt <<= 1) {
if (!(TIFM_SOCK_STATE_POWERED
& readl(sock_addr + SOCK_PRESENT_STATE)))
break;
msleep(cnt);
}
s_state = readl(sock_addr + SOCK_PRESENT_STATE);
if (!(TIFM_SOCK_STATE_OCCUPIED & s_state))
return 0;
writel(readl(sock_addr + SOCK_CONTROL) | TIFM_CTRL_LED,
sock_addr + SOCK_CONTROL);
/* xd needs some extra time before power on */
if (((readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7)
== TIFM_TYPE_XD)
msleep(40);
writel((s_state & TIFM_CTRL_POWER_MASK) | 0x0c00,
sock_addr + SOCK_CONTROL);
/* wait for power to stabilize */
msleep(20);
for (cnt = 16; cnt <= 256; cnt <<= 1) {
if ((TIFM_SOCK_STATE_POWERED
& readl(sock_addr + SOCK_PRESENT_STATE)))
break;
msleep(cnt);
}
writel(readl(sock_addr + SOCK_CONTROL) & (~TIFM_CTRL_LED),
sock_addr + SOCK_CONTROL);
return (readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7;
}
static inline void tifm_7xx1_sock_power_off(char __iomem *sock_addr)
{
writel((~TIFM_CTRL_POWER_MASK) & readl(sock_addr + SOCK_CONTROL),
sock_addr + SOCK_CONTROL);
}
static inline char __iomem *
tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
{
return base_addr + ((sock_num + 1) << 10);
}
static void tifm_7xx1_switch_media(struct work_struct *work)
{
struct tifm_adapter *fm = container_of(work, struct tifm_adapter,
media_switcher);
struct tifm_dev *sock;
char __iomem *sock_addr;
unsigned long flags;
unsigned char media_id;
unsigned int socket_change_set, cnt;
spin_lock_irqsave(&fm->lock, flags);
socket_change_set = fm->socket_change_set;
fm->socket_change_set = 0;
dev_dbg(fm->dev.parent, "checking media set %x\n",
socket_change_set);
if (!socket_change_set) {
spin_unlock_irqrestore(&fm->lock, flags);
return;
}
for (cnt = 0; cnt < fm->num_sockets; cnt++) {
if (!(socket_change_set & (1 << cnt)))
continue;
sock = fm->sockets[cnt];
if (sock) {
printk(KERN_INFO
"%s : demand removing card from socket %u:%u\n",
dev_name(&fm->dev), fm->id, cnt);
fm->sockets[cnt] = NULL;
sock_addr = sock->addr;
spin_unlock_irqrestore(&fm->lock, flags);
device_unregister(&sock->dev);
spin_lock_irqsave(&fm->lock, flags);
tifm_7xx1_sock_power_off(sock_addr);
writel(0x0e00, sock_addr + SOCK_CONTROL);
}
spin_unlock_irqrestore(&fm->lock, flags);
media_id = tifm_7xx1_toggle_sock_power(
tifm_7xx1_sock_addr(fm->addr, cnt));
// tifm_alloc_device will check if media_id is valid
sock = tifm_alloc_device(fm, cnt, media_id);
if (sock) {
sock->addr = tifm_7xx1_sock_addr(fm->addr, cnt);
if (!device_register(&sock->dev)) {
spin_lock_irqsave(&fm->lock, flags);
if (!fm->sockets[cnt]) {
fm->sockets[cnt] = sock;
sock = NULL;
}
spin_unlock_irqrestore(&fm->lock, flags);
}
if (sock)
put_device(&sock->dev);
}
spin_lock_irqsave(&fm->lock, flags);
}
writel(TIFM_IRQ_FIFOMASK(socket_change_set)
| TIFM_IRQ_CARDMASK(socket_change_set),
fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
writel(TIFM_IRQ_FIFOMASK(socket_change_set)
| TIFM_IRQ_CARDMASK(socket_change_set),
fm->addr + FM_SET_INTERRUPT_ENABLE);
writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
spin_unlock_irqrestore(&fm->lock, flags);
}
static int __maybe_unused tifm_7xx1_suspend(struct device *dev_d)
{
struct pci_dev *dev = to_pci_dev(dev_d);
struct tifm_adapter *fm = pci_get_drvdata(dev);
int cnt;
dev_dbg(&dev->dev, "suspending host\n");
for (cnt = 0; cnt < fm->num_sockets; cnt++) {
if (fm->sockets[cnt])
tifm_7xx1_sock_power_off(fm->sockets[cnt]->addr);
}
device_wakeup_disable(dev_d);
return 0;
}
static int __maybe_unused tifm_7xx1_resume(struct device *dev_d)
{
struct pci_dev *dev = to_pci_dev(dev_d);
struct tifm_adapter *fm = pci_get_drvdata(dev);
int rc;
unsigned long timeout;
unsigned int good_sockets = 0, bad_sockets = 0;
unsigned long flags;
/* Maximum number of entries is 4 */
unsigned char new_ids[4];
DECLARE_COMPLETION_ONSTACK(finish_resume);
if (WARN_ON(fm->num_sockets > ARRAY_SIZE(new_ids)))
return -ENXIO;
pci_set_master(dev);
dev_dbg(&dev->dev, "resuming host\n");
for (rc = 0; rc < fm->num_sockets; rc++)
new_ids[rc] = tifm_7xx1_toggle_sock_power(
tifm_7xx1_sock_addr(fm->addr, rc));
spin_lock_irqsave(&fm->lock, flags);
for (rc = 0; rc < fm->num_sockets; rc++) {
if (fm->sockets[rc]) {
if (fm->sockets[rc]->type == new_ids[rc])
good_sockets |= 1 << rc;
else
bad_sockets |= 1 << rc;
}
}
writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
fm->addr + FM_SET_INTERRUPT_ENABLE);
dev_dbg(&dev->dev, "change sets on resume: good %x, bad %x\n",
good_sockets, bad_sockets);
fm->socket_change_set = 0;
if (good_sockets) {
fm->finish_me = &finish_resume;
spin_unlock_irqrestore(&fm->lock, flags);
timeout = wait_for_completion_timeout(&finish_resume, HZ);
dev_dbg(&dev->dev, "wait returned %lu\n", timeout);
writel(TIFM_IRQ_FIFOMASK(good_sockets)
| TIFM_IRQ_CARDMASK(good_sockets),
fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
writel(TIFM_IRQ_FIFOMASK(good_sockets)
| TIFM_IRQ_CARDMASK(good_sockets),
fm->addr + FM_SET_INTERRUPT_ENABLE);
spin_lock_irqsave(&fm->lock, flags);
fm->finish_me = NULL;
fm->socket_change_set ^= good_sockets & fm->socket_change_set;
}
fm->socket_change_set |= bad_sockets;
if (fm->socket_change_set)
tifm_queue_work(&fm->media_switcher);
spin_unlock_irqrestore(&fm->lock, flags);
writel(TIFM_IRQ_ENABLE,
fm->addr + FM_SET_INTERRUPT_ENABLE);
return 0;
}
static int tifm_7xx1_dummy_has_ms_pif(struct tifm_adapter *fm,
struct tifm_dev *sock)
{
return 0;
}
static int tifm_7xx1_has_ms_pif(struct tifm_adapter *fm, struct tifm_dev *sock)
{
if (((fm->num_sockets == 4) && (sock->socket_id == 2))
|| ((fm->num_sockets == 2) && (sock->socket_id == 0)))
return 1;
return 0;
}
static int tifm_7xx1_probe(struct pci_dev *dev,
const struct pci_device_id *dev_id)
{
struct tifm_adapter *fm;
int pci_dev_busy = 0;
int rc;
rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
if (rc)
return rc;
rc = pci_enable_device(dev);
if (rc)
return rc;
pci_set_master(dev);
rc = pci_request_regions(dev, DRIVER_NAME);
if (rc) {
pci_dev_busy = 1;
goto err_out;
}
pci_intx(dev, 1);
fm = tifm_alloc_adapter(dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM
? 4 : 2, &dev->dev);
if (!fm) {
rc = -ENOMEM;
goto err_out_int;
}
INIT_WORK(&fm->media_switcher, tifm_7xx1_switch_media);
fm->eject = tifm_7xx1_eject;
fm->has_ms_pif = tifm_7xx1_has_ms_pif;
pci_set_drvdata(dev, fm);
fm->addr = pci_ioremap_bar(dev, 0);
if (!fm->addr) {
rc = -ENODEV;
goto err_out_free;
}
rc = request_irq(dev->irq, tifm_7xx1_isr, IRQF_SHARED, DRIVER_NAME, fm);
if (rc)
goto err_out_unmap;
rc = tifm_add_adapter(fm);
if (rc)
goto err_out_irq;
writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
fm->addr + FM_SET_INTERRUPT_ENABLE);
return 0;
err_out_irq:
free_irq(dev->irq, fm);
err_out_unmap:
iounmap(fm->addr);
err_out_free:
tifm_free_adapter(fm);
err_out_int:
pci_intx(dev, 0);
pci_release_regions(dev);
err_out:
if (!pci_dev_busy)
pci_disable_device(dev);
return rc;
}
static void tifm_7xx1_remove(struct pci_dev *dev)
{
struct tifm_adapter *fm = pci_get_drvdata(dev);
int cnt;
fm->eject = tifm_7xx1_dummy_eject;
fm->has_ms_pif = tifm_7xx1_dummy_has_ms_pif;
writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
free_irq(dev->irq, fm);
tifm_remove_adapter(fm);
for (cnt = 0; cnt < fm->num_sockets; cnt++)
tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt));
iounmap(fm->addr);
pci_intx(dev, 0);
pci_release_regions(dev);
pci_disable_device(dev);
tifm_free_adapter(fm);
}
static const struct pci_device_id tifm_7xx1_pci_tbl[] = {
{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 }, /* xx21 - the one I have */
{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX20_FM, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ }
};
static SIMPLE_DEV_PM_OPS(tifm_7xx1_pm_ops, tifm_7xx1_suspend, tifm_7xx1_resume);
static struct pci_driver tifm_7xx1_driver = {
.name = DRIVER_NAME,
.id_table = tifm_7xx1_pci_tbl,
.probe = tifm_7xx1_probe,
.remove = tifm_7xx1_remove,
.driver.pm = &tifm_7xx1_pm_ops,
};
module_pci_driver(tifm_7xx1_driver);
MODULE_AUTHOR("Alex Dubov");
MODULE_DESCRIPTION("TI FlashMedia host driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, tifm_7xx1_pci_tbl);
MODULE_VERSION(DRIVER_VERSION);
| linux-master | drivers/misc/tifm_7xx1.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* isl29020.c - Intersil ALS Driver
*
* Copyright (C) 2008 Intel Corp
*
*
* Data sheet at: http://www.intersil.com/data/fn/fn6505.pdf
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/pm_runtime.h>
static DEFINE_MUTEX(mutex);
static ssize_t als_sensing_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
int val;
val = i2c_smbus_read_byte_data(client, 0x00);
if (val < 0)
return val;
return sprintf(buf, "%d000\n", 1 << (2 * (val & 3)));
}
static ssize_t als_lux_input_data_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
int ret_val, val;
unsigned long int lux;
int temp;
pm_runtime_get_sync(dev);
msleep(100);
mutex_lock(&mutex);
temp = i2c_smbus_read_byte_data(client, 0x02); /* MSB data */
if (temp < 0) {
pm_runtime_put_sync(dev);
mutex_unlock(&mutex);
return temp;
}
ret_val = i2c_smbus_read_byte_data(client, 0x01); /* LSB data */
mutex_unlock(&mutex);
if (ret_val < 0) {
pm_runtime_put_sync(dev);
return ret_val;
}
ret_val |= temp << 8;
val = i2c_smbus_read_byte_data(client, 0x00);
pm_runtime_put_sync(dev);
if (val < 0)
return val;
lux = ((((1 << (2 * (val & 3))))*1000) * ret_val) / 65536;
return sprintf(buf, "%ld\n", lux);
}
static ssize_t als_sensing_range_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
int ret_val;
unsigned long val;
ret_val = kstrtoul(buf, 10, &val);
if (ret_val)
return ret_val;
if (val < 1 || val > 64000)
return -EINVAL;
/* Pick the smallest sensor range that will meet our requirements */
if (val <= 1000)
val = 1;
else if (val <= 4000)
val = 2;
else if (val <= 16000)
val = 3;
else
val = 4;
ret_val = i2c_smbus_read_byte_data(client, 0x00);
if (ret_val < 0)
return ret_val;
ret_val &= 0xFC; /* reset the bits before setting them */
ret_val |= val - 1;
ret_val = i2c_smbus_write_byte_data(client, 0x00, ret_val);
if (ret_val < 0)
return ret_val;
return count;
}
static void als_set_power_state(struct i2c_client *client, int enable)
{
int ret_val;
ret_val = i2c_smbus_read_byte_data(client, 0x00);
if (ret_val < 0)
return;
if (enable)
ret_val |= 0x80;
else
ret_val &= 0x7F;
i2c_smbus_write_byte_data(client, 0x00, ret_val);
}
static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
als_sensing_range_show, als_sensing_range_store);
static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux_input_data_show, NULL);
static struct attribute *mid_att_als[] = {
&dev_attr_lux0_sensor_range.attr,
&dev_attr_lux0_input.attr,
NULL
};
static const struct attribute_group m_als_gr = {
.name = "isl29020",
.attrs = mid_att_als
};
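/*
 * Illustrative sysfs usage of the attributes above (the i2c bus number
 * and address in the path are assumptions):
 *
 *   echo 16000 > /sys/bus/i2c/devices/1-0044/isl29020/lux0_sensor_range
 *   cat /sys/bus/i2c/devices/1-0044/isl29020/lux0_input
 */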
static int als_set_default_config(struct i2c_client *client)
{
int retval;
retval = i2c_smbus_write_byte_data(client, 0x00, 0xc0);
if (retval < 0) {
dev_err(&client->dev, "default write failed.");
return retval;
}
return 0;
}
static int isl29020_probe(struct i2c_client *client)
{
int res;
res = als_set_default_config(client);
if (res < 0)
return res;
res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
if (res) {
dev_err(&client->dev, "isl29020: device create file failed\n");
return res;
}
dev_info(&client->dev, "%s isl29020: ALS chip found\n", client->name);
als_set_power_state(client, 0);
pm_runtime_enable(&client->dev);
return res;
}
static void isl29020_remove(struct i2c_client *client)
{
pm_runtime_disable(&client->dev);
sysfs_remove_group(&client->dev.kobj, &m_als_gr);
}
static const struct i2c_device_id isl29020_id[] = {
{ "isl29020", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, isl29020_id);
#ifdef CONFIG_PM
static int isl29020_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
als_set_power_state(client, 0);
return 0;
}
static int isl29020_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
als_set_power_state(client, 1);
return 0;
}
static const struct dev_pm_ops isl29020_pm_ops = {
.runtime_suspend = isl29020_runtime_suspend,
.runtime_resume = isl29020_runtime_resume,
};
#define ISL29020_PM_OPS (&isl29020_pm_ops)
#else /* CONFIG_PM */
#define ISL29020_PM_OPS NULL
#endif /* CONFIG_PM */
static struct i2c_driver isl29020_driver = {
.driver = {
.name = "isl29020",
.pm = ISL29020_PM_OPS,
},
.probe = isl29020_probe,
.remove = isl29020_remove,
.id_table = isl29020_id,
};
module_i2c_driver(isl29020_driver);
MODULE_AUTHOR("Kalhan Trisal <[email protected]>");
MODULE_DESCRIPTION("Intersil isl29020 ALS Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/isl29020.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
* Copyright (c) 2015, Sony Mobile Communications Inc.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/regmap.h>
#include <linux/platform_device.h>
struct qcom_coincell {
struct device *dev;
struct regmap *regmap;
u32 base_addr;
};
#define QCOM_COINCELL_REG_RSET 0x44
#define QCOM_COINCELL_REG_VSET 0x45
#define QCOM_COINCELL_REG_ENABLE 0x46
#define QCOM_COINCELL_ENABLE BIT(7)
static const int qcom_rset_map[] = { 2100, 1700, 1200, 800 };
static const int qcom_vset_map[] = { 2500, 3200, 3100, 3000 };
/* NOTE: for pm8921 and others, voltage of 2500 is 16 (10000b), not 0 */
/* if enable==0, rset and vset are ignored */
static int qcom_coincell_chgr_config(struct qcom_coincell *chgr, int rset,
int vset, bool enable)
{
int i, j, rc;
/* if disabling, just do that and skip other operations */
if (!enable)
return regmap_write(chgr->regmap,
chgr->base_addr + QCOM_COINCELL_REG_ENABLE, 0);
/* find index for current-limiting resistor */
for (i = 0; i < ARRAY_SIZE(qcom_rset_map); i++)
if (rset == qcom_rset_map[i])
break;
if (i >= ARRAY_SIZE(qcom_rset_map)) {
dev_err(chgr->dev, "invalid rset-ohms value %d\n", rset);
return -EINVAL;
}
/* find index for charge voltage */
for (j = 0; j < ARRAY_SIZE(qcom_vset_map); j++)
if (vset == qcom_vset_map[j])
break;
if (j >= ARRAY_SIZE(qcom_vset_map)) {
dev_err(chgr->dev, "invalid vset-millivolts value %d\n", vset);
return -EINVAL;
}
rc = regmap_write(chgr->regmap,
chgr->base_addr + QCOM_COINCELL_REG_RSET, i);
if (rc) {
/*
* This is mainly to flag a bad base_addr (reg) from dts.
* Other failures writing to the registers should be
* extremely rare, or indicative of problems that
* should be reported elsewhere (eg. spmi failure).
*/
dev_err(chgr->dev, "could not write to RSET register\n");
return rc;
}
rc = regmap_write(chgr->regmap,
chgr->base_addr + QCOM_COINCELL_REG_VSET, j);
if (rc)
return rc;
/* set 'enable' register */
return regmap_write(chgr->regmap,
chgr->base_addr + QCOM_COINCELL_REG_ENABLE,
QCOM_COINCELL_ENABLE);
}
static int qcom_coincell_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
struct qcom_coincell chgr;
u32 rset = 0;
u32 vset = 0;
bool enable;
int rc;
chgr.dev = &pdev->dev;
chgr.regmap = dev_get_regmap(pdev->dev.parent, NULL);
if (!chgr.regmap) {
dev_err(chgr.dev, "Unable to get regmap\n");
return -EINVAL;
}
rc = of_property_read_u32(node, "reg", &chgr.base_addr);
if (rc)
return rc;
enable = !of_property_read_bool(node, "qcom,charger-disable");
if (enable) {
rc = of_property_read_u32(node, "qcom,rset-ohms", &rset);
if (rc) {
dev_err(chgr.dev,
"can't find 'qcom,rset-ohms' in DT block");
return rc;
}
rc = of_property_read_u32(node, "qcom,vset-millivolts", &vset);
if (rc) {
dev_err(chgr.dev,
"can't find 'qcom,vset-millivolts' in DT block");
return rc;
}
}
return qcom_coincell_chgr_config(&chgr, rset, vset, enable);
}
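/*
 * Illustrative devicetree fragment matching the properties parsed
 * above (the node name and reg value are assumptions):
 *
 *   coincell@2800 {
 *           compatible = "qcom,pm8941-coincell";
 *           reg = <0x2800>;
 *           qcom,rset-ohms = <2100>;
 *           qcom,vset-millivolts = <3000>;
 *   };
 */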
static const struct of_device_id qcom_coincell_match_table[] = {
{ .compatible = "qcom,pm8941-coincell", },
{}
};
MODULE_DEVICE_TABLE(of, qcom_coincell_match_table);
static struct platform_driver qcom_coincell_driver = {
.driver = {
.name = "qcom-spmi-coincell",
.of_match_table = qcom_coincell_match_table,
},
.probe = qcom_coincell_probe,
};
module_platform_driver(qcom_coincell_driver);
MODULE_DESCRIPTION("Qualcomm PMIC coincell charger driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/qcom-coincell.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* A driver for the Integrated Circuits ICS932S401
* Copyright (C) 2008 IBM
*
* Author: Darrick J. Wong <[email protected]>
*/
#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/slab.h>
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x69, I2C_CLIENT_END };
/* ICS932S401 registers */
#define ICS932S401_REG_CFG2 0x01
#define ICS932S401_CFG1_SPREAD 0x01
#define ICS932S401_REG_CFG7 0x06
#define ICS932S401_FS_MASK 0x07
#define ICS932S401_REG_VENDOR_REV 0x07
#define ICS932S401_VENDOR 1
#define ICS932S401_VENDOR_MASK 0x0F
#define ICS932S401_REV 4
#define ICS932S401_REV_SHIFT 4
#define ICS932S401_REG_DEVICE 0x09
#define ICS932S401_DEVICE 11
#define ICS932S401_REG_CTRL 0x0A
#define ICS932S401_MN_ENABLED 0x80
#define ICS932S401_CPU_ALT 0x04
#define ICS932S401_SRC_ALT 0x08
#define ICS932S401_REG_CPU_M_CTRL 0x0B
#define ICS932S401_M_MASK 0x3F
#define ICS932S401_REG_CPU_N_CTRL 0x0C
#define ICS932S401_REG_CPU_SPREAD1 0x0D
#define ICS932S401_REG_CPU_SPREAD2 0x0E
#define ICS932S401_SPREAD_MASK 0x7FFF
#define ICS932S401_REG_SRC_M_CTRL 0x0F
#define ICS932S401_REG_SRC_N_CTRL 0x10
#define ICS932S401_REG_SRC_SPREAD1 0x11
#define ICS932S401_REG_SRC_SPREAD2 0x12
#define ICS932S401_REG_CPU_DIVISOR 0x13
#define ICS932S401_CPU_DIVISOR_SHIFT 4
#define ICS932S401_REG_PCISRC_DIVISOR 0x14
#define ICS932S401_SRC_DIVISOR_MASK 0x0F
#define ICS932S401_PCI_DIVISOR_SHIFT 4
/* Base clock is 14.318MHz */
#define BASE_CLOCK 14318
#define NUM_REGS 21
#define NUM_MIRRORED_REGS 15
static int regs_to_copy[NUM_MIRRORED_REGS] = {
ICS932S401_REG_CFG2,
ICS932S401_REG_CFG7,
ICS932S401_REG_VENDOR_REV,
ICS932S401_REG_DEVICE,
ICS932S401_REG_CTRL,
ICS932S401_REG_CPU_M_CTRL,
ICS932S401_REG_CPU_N_CTRL,
ICS932S401_REG_CPU_SPREAD1,
ICS932S401_REG_CPU_SPREAD2,
ICS932S401_REG_SRC_M_CTRL,
ICS932S401_REG_SRC_N_CTRL,
ICS932S401_REG_SRC_SPREAD1,
ICS932S401_REG_SRC_SPREAD2,
ICS932S401_REG_CPU_DIVISOR,
ICS932S401_REG_PCISRC_DIVISOR,
};
/* How often do we reread sensors values? (In jiffies) */
#define SENSOR_REFRESH_INTERVAL (2 * HZ)
/* How often do we reread sensor limit values? (In jiffies) */
#define LIMIT_REFRESH_INTERVAL (60 * HZ)
struct ics932s401_data {
struct attribute_group attrs;
struct mutex lock;
char sensors_valid;
unsigned long sensors_last_updated; /* In jiffies */
u8 regs[NUM_REGS];
};
static int ics932s401_probe(struct i2c_client *client);
static int ics932s401_detect(struct i2c_client *client,
struct i2c_board_info *info);
static void ics932s401_remove(struct i2c_client *client);
static const struct i2c_device_id ics932s401_id[] = {
{ "ics932s401", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, ics932s401_id);
static struct i2c_driver ics932s401_driver = {
.class = I2C_CLASS_HWMON,
.driver = {
.name = "ics932s401",
},
.probe = ics932s401_probe,
.remove = ics932s401_remove,
.id_table = ics932s401_id,
.detect = ics932s401_detect,
.address_list = normal_i2c,
};
static struct ics932s401_data *ics932s401_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct ics932s401_data *data = i2c_get_clientdata(client);
unsigned long local_jiffies = jiffies;
int i, temp;
mutex_lock(&data->lock);
if (time_before(local_jiffies, data->sensors_last_updated +
SENSOR_REFRESH_INTERVAL)
&& data->sensors_valid)
goto out;
/*
* Each register must be read as a word and then right shifted 8 bits.
* Not really sure why this is; setting the "byte count programming"
* register to 1 does not fix this problem.
*/
for (i = 0; i < NUM_MIRRORED_REGS; i++) {
temp = i2c_smbus_read_word_data(client, regs_to_copy[i]);
if (temp < 0)
temp = 0;
data->regs[regs_to_copy[i]] = temp >> 8;
}
data->sensors_last_updated = local_jiffies;
data->sensors_valid = 1;
out:
mutex_unlock(&data->lock);
return data;
}
static ssize_t show_spread_enabled(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct ics932s401_data *data = ics932s401_update_device(dev);
if (data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD)
return sprintf(buf, "1\n");
return sprintf(buf, "0\n");
}
/* bit to cpu khz map */
static const int fs_speeds[] = {
266666,
133333,
200000,
166666,
333333,
100000,
400000,
0,
};
/* clock divisor map */
static const int divisors[] = {2, 3, 5, 15, 4, 6, 10, 30, 8, 12, 20, 60, 16,
24, 40, 120};
/* Calculate CPU frequency from the M/N registers. */
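/*
 * freq = BASE_CLOCK * (N + 8) / (M + 2) / divisor, in kHz. Worked
 * example with illustrative register values (not from a real board):
 * N = 39, M = 2 and a divisor of 2 give 14318 * 47 / 4 = 168236,
 * then 168236 / 2 = 84118 kHz.
 */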
static int calculate_cpu_freq(struct ics932s401_data *data)
{
int m, n, freq;
m = data->regs[ICS932S401_REG_CPU_M_CTRL] & ICS932S401_M_MASK;
n = data->regs[ICS932S401_REG_CPU_N_CTRL];
/* Pull in bits 8 & 9 from the M register */
n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x80) << 1;
n |= ((int)data->regs[ICS932S401_REG_CPU_M_CTRL] & 0x40) << 3;
freq = BASE_CLOCK * (n + 8) / (m + 2);
freq /= divisors[data->regs[ICS932S401_REG_CPU_DIVISOR] >>
ICS932S401_CPU_DIVISOR_SHIFT];
return freq;
}
static ssize_t show_cpu_clock(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct ics932s401_data *data = ics932s401_update_device(dev);
return sprintf(buf, "%d\n", calculate_cpu_freq(data));
}
static ssize_t show_cpu_clock_sel(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct ics932s401_data *data = ics932s401_update_device(dev);
int freq;
if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED)
freq = calculate_cpu_freq(data);
else {
/* Freq is neatly wrapped up for us */
int fid = data->regs[ICS932S401_REG_CFG7] & ICS932S401_FS_MASK;
freq = fs_speeds[fid];
if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT) {
switch (freq) {
case 166666:
freq = 160000;
break;
case 333333:
freq = 320000;
break;
}
}
}
return sprintf(buf, "%d\n", freq);
}
/* Calculate SRC frequency from the M/N registers. */
static int calculate_src_freq(struct ics932s401_data *data)
{
int m, n, freq;
m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK;
n = data->regs[ICS932S401_REG_SRC_N_CTRL];
/* Pull in bits 8 & 9 from the M register */
n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1;
n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3;
freq = BASE_CLOCK * (n + 8) / (m + 2);
freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] &
ICS932S401_SRC_DIVISOR_MASK];
return freq;
}
static ssize_t show_src_clock(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct ics932s401_data *data = ics932s401_update_device(dev);
return sprintf(buf, "%d\n", calculate_src_freq(data));
}
static ssize_t show_src_clock_sel(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct ics932s401_data *data = ics932s401_update_device(dev);
int freq;
if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED)
freq = calculate_src_freq(data);
else
/* Freq is neatly wrapped up for us */
if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_CPU_ALT &&
data->regs[ICS932S401_REG_CTRL] & ICS932S401_SRC_ALT)
freq = 96000;
else
freq = 100000;
return sprintf(buf, "%d\n", freq);
}
/* Calculate PCI frequency from the SRC M/N registers. */
static int calculate_pci_freq(struct ics932s401_data *data)
{
int m, n, freq;
m = data->regs[ICS932S401_REG_SRC_M_CTRL] & ICS932S401_M_MASK;
n = data->regs[ICS932S401_REG_SRC_N_CTRL];
/* Pull in bits 8 & 9 from the M register */
n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x80) << 1;
n |= ((int)data->regs[ICS932S401_REG_SRC_M_CTRL] & 0x40) << 3;
freq = BASE_CLOCK * (n + 8) / (m + 2);
freq /= divisors[data->regs[ICS932S401_REG_PCISRC_DIVISOR] >>
ICS932S401_PCI_DIVISOR_SHIFT];
return freq;
}
static ssize_t show_pci_clock(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct ics932s401_data *data = ics932s401_update_device(dev);
return sprintf(buf, "%d\n", calculate_pci_freq(data));
}
static ssize_t show_pci_clock_sel(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct ics932s401_data *data = ics932s401_update_device(dev);
int freq;
if (data->regs[ICS932S401_REG_CTRL] & ICS932S401_MN_ENABLED)
freq = calculate_pci_freq(data);
else
freq = 33333;
return sprintf(buf, "%d\n", freq);
}
static ssize_t show_value(struct device *dev,
struct device_attribute *devattr,
char *buf);
static ssize_t show_spread(struct device *dev,
struct device_attribute *devattr,
char *buf);
static DEVICE_ATTR(spread_enabled, S_IRUGO, show_spread_enabled, NULL);
static DEVICE_ATTR(cpu_clock_selection, S_IRUGO, show_cpu_clock_sel, NULL);
static DEVICE_ATTR(cpu_clock, S_IRUGO, show_cpu_clock, NULL);
static DEVICE_ATTR(src_clock_selection, S_IRUGO, show_src_clock_sel, NULL);
static DEVICE_ATTR(src_clock, S_IRUGO, show_src_clock, NULL);
static DEVICE_ATTR(pci_clock_selection, S_IRUGO, show_pci_clock_sel, NULL);
static DEVICE_ATTR(pci_clock, S_IRUGO, show_pci_clock, NULL);
static DEVICE_ATTR(usb_clock, S_IRUGO, show_value, NULL);
static DEVICE_ATTR(ref_clock, S_IRUGO, show_value, NULL);
static DEVICE_ATTR(cpu_spread, S_IRUGO, show_spread, NULL);
static DEVICE_ATTR(src_spread, S_IRUGO, show_spread, NULL);
static struct attribute *ics932s401_attr[] = {
&dev_attr_spread_enabled.attr,
&dev_attr_cpu_clock_selection.attr,
&dev_attr_cpu_clock.attr,
&dev_attr_src_clock_selection.attr,
&dev_attr_src_clock.attr,
&dev_attr_pci_clock_selection.attr,
&dev_attr_pci_clock.attr,
&dev_attr_usb_clock.attr,
&dev_attr_ref_clock.attr,
&dev_attr_cpu_spread.attr,
&dev_attr_src_spread.attr,
NULL
};
static ssize_t show_value(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
int x;
if (devattr == &dev_attr_usb_clock)
x = 48000;
else if (devattr == &dev_attr_ref_clock)
x = BASE_CLOCK;
else
BUG();
return sprintf(buf, "%d\n", x);
}
static ssize_t show_spread(struct device *dev,
struct device_attribute *devattr,
char *buf)
{
struct ics932s401_data *data = ics932s401_update_device(dev);
int reg;
unsigned long val;
if (!(data->regs[ICS932S401_REG_CFG2] & ICS932S401_CFG1_SPREAD))
return sprintf(buf, "0%%\n");
if (devattr == &dev_attr_src_spread)
reg = ICS932S401_REG_SRC_SPREAD1;
else if (devattr == &dev_attr_cpu_spread)
reg = ICS932S401_REG_CPU_SPREAD1;
else
BUG();
val = data->regs[reg] | (data->regs[reg + 1] << 8);
val &= ICS932S401_SPREAD_MASK;
/*
 * Scale the raw 0..2^14 spread value to 0..-0.5%, printed with six
 * fractional digits: e.g. 8192 -> 500000 * 8192 / 16384 = 250000,
 * shown as "-0.250000%".
 */
val = 500000 * val / 16384;
return sprintf(buf, "-0.%lu%%\n", val);
}
/* Return 0 if detection is successful, -ENODEV otherwise */
static int ics932s401_detect(struct i2c_client *client,
struct i2c_board_info *info)
{
struct i2c_adapter *adapter = client->adapter;
int vendor, device, revision;
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -ENODEV;
vendor = i2c_smbus_read_word_data(client, ICS932S401_REG_VENDOR_REV);
vendor >>= 8;
revision = vendor >> ICS932S401_REV_SHIFT;
vendor &= ICS932S401_VENDOR_MASK;
if (vendor != ICS932S401_VENDOR)
return -ENODEV;
device = i2c_smbus_read_word_data(client, ICS932S401_REG_DEVICE);
device >>= 8;
if (device != ICS932S401_DEVICE)
return -ENODEV;
if (revision != ICS932S401_REV)
dev_info(&adapter->dev, "Unknown revision %d\n", revision);
strscpy(info->type, "ics932s401", I2C_NAME_SIZE);
return 0;
}
static int ics932s401_probe(struct i2c_client *client)
{
struct ics932s401_data *data;
int err;
data = kzalloc(sizeof(struct ics932s401_data), GFP_KERNEL);
if (!data) {
err = -ENOMEM;
goto exit;
}
i2c_set_clientdata(client, data);
mutex_init(&data->lock);
dev_info(&client->dev, "%s chip found\n", client->name);
/* Register sysfs hooks */
data->attrs.attrs = ics932s401_attr;
err = sysfs_create_group(&client->dev.kobj, &data->attrs);
if (err)
goto exit_free;
return 0;
exit_free:
kfree(data);
exit:
return err;
}
static void ics932s401_remove(struct i2c_client *client)
{
struct ics932s401_data *data = i2c_get_clientdata(client);
sysfs_remove_group(&client->dev.kobj, &data->attrs);
kfree(data);
}
module_i2c_driver(ics932s401_driver);
MODULE_AUTHOR("Darrick J. Wong <[email protected]>");
MODULE_DESCRIPTION("ICS932S401 driver");
MODULE_LICENSE("GPL");
/* IBM IntelliStation Z30 */
MODULE_ALIAS("dmi:bvnIBM:*:rn9228:*");
MODULE_ALIAS("dmi:bvnIBM:*:rn9232:*");
/* IBM x3650/x3550 */
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3650*");
MODULE_ALIAS("dmi:bvnIBM:*:pnIBMSystemx3550*");
| linux-master | drivers/misc/ics932s401.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* kgdbts is a test suite for kgdb for the sole purpose of validating
* that key pieces of the kgdb internals are working properly such as
* HW/SW breakpoints, single stepping, and NMI.
*
* Created by: Jason Wessel <[email protected]>
*
* Copyright (c) 2008 Wind River Systems, Inc.
*/
/* Information about the kgdb test suite.
* -------------------------------------
*
* The kgdb test suite is designed as a KGDB I/O module which
* simulates the communications that a debugger would have with kgdb.
* The tests are broken up in to a line by line and referenced here as
* a "get" which is kgdb requesting input and "put" which is kgdb
* sending a response.
*
* The kgdb suite can be invoked from the kernel command line
* arguments system or executed dynamically at run time. The test
* suite uses the variable "kgdbts" to obtain the information about
* which tests to run and to configure the verbosity level. The
* following are the various characters you can use with the kgdbts=
* line:
*
* When using the "kgdbts=" you only choose one of the following core
* test types:
* A = Run all the core tests silently
* V1 = Run all the core tests with minimal output
* V2 = Run all the core tests in debug mode
*
* You can also specify optional tests:
* N## = Go to sleep with interrupts off for ## seconds
* to test the HW NMI watchdog
* F## = Break at kernel_clone for ## iterations
* S## = Break at sys_open for ## iterations
* I## = Run the single step test ## iterations
*
* NOTE: the kernel_clone and sys_open tests are mutually exclusive.
*
* To invoke the kgdb test suite from boot you use a kernel start
* argument as follows:
* kgdbts=V1 kgdbwait
* Or if you wanted to perform the NMI test for 6 seconds and kernel_clone
* test for 100 forks, you could use:
* kgdbts=V1N6F100 kgdbwait
*
* The test suite can also be invoked at run time with:
* echo kgdbts=V1N6F100 > /sys/module/kgdbts/parameters/kgdbts
* Or as another example:
* echo kgdbts=V2 > /sys/module/kgdbts/parameters/kgdbts
*
* When developing a new kgdb arch specific implementation or
* using these tests for the purpose of regression testing,
* several invocations are required.
*
* 1) Boot with the test suite enabled by using the kernel arguments
* "kgdbts=V1F100 kgdbwait"
* ## If the kgdb arch specific implementation has NMI use
* "kgdbts=V1N6F100"
*
* 2) After the system boot run the basic test.
* echo kgdbts=V1 > /sys/module/kgdbts/parameters/kgdbts
*
* 3) Run the concurrency tests. It is best to use n+1
* while loops where n is the number of cpus you have
* in your system. The example below uses only two
* loops.
*
* ## This tests break points on sys_open
* while [ 1 ] ; do find / > /dev/null 2>&1 ; done &
* while [ 1 ] ; do find / > /dev/null 2>&1 ; done &
* echo kgdbts=V1S10000 > /sys/module/kgdbts/parameters/kgdbts
* fg # and hit control-c
* fg # and hit control-c
* ## This tests break points on kernel_clone
* while [ 1 ] ; do date > /dev/null ; done &
* while [ 1 ] ; do date > /dev/null ; done &
* echo kgdbts=V1F1000 > /sys/module/kgdbts/parameters/kgdbts
* fg # and hit control-c
*
*/
#include <linux/kernel.h>
#include <linux/kgdb.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/nmi.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched/task.h>
#include <linux/kallsyms.h>
#include <asm/sections.h>
#define v1printk(a...) do { \
if (verbose) \
printk(KERN_INFO a); \
} while (0)
#define v2printk(a...) do { \
if (verbose > 1) { \
printk(KERN_INFO a); \
} \
touch_nmi_watchdog(); \
} while (0)
#define eprintk(a...) do { \
printk(KERN_ERR a); \
WARN_ON(1); \
} while (0)
#define MAX_CONFIG_LEN 40
static struct kgdb_io kgdbts_io_ops;
static char get_buf[BUFMAX];
static int get_buf_cnt;
static char put_buf[BUFMAX];
static int put_buf_cnt;
static char scratch_buf[BUFMAX];
static int verbose;
static int repeat_test;
static int test_complete;
static int send_ack;
static int final_ack;
static int force_hwbrks;
static int hwbreaks_ok;
static int hw_break_val;
static int hw_break_val2;
static int cont_instead_of_sstep;
static unsigned long cont_thread_id;
static unsigned long sstep_thread_id;
#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC)
static int arch_needs_sstep_emulation = 1;
#else
static int arch_needs_sstep_emulation;
#endif
static unsigned long cont_addr;
static unsigned long sstep_addr;
static int restart_from_top_after_write;
static int sstep_state;
/* Storage for the registers, in GDB format. */
static unsigned long kgdbts_gdb_regs[(NUMREGBYTES +
sizeof(unsigned long) - 1) /
sizeof(unsigned long)];
static struct pt_regs kgdbts_regs;
/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
static int configured = -1;
#ifdef CONFIG_KGDB_TESTS_BOOT_STRING
static char config[MAX_CONFIG_LEN] = CONFIG_KGDB_TESTS_BOOT_STRING;
#else
static char config[MAX_CONFIG_LEN];
#endif
static struct kparam_string kps = {
.string = config,
.maxlen = MAX_CONFIG_LEN,
};
static void fill_get_buf(char *buf);
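/*
 * One line of a test: "get" is the packet the simulated debugger sends
 * to kgdb (optionally built at run time by get_handler) and "put" is
 * the reply expected back (optionally checked by put_handler). In the
 * expected replies below a trailing '*' matches any remaining output.
 */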
struct test_struct {
char *get;
char *put;
void (*get_handler)(char *);
int (*put_handler)(char *, char *);
};
struct test_state {
char *name;
struct test_struct *tst;
int idx;
int (*run_test) (int, int);
int (*validate_put) (char *);
};
static struct test_state ts;
static int kgdbts_unreg_thread(void *ptr)
{
/* Wait until the tests are complete and then unregister the I/O
* driver.
*/
while (!final_ack)
msleep_interruptible(1500);
/* Pause for any other threads to exit after final ack. */
msleep_interruptible(1000);
if (configured)
kgdb_unregister_io_module(&kgdbts_io_ops);
configured = 0;
return 0;
}
/* This is noinline such that it can be used for a single location to
* place a breakpoint
*/
static noinline void kgdbts_break_test(void)
{
v2printk("kgdbts: breakpoint complete\n");
}
/*
* This is a cached wrapper for kallsyms_lookup_name().
*
* The cache is a big win for several tests. For example it more than
* doubles the cycles per second during the sys_open test. This is not
* theoretical: the performance improvement shows up at human scale,
* especially when testing using emulators.
*
* Obviously neither re-entrant nor thread-safe but that is OK since it
* can only be called from the debug trap (and therefore all other CPUs
* are halted).
*/
static unsigned long lookup_addr(char *arg)
{
static char cached_arg[KSYM_NAME_LEN];
static unsigned long cached_addr;
if (strcmp(arg, cached_arg)) {
strscpy(cached_arg, arg, KSYM_NAME_LEN);
cached_addr = kallsyms_lookup_name(arg);
}
return (unsigned long)dereference_function_descriptor(
(void *)cached_addr);
}
static void break_helper(char *bp_type, char *arg, unsigned long vaddr)
{
unsigned long addr;
if (arg)
addr = lookup_addr(arg);
else
addr = vaddr;
sprintf(scratch_buf, "%s,%lx,%i", bp_type, addr,
BREAK_INSTR_SIZE);
fill_get_buf(scratch_buf);
}
static void sw_break(char *arg)
{
break_helper(force_hwbrks ? "Z1" : "Z0", arg, 0);
}
static void sw_rem_break(char *arg)
{
break_helper(force_hwbrks ? "z1" : "z0", arg, 0);
}
static void hw_break(char *arg)
{
break_helper("Z1", arg, 0);
}
static void hw_rem_break(char *arg)
{
break_helper("z1", arg, 0);
}
static void hw_write_break(char *arg)
{
break_helper("Z2", arg, 0);
}
static void hw_rem_write_break(char *arg)
{
break_helper("z2", arg, 0);
}
static void hw_access_break(char *arg)
{
break_helper("Z4", arg, 0);
}
static void hw_rem_access_break(char *arg)
{
break_helper("z4", arg, 0);
}
static void hw_break_val_access(void)
{
hw_break_val2 = hw_break_val;
}
static void hw_break_val_write(void)
{
hw_break_val++;
}
static int get_thread_id_continue(char *put_str, char *arg)
{
char *ptr = &put_str[11];
if (put_str[1] != 'T' || put_str[2] != '0')
return 1;
kgdb_hex2long(&ptr, &cont_thread_id);
return 0;
}
static int check_and_rewind_pc(char *put_str, char *arg)
{
unsigned long addr = lookup_addr(arg);
unsigned long ip;
int offset = 0;
kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
NUMREGBYTES);
gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
ip = instruction_pointer(&kgdbts_regs);
v2printk("Stopped at IP: %lx\n", ip);
#ifdef GDB_ADJUSTS_BREAK_OFFSET
/* On some arches, a breakpoint stop requires it to be decremented */
if (addr + BREAK_INSTR_SIZE == ip)
offset = -BREAK_INSTR_SIZE;
#endif
if (arch_needs_sstep_emulation && sstep_addr &&
ip + offset == sstep_addr &&
((!strcmp(arg, "do_sys_openat2") || !strcmp(arg, "kernel_clone")))) {
/* This is special case for emulated single step */
v2printk("Emul: rewind hit single step bp\n");
restart_from_top_after_write = 1;
} else if (strcmp(arg, "silent") && ip + offset != addr) {
eprintk("kgdbts: BP mismatch %lx expected %lx\n",
ip + offset, addr);
return 1;
}
/* Readjust the instruction pointer if needed */
ip += offset;
cont_addr = ip;
#ifdef GDB_ADJUSTS_BREAK_OFFSET
instruction_pointer_set(&kgdbts_regs, ip);
#endif
return 0;
}
static int check_single_step(char *put_str, char *arg)
{
unsigned long addr = lookup_addr(arg);
static int matched_id;
/*
* From an arch-independent point of view the instruction pointer
* should be on a different instruction
*/
kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
NUMREGBYTES);
gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
v2printk("Singlestep stopped at IP: %lx\n",
instruction_pointer(&kgdbts_regs));
if (sstep_thread_id != cont_thread_id) {
/*
* Ensure we stopped in the same thread id as before, else the
* debugger should continue until the original thread that was
* single stepped is scheduled again, emulating gdb's behavior.
*/
v2printk("ThrID does not match: %lx\n", cont_thread_id);
if (arch_needs_sstep_emulation) {
if (matched_id &&
instruction_pointer(&kgdbts_regs) != addr)
goto continue_test;
matched_id++;
ts.idx -= 2;
sstep_state = 0;
return 0;
}
cont_instead_of_sstep = 1;
ts.idx -= 4;
return 0;
}
continue_test:
matched_id = 0;
if (instruction_pointer(&kgdbts_regs) == addr) {
eprintk("kgdbts: SingleStep failed at %lx\n",
instruction_pointer(&kgdbts_regs));
return 1;
}
return 0;
}
static void write_regs(char *arg)
{
memset(scratch_buf, 0, sizeof(scratch_buf));
scratch_buf[0] = 'G';
pt_regs_to_gdb_regs(kgdbts_gdb_regs, &kgdbts_regs);
kgdb_mem2hex((char *)kgdbts_gdb_regs, &scratch_buf[1], NUMREGBYTES);
fill_get_buf(scratch_buf);
}
static void skip_back_repeat_test(char *arg)
{
int go_back = simple_strtol(arg, NULL, 10);
repeat_test--;
if (repeat_test <= 0) {
ts.idx++;
} else {
if (repeat_test % 100 == 0)
v1printk("kgdbts:RUN ... %d remaining\n", repeat_test);
ts.idx -= go_back;
}
fill_get_buf(ts.tst[ts.idx].get);
}
static int got_break(char *put_str, char *arg)
{
test_complete = 1;
if (!strncmp(put_str+1, arg, 2)) {
if (!strncmp(arg, "T0", 2))
test_complete = 2;
return 0;
}
return 1;
}
static void get_cont_catch(char *arg)
{
/* Always send detach because the test is completed at this point */
fill_get_buf("D");
}
static int put_cont_catch(char *put_str, char *arg)
{
/* This is at the end of the test and we catch any and all input */
v2printk("kgdbts: cleanup task: %lx\n", sstep_thread_id);
ts.idx--;
return 0;
}
static int emul_reset(char *put_str, char *arg)
{
if (strncmp(put_str, "$OK", 3))
return 1;
if (restart_from_top_after_write) {
restart_from_top_after_write = 0;
ts.idx = -1;
}
return 0;
}
static void emul_sstep_get(char *arg)
{
if (!arch_needs_sstep_emulation) {
if (cont_instead_of_sstep) {
cont_instead_of_sstep = 0;
fill_get_buf("c");
} else {
fill_get_buf(arg);
}
return;
}
switch (sstep_state) {
case 0:
v2printk("Emulate single step\n");
/* Start by looking at the current PC */
fill_get_buf("g");
break;
case 1:
/* set breakpoint */
break_helper("Z0", NULL, sstep_addr);
break;
case 2:
/* Continue */
fill_get_buf("c");
break;
case 3:
/* Clear breakpoint */
break_helper("z0", NULL, sstep_addr);
break;
default:
eprintk("kgdbts: ERROR failed sstep get emulation\n");
}
sstep_state++;
}
static int emul_sstep_put(char *put_str, char *arg)
{
if (!arch_needs_sstep_emulation) {
char *ptr = &put_str[11];
if (put_str[1] != 'T' || put_str[2] != '0')
return 1;
kgdb_hex2long(&ptr, &sstep_thread_id);
return 0;
}
switch (sstep_state) {
case 1:
/* validate the "g" packet to get the IP */
kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs,
NUMREGBYTES);
gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs);
v2printk("Stopped at IP: %lx\n",
instruction_pointer(&kgdbts_regs));
/* Want to stop at IP + break instruction size by default */
sstep_addr = cont_addr + BREAK_INSTR_SIZE;
break;
case 2:
if (strncmp(put_str, "$OK", 3)) {
eprintk("kgdbts: failed sstep break set\n");
return 1;
}
break;
case 3:
if (strncmp(put_str, "$T0", 3)) {
eprintk("kgdbts: failed continue sstep\n");
return 1;
} else {
char *ptr = &put_str[11];
kgdb_hex2long(&ptr, &sstep_thread_id);
}
break;
case 4:
if (strncmp(put_str, "$OK", 3)) {
eprintk("kgdbts: failed sstep break unset\n");
return 1;
}
/* Single step is complete so continue on! */
sstep_state = 0;
return 0;
default:
eprintk("kgdbts: ERROR failed sstep put emulation\n");
}
/* Continue on the same test line until emulation is complete */
ts.idx--;
return 0;
}
static int final_ack_set(char *put_str, char *arg)
{
if (strncmp(put_str+1, arg, 2))
return 1;
final_ack = 1;
return 0;
}
/*
* Test to plant a breakpoint and detach, which should clear out the
* breakpoint and restore the original instruction.
*/
static struct test_struct plant_and_detach_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
{ "D", "OK" }, /* Detach */
{ "", "" },
};
/*
* Simple test to write in a software breakpoint, check for the
* correct stop location and detach.
*/
static struct test_struct sw_breakpoint_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", }, /* Continue */
{ "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs },
{ "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
{ "D", "OK" }, /* Detach */
{ "D", "OK", NULL, got_break }, /* On success we made it here */
{ "", "" },
};
/*
* Test a known bad memory read location to test the fault handler and
* read bytes 1-8 at the bad address
*/
static struct test_struct bad_read_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "m0,1", "E*" }, /* read 1 byte at address 1 */
{ "m0,2", "E*" }, /* read 1 byte at address 2 */
{ "m0,3", "E*" }, /* read 1 byte at address 3 */
{ "m0,4", "E*" }, /* read 1 byte at address 4 */
{ "m0,5", "E*" }, /* read 1 byte at address 5 */
{ "m0,6", "E*" }, /* read 1 byte at address 6 */
{ "m0,7", "E*" }, /* read 1 byte at address 7 */
{ "m0,8", "E*" }, /* read 1 byte at address 8 */
{ "D", "OK" }, /* Detach which removes all breakpoints and continues */
{ "", "" },
};
/*
* Test for hitting a breakpoint, remove it, single step, plant it
* again and detach.
*/
static struct test_struct singlestep_break_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
{ "kgdbts_break_test", "OK", sw_rem_break }, /*remove breakpoint */
{ "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs }, /* Write registers */
{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
{ "g", "kgdbts_break_test", NULL, check_single_step },
{ "kgdbts_break_test", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", }, /* Continue */
{ "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs }, /* Write registers */
{ "D", "OK" }, /* Remove all breakpoints and continues */
{ "", "" },
};
/*
 * Test for hitting a breakpoint at kernel_clone for however many
 * iterations the variable repeat_test requires.
*/
static struct test_struct do_kernel_clone_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "kernel_clone", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
{ "kernel_clone", "OK", sw_rem_break }, /*remove breakpoint */
{ "g", "kernel_clone", NULL, check_and_rewind_pc }, /* check location */
{ "write", "OK", write_regs, emul_reset }, /* Write registers */
{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
{ "g", "kernel_clone", NULL, check_single_step },
{ "kernel_clone", "OK", sw_break, }, /* set sw breakpoint */
{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
{ "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
{ "", "", get_cont_catch, put_cont_catch },
};
/* Test for hitting a breakpoint in do_sys_openat2 (the sys_open path) for
 * however many iterations the variable repeat_test requires.
*/
static struct test_struct sys_open_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "do_sys_openat2", "OK", sw_break, }, /* set sw breakpoint */
{ "c", "T0*", NULL, get_thread_id_continue }, /* Continue */
{ "do_sys_openat2", "OK", sw_rem_break }, /*remove breakpoint */
{ "g", "do_sys_openat2", NULL, check_and_rewind_pc }, /* check location */
{ "write", "OK", write_regs, emul_reset }, /* Write registers */
{ "s", "T0*", emul_sstep_get, emul_sstep_put }, /* Single step */
{ "g", "do_sys_openat2", NULL, check_single_step },
{ "do_sys_openat2", "OK", sw_break, }, /* set sw breakpoint */
{ "7", "T0*", skip_back_repeat_test }, /* Loop based on repeat_test */
{ "D", "OK", NULL, final_ack_set }, /* detach and unregister I/O */
{ "", "", get_cont_catch, put_cont_catch },
};
/*
* Test for hitting a simple hw breakpoint
*/
static struct test_struct hw_breakpoint_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "kgdbts_break_test", "OK", hw_break, }, /* set hw breakpoint */
{ "c", "T0*", }, /* Continue */
{ "g", "kgdbts_break_test", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs },
{ "kgdbts_break_test", "OK", hw_rem_break }, /*remove breakpoint */
{ "D", "OK" }, /* Detach */
{ "D", "OK", NULL, got_break }, /* On success we made it here */
{ "", "" },
};
/*
* Test for hitting a hw write breakpoint
*/
static struct test_struct hw_write_break_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "hw_break_val", "OK", hw_write_break, }, /* set hw breakpoint */
{ "c", "T0*", NULL, got_break }, /* Continue */
{ "g", "silent", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs },
{ "hw_break_val", "OK", hw_rem_write_break }, /*remove breakpoint */
{ "D", "OK" }, /* Detach */
{ "D", "OK", NULL, got_break }, /* On success we made it here */
{ "", "" },
};
/*
* Test for hitting a hw access breakpoint
*/
static struct test_struct hw_access_break_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "hw_break_val", "OK", hw_access_break, }, /* set hw breakpoint */
{ "c", "T0*", NULL, got_break }, /* Continue */
{ "g", "silent", NULL, check_and_rewind_pc },
{ "write", "OK", write_regs },
{ "hw_break_val", "OK", hw_rem_access_break }, /*remove breakpoint */
{ "D", "OK" }, /* Detach */
{ "D", "OK", NULL, got_break }, /* On success we made it here */
{ "", "" },
};
/*
 * Test that an NMI entry into the debugger is delivered while sleeping
 * with interrupts disabled
*/
static struct test_struct nmi_sleep_test[] = {
{ "?", "S0*" }, /* Clear break points */
{ "c", "T0*", NULL, got_break }, /* Continue */
{ "D", "OK" }, /* Detach */
{ "D", "OK", NULL, got_break }, /* On success we made it here */
{ "", "" },
};
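/*
 * Frame @buf as a gdb remote serial protocol packet, "$<payload>#<csum>",
 * where <csum> is the modulo-256 sum of the payload bytes encoded as two
 * hex digits.
 */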
static void fill_get_buf(char *buf)
{
unsigned char checksum = 0;
int count = 0;
char ch;
strcpy(get_buf, "$");
strcat(get_buf, buf);
while ((ch = buf[count])) {
checksum += ch;
count++;
}
strcat(get_buf, "#");
get_buf[count + 2] = hex_asc_hi(checksum);
get_buf[count + 3] = hex_asc_lo(checksum);
get_buf[count + 4] = '\0';
v2printk("get%i: %s\n", ts.idx, get_buf);
}
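/*
 * Compare the packet received from kgdb against the expected "put" string
 * of the current test step.  A registered put_handler takes precedence;
 * otherwise a '*' in the expected string matches the rest of the packet.
 */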
static int validate_simple_test(char *put_str)
{
char *chk_str;
if (ts.tst[ts.idx].put_handler)
return ts.tst[ts.idx].put_handler(put_str,
ts.tst[ts.idx].put);
chk_str = ts.tst[ts.idx].put;
if (*put_str == '$')
put_str++;
while (*chk_str != '\0' && *put_str != '\0') {
/* If someone does a * to match the rest of the string, allow
* it, or stop if the received string is complete.
*/
if (*put_str == '#' || *chk_str == '*')
return 0;
if (*put_str != *chk_str)
return 1;
chk_str++;
put_str++;
}
if (*chk_str == '\0' && (*put_str == '\0' || *put_str == '#'))
return 0;
return 1;
}
static int run_simple_test(int is_get_char, int chr)
{
int ret = 0;
if (is_get_char) {
/* Send an ACK on the get if a prior put completed and set the
* send ack variable
*/
if (send_ack) {
send_ack = 0;
return '+';
}
/* On the first get char, fill the transmit buffer and then
* take from the get_string.
*/
if (get_buf_cnt == 0) {
if (ts.tst[ts.idx].get_handler)
ts.tst[ts.idx].get_handler(ts.tst[ts.idx].get);
else
fill_get_buf(ts.tst[ts.idx].get);
}
if (get_buf[get_buf_cnt] == '\0') {
eprintk("kgdbts: ERROR GET: EOB on '%s' at %i\n",
ts.name, ts.idx);
get_buf_cnt = 0;
fill_get_buf("D");
}
ret = get_buf[get_buf_cnt];
get_buf_cnt++;
return ret;
}
/* This callback is a put char which is when kgdb sends data to
* this I/O module.
*/
if (ts.tst[ts.idx].get[0] == '\0' && ts.tst[ts.idx].put[0] == '\0' &&
!ts.tst[ts.idx].get_handler) {
eprintk("kgdbts: ERROR: beyond end of test on"
" '%s' line %i\n", ts.name, ts.idx);
return 0;
}
if (put_buf_cnt >= BUFMAX) {
eprintk("kgdbts: ERROR: put buffer overflow on"
" '%s' line %i\n", ts.name, ts.idx);
put_buf_cnt = 0;
return 0;
}
/* Ignore everything until the first valid packet start '$' */
if (put_buf_cnt == 0 && chr != '$')
return 0;
put_buf[put_buf_cnt] = chr;
put_buf_cnt++;
/* End of packet == #XX so look for the '#' */
if (put_buf_cnt > 3 && put_buf[put_buf_cnt - 3] == '#') {
if (put_buf_cnt >= BUFMAX) {
eprintk("kgdbts: ERROR: put buffer overflow on"
" '%s' line %i\n", ts.name, ts.idx);
put_buf_cnt = 0;
return 0;
}
put_buf[put_buf_cnt] = '\0';
v2printk("put%i: %s\n", ts.idx, put_buf);
/* Trigger check here */
if (ts.validate_put && ts.validate_put(put_buf)) {
eprintk("kgdbts: ERROR PUT: end of test "
"buffer on '%s' line %i expected %s got %s\n",
ts.name, ts.idx, ts.tst[ts.idx].put, put_buf);
}
ts.idx++;
put_buf_cnt = 0;
get_buf_cnt = 0;
send_ack = 1;
}
return 0;
}
static void init_simple_test(void)
{
memset(&ts, 0, sizeof(ts));
ts.run_test = run_simple_test;
ts.validate_put = validate_simple_test;
}
static void run_plant_and_detach_test(int is_early)
{
char before[BREAK_INSTR_SIZE];
char after[BREAK_INSTR_SIZE];
copy_from_kernel_nofault(before, (char *)kgdbts_break_test,
BREAK_INSTR_SIZE);
init_simple_test();
ts.tst = plant_and_detach_test;
ts.name = "plant_and_detach_test";
/* Activate test with initial breakpoint */
if (!is_early)
kgdb_breakpoint();
copy_from_kernel_nofault(after, (char *)kgdbts_break_test,
BREAK_INSTR_SIZE);
if (memcmp(before, after, BREAK_INSTR_SIZE)) {
printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
panic("kgdb memory corruption");
}
/* complete the detach test */
if (!is_early)
kgdbts_break_test();
}
static void run_breakpoint_test(int is_hw_breakpoint)
{
test_complete = 0;
init_simple_test();
if (is_hw_breakpoint) {
ts.tst = hw_breakpoint_test;
ts.name = "hw_breakpoint_test";
} else {
ts.tst = sw_breakpoint_test;
ts.name = "sw_breakpoint_test";
}
/* Activate test with initial breakpoint */
kgdb_breakpoint();
/* run code with the break point in it */
kgdbts_break_test();
kgdb_breakpoint();
if (test_complete)
return;
eprintk("kgdbts: ERROR %s test failed\n", ts.name);
if (is_hw_breakpoint)
hwbreaks_ok = 0;
}
static void run_hw_break_test(int is_write_test)
{
test_complete = 0;
init_simple_test();
if (is_write_test) {
ts.tst = hw_write_break_test;
ts.name = "hw_write_break_test";
} else {
ts.tst = hw_access_break_test;
ts.name = "hw_access_break_test";
}
/* Activate test with initial breakpoint */
kgdb_breakpoint();
hw_break_val_access();
if (is_write_test) {
if (test_complete == 2) {
eprintk("kgdbts: ERROR %s broke on access\n",
ts.name);
hwbreaks_ok = 0;
}
hw_break_val_write();
}
kgdb_breakpoint();
if (test_complete == 1)
return;
eprintk("kgdbts: ERROR %s test failed\n", ts.name);
hwbreaks_ok = 0;
}
static void run_nmi_sleep_test(int nmi_sleep)
{
unsigned long flags;
init_simple_test();
ts.tst = nmi_sleep_test;
ts.name = "nmi_sleep_test";
/* Activate test with initial breakpoint */
kgdb_breakpoint();
local_irq_save(flags);
mdelay(nmi_sleep*1000);
touch_nmi_watchdog();
local_irq_restore(flags);
if (test_complete != 2)
eprintk("kgdbts: ERROR nmi_test did not hit nmi\n");
kgdb_breakpoint();
if (test_complete == 1)
return;
eprintk("kgdbts: ERROR %s test failed\n", ts.name);
}
static void run_bad_read_test(void)
{
init_simple_test();
ts.tst = bad_read_test;
ts.name = "bad_read_test";
/* Activate test with initial breakpoint */
kgdb_breakpoint();
}
static void run_kernel_clone_test(void)
{
init_simple_test();
ts.tst = do_kernel_clone_test;
ts.name = "do_kernel_clone_test";
/* Activate test with initial breakpoint */
kgdb_breakpoint();
}
static void run_sys_open_test(void)
{
init_simple_test();
ts.tst = sys_open_test;
ts.name = "sys_open_test";
/* Activate test with initial breakpoint */
kgdb_breakpoint();
}
static void run_singlestep_break_test(void)
{
init_simple_test();
ts.tst = singlestep_break_test;
ts.name = "singlestep_breakpoint_test";
/* Activate test with initial breakpoint */
kgdb_breakpoint();
kgdbts_break_test();
kgdbts_break_test();
}
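/*
 * Parse the configuration string: V1/V2 set the verbosity, F# repeats the
 * kernel_clone test # times, S# repeats the sys_open test # times, N# runs
 * the NMI sleep test for # seconds and I# sets the number of single step
 * iterations (default 1000).
 */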
static void kgdbts_run_tests(void)
{
char *ptr;
int clone_test = 0;
int do_sys_open_test = 0;
int sstep_test = 1000;
int nmi_sleep = 0;
int i;
verbose = 0;
if (strstr(config, "V1"))
verbose = 1;
if (strstr(config, "V2"))
verbose = 2;
ptr = strchr(config, 'F');
if (ptr)
clone_test = simple_strtol(ptr + 1, NULL, 10);
ptr = strchr(config, 'S');
if (ptr)
do_sys_open_test = simple_strtol(ptr + 1, NULL, 10);
ptr = strchr(config, 'N');
if (ptr)
nmi_sleep = simple_strtol(ptr+1, NULL, 10);
ptr = strchr(config, 'I');
if (ptr)
sstep_test = simple_strtol(ptr+1, NULL, 10);
/* All HW break point tests */
if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) {
hwbreaks_ok = 1;
v1printk("kgdbts:RUN hw breakpoint test\n");
run_breakpoint_test(1);
v1printk("kgdbts:RUN hw write breakpoint test\n");
run_hw_break_test(1);
v1printk("kgdbts:RUN access write breakpoint test\n");
run_hw_break_test(0);
}
/* required internal KGDB tests */
v1printk("kgdbts:RUN plant and detach test\n");
run_plant_and_detach_test(0);
v1printk("kgdbts:RUN sw breakpoint test\n");
run_breakpoint_test(0);
v1printk("kgdbts:RUN bad memory access test\n");
run_bad_read_test();
v1printk("kgdbts:RUN singlestep test %i iterations\n", sstep_test);
for (i = 0; i < sstep_test; i++) {
run_singlestep_break_test();
if (i % 100 == 0)
v1printk("kgdbts:RUN singlestep [%i/%i]\n",
i, sstep_test);
}
/* ===Optional tests=== */
if (nmi_sleep) {
v1printk("kgdbts:RUN NMI sleep %i seconds test\n", nmi_sleep);
run_nmi_sleep_test(nmi_sleep);
}
/* If the kernel_clone test is run it will be the last test that is
* executed because a kernel thread will be spawned at the very
* end to unregister the debug hooks.
*/
if (clone_test) {
repeat_test = clone_test;
printk(KERN_INFO "kgdbts:RUN kernel_clone for %i breakpoints\n",
repeat_test);
kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg");
run_kernel_clone_test();
return;
}
/* If the sys_open test is run it will be the last test that is
* executed because a kernel thread will be spawned at the very
* end to unregister the debug hooks.
*/
if (do_sys_open_test) {
repeat_test = do_sys_open_test;
printk(KERN_INFO "kgdbts:RUN sys_open for %i breakpoints\n",
repeat_test);
kthread_run(kgdbts_unreg_thread, NULL, "kgdbts_unreg");
run_sys_open_test();
return;
}
/* Shutdown and unregister */
kgdb_unregister_io_module(&kgdbts_io_ops);
configured = 0;
}
static int kgdbts_option_setup(char *opt)
{
if (strlen(opt) >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdbts: config string too long\n");
return 1;
}
strcpy(config, opt);
return 1;
}
__setup("kgdbts=", kgdbts_option_setup);
static int configure_kgdbts(void)
{
int err = 0;
if (!strlen(config) || isspace(config[0]))
goto noconfig;
final_ack = 0;
run_plant_and_detach_test(1);
err = kgdb_register_io_module(&kgdbts_io_ops);
if (err) {
configured = 0;
return err;
}
configured = 1;
kgdbts_run_tests();
return err;
noconfig:
config[0] = 0;
configured = 0;
return err;
}
static int __init init_kgdbts(void)
{
/* Already configured? */
if (configured == 1)
return 0;
return configure_kgdbts();
}
device_initcall(init_kgdbts);
static int kgdbts_get_char(void)
{
int val = 0;
if (ts.run_test)
val = ts.run_test(1, 0);
return val;
}
static void kgdbts_put_char(u8 chr)
{
if (ts.run_test)
ts.run_test(0, chr);
}
static int param_set_kgdbts_var(const char *kmessage,
const struct kernel_param *kp)
{
size_t len = strlen(kmessage);
if (len >= MAX_CONFIG_LEN) {
printk(KERN_ERR "kgdbts: config string too long\n");
return -ENOSPC;
}
/* Only copy in the string if the init function has not run yet */
if (configured < 0) {
strcpy(config, kmessage);
return 0;
}
if (configured == 1) {
printk(KERN_ERR "kgdbts: ERROR: Already configured and running.\n");
return -EBUSY;
}
strcpy(config, kmessage);
/* Chop out \n char as a result of echo */
if (len && config[len - 1] == '\n')
config[len - 1] = '\0';
/* Go and configure with the new params. */
return configure_kgdbts();
}
static void kgdbts_pre_exp_handler(void)
{
/* Increment the module count when the debugger is active */
if (!kgdb_connected)
try_module_get(THIS_MODULE);
}
static void kgdbts_post_exp_handler(void)
{
/* decrement the module count when the debugger detaches */
if (!kgdb_connected)
module_put(THIS_MODULE);
}
static struct kgdb_io kgdbts_io_ops = {
.name = "kgdbts",
.read_char = kgdbts_get_char,
.write_char = kgdbts_put_char,
.pre_exception = kgdbts_pre_exp_handler,
.post_exception = kgdbts_post_exp_handler,
};
/*
* not really modular, but the easiest way to keep compat with existing
* bootargs behaviour is to continue using module_param here.
*/
module_param_call(kgdbts, param_set_kgdbts_var, param_get_string, &kps, 0644);
MODULE_PARM_DESC(kgdbts, "<A|V1|V2>[F#|S#][N#][I#]");
| linux-master | drivers/misc/kgdbts.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* hmc6352.c - Honeywell Compass Driver
*
* Copyright (C) 2009 Intel Corp
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
#include <linux/nospec.h>
static DEFINE_MUTEX(compass_mutex);
static int compass_command(struct i2c_client *c, u8 cmd)
{
int ret = i2c_master_send(c, &cmd, 1);
if (ret < 0)
dev_warn(&c->dev, "command '%c' failed.\n", cmd);
return ret;
}
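/*
 * Translate a 0/1 value written via sysfs into a single-letter chip
 * command taken from @map: "EC" maps 0/1 to the exit/enter calibration
 * commands and "SW" maps 0/1 to the sleep/wakeup commands.
 */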
static int compass_store(struct device *dev, const char *buf, size_t count,
const char *map)
{
struct i2c_client *c = to_i2c_client(dev);
int ret;
unsigned long val;
ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
if (val >= strlen(map))
return -EINVAL;
val = array_index_nospec(val, strlen(map));
mutex_lock(&compass_mutex);
ret = compass_command(c, map[val]);
mutex_unlock(&compass_mutex);
if (ret < 0)
return ret;
return count;
}
static ssize_t compass_calibration_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return compass_store(dev, buf, count, "EC");
}
static ssize_t compass_power_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
return compass_store(dev, buf, count, "SW");
}
static ssize_t compass_heading_data_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
unsigned char i2c_data[2];
int ret;
mutex_lock(&compass_mutex);
ret = compass_command(client, 'A');
if (ret != 1) {
mutex_unlock(&compass_mutex);
return ret;
}
msleep(10); /* after sending the 'A' cmd we must wait 7-10 ms for data */
ret = i2c_master_recv(client, i2c_data, 2);
mutex_unlock(&compass_mutex);
if (ret < 0) {
dev_warn(dev, "i2c read data cmd failed\n");
return ret;
}
ret = (i2c_data[0] << 8) | i2c_data[1];
return sprintf(buf, "%d.%d\n", ret/10, ret%10);
}
static DEVICE_ATTR(heading0_input, S_IRUGO, compass_heading_data_show, NULL);
static DEVICE_ATTR(calibration, S_IWUSR, NULL, compass_calibration_store);
static DEVICE_ATTR(power_state, S_IWUSR, NULL, compass_power_mode_store);
static struct attribute *mid_att_compass[] = {
&dev_attr_heading0_input.attr,
&dev_attr_calibration.attr,
&dev_attr_power_state.attr,
NULL
};
static const struct attribute_group m_compass_gr = {
.name = "hmc6352",
.attrs = mid_att_compass
};
static int hmc6352_probe(struct i2c_client *client)
{
int res;
res = sysfs_create_group(&client->dev.kobj, &m_compass_gr);
if (res) {
dev_err(&client->dev, "device_create_file failed\n");
return res;
}
dev_info(&client->dev, "%s HMC6352 compass chip found\n",
client->name);
return 0;
}
static void hmc6352_remove(struct i2c_client *client)
{
sysfs_remove_group(&client->dev.kobj, &m_compass_gr);
}
static const struct i2c_device_id hmc6352_id[] = {
{ "hmc6352", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, hmc6352_id);
static struct i2c_driver hmc6352_driver = {
.driver = {
.name = "hmc6352",
},
.probe = hmc6352_probe,
.remove = hmc6352_remove,
.id_table = hmc6352_id,
};
module_i2c_driver(hmc6352_driver);
MODULE_AUTHOR("Kalhan Trisal <[email protected]");
MODULE_DESCRIPTION("hmc6352 Compass Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/hmc6352.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* apds9802als.c - apds9802 ALS Driver
*
* Copyright (C) 2009 Intel Corp
*
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
#include <linux/pm_runtime.h>
#define ALS_MIN_RANGE_VAL 1
#define ALS_MAX_RANGE_VAL 2
#define POWER_STA_ENABLE 1
#define POWER_STA_DISABLE 0
#define DRIVER_NAME "apds9802als"
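/*
 * Register usage in this driver: 0x80 is the power/control register
 * (bit 0 powers the sensor), 0x81 is the ALS configuration (bit 0 selects
 * the 4095 vs. 65535 count range, bit 3 triggers a measurement), 0x86
 * reports status (bit 7 = data ready) and 0x8C/0x8D hold the result
 * LSB/MSB.
 */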
struct als_data {
struct mutex mutex;
};
static ssize_t als_sensing_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
int val;
val = i2c_smbus_read_byte_data(client, 0x81);
if (val < 0)
return val;
if (val & 1)
return sprintf(buf, "4095\n");
else
return sprintf(buf, "65535\n");
}
static int als_wait_for_data_ready(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
int ret;
int retry = 10;
do {
msleep(30);
ret = i2c_smbus_read_byte_data(client, 0x86);
} while (!(ret & 0x80) && retry--);
if (retry < 0) {
dev_warn(dev, "timeout waiting for data ready\n");
return -ETIMEDOUT;
}
return 0;
}
static ssize_t als_lux0_input_data_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct i2c_client *client = to_i2c_client(dev);
struct als_data *data = i2c_get_clientdata(client);
int ret_val;
int temp;
/* Protect against parallel reads */
pm_runtime_get_sync(dev);
mutex_lock(&data->mutex);
/* clear EOC interrupt status */
i2c_smbus_write_byte(client, 0x40);
/* start measurement */
temp = i2c_smbus_read_byte_data(client, 0x81);
i2c_smbus_write_byte_data(client, 0x81, temp | 0x08);
ret_val = als_wait_for_data_ready(dev);
if (ret_val < 0)
goto failed;
temp = i2c_smbus_read_byte_data(client, 0x8C); /* LSB data */
if (temp < 0) {
ret_val = temp;
goto failed;
}
ret_val = i2c_smbus_read_byte_data(client, 0x8D); /* MSB data */
if (ret_val < 0)
goto failed;
mutex_unlock(&data->mutex);
pm_runtime_put_sync(dev);
temp = (ret_val << 8) | temp;
return sprintf(buf, "%d\n", temp);
failed:
mutex_unlock(&data->mutex);
pm_runtime_put_sync(dev);
return ret_val;
}
static ssize_t als_sensing_range_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct als_data *data = i2c_get_clientdata(client);
int ret_val;
unsigned long val;
ret_val = kstrtoul(buf, 10, &val);
if (ret_val)
return ret_val;
if (val < 4096)
val = 1;
else if (val < 65536)
val = 2;
else
return -ERANGE;
pm_runtime_get_sync(dev);
/* Make sure nobody else reads/modifies/writes 0x81 while we
are active */
mutex_lock(&data->mutex);
ret_val = i2c_smbus_read_byte_data(client, 0x81);
if (ret_val < 0)
goto fail;
/* Reset the bits before setting them */
ret_val = ret_val & 0xFA;
if (val == 1) /* Setting detection range up to 4k LUX */
ret_val = (ret_val | 0x01);
else /* Setting detection range up to 64k LUX*/
ret_val = (ret_val | 0x00);
ret_val = i2c_smbus_write_byte_data(client, 0x81, ret_val);
if (ret_val >= 0) {
/* All OK */
mutex_unlock(&data->mutex);
pm_runtime_put_sync(dev);
return count;
}
fail:
mutex_unlock(&data->mutex);
pm_runtime_put_sync(dev);
return ret_val;
}
static int als_set_power_state(struct i2c_client *client, bool on_off)
{
int ret_val;
struct als_data *data = i2c_get_clientdata(client);
mutex_lock(&data->mutex);
ret_val = i2c_smbus_read_byte_data(client, 0x80);
if (ret_val < 0)
goto fail;
if (on_off)
ret_val = ret_val | 0x01;
else
ret_val = ret_val & 0xFE;
ret_val = i2c_smbus_write_byte_data(client, 0x80, ret_val);
fail:
mutex_unlock(&data->mutex);
return ret_val;
}
static DEVICE_ATTR(lux0_sensor_range, S_IRUGO | S_IWUSR,
als_sensing_range_show, als_sensing_range_store);
static DEVICE_ATTR(lux0_input, S_IRUGO, als_lux0_input_data_show, NULL);
static struct attribute *mid_att_als[] = {
&dev_attr_lux0_sensor_range.attr,
&dev_attr_lux0_input.attr,
NULL
};
static const struct attribute_group m_als_gr = {
.name = "apds9802als",
.attrs = mid_att_als
};
static int als_set_default_config(struct i2c_client *client)
{
int ret_val;
/* Write the command and then switch on */
ret_val = i2c_smbus_write_byte_data(client, 0x80, 0x01);
if (ret_val < 0) {
dev_err(&client->dev, "failed default switch on write\n");
return ret_val;
}
/* detection range: 1~64K Lux, manual measurement */
ret_val = i2c_smbus_write_byte_data(client, 0x81, 0x08);
if (ret_val < 0)
dev_err(&client->dev, "failed default LUX on write\n");
/* We always get 0 for the 1st measurement after system power on,
* so make sure it is finished before user asks for data.
*/
als_wait_for_data_ready(&client->dev);
return ret_val;
}
static int apds9802als_probe(struct i2c_client *client)
{
int res;
struct als_data *data;
data = kzalloc(sizeof(struct als_data), GFP_KERNEL);
if (data == NULL) {
dev_err(&client->dev, "Memory allocation failed\n");
return -ENOMEM;
}
i2c_set_clientdata(client, data);
res = sysfs_create_group(&client->dev.kobj, &m_als_gr);
if (res) {
dev_err(&client->dev, "device create file failed\n");
goto als_error1;
}
dev_info(&client->dev, "ALS chip found\n");
als_set_default_config(client);
mutex_init(&data->mutex);
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
return res;
als_error1:
kfree(data);
return res;
}
static void apds9802als_remove(struct i2c_client *client)
{
struct als_data *data = i2c_get_clientdata(client);
pm_runtime_get_sync(&client->dev);
als_set_power_state(client, false);
sysfs_remove_group(&client->dev.kobj, &m_als_gr);
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
pm_runtime_put_noidle(&client->dev);
kfree(data);
}
#ifdef CONFIG_PM
static int apds9802als_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
als_set_power_state(client, false);
return 0;
}
static int apds9802als_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
als_set_power_state(client, true);
return 0;
}
static UNIVERSAL_DEV_PM_OPS(apds9802als_pm_ops, apds9802als_suspend,
apds9802als_resume, NULL);
#define APDS9802ALS_PM_OPS (&apds9802als_pm_ops)
#else /* CONFIG_PM */
#define APDS9802ALS_PM_OPS NULL
#endif /* CONFIG_PM */
static const struct i2c_device_id apds9802als_id[] = {
{ DRIVER_NAME, 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, apds9802als_id);
static struct i2c_driver apds9802als_driver = {
.driver = {
.name = DRIVER_NAME,
.pm = APDS9802ALS_PM_OPS,
},
.probe = apds9802als_probe,
.remove = apds9802als_remove,
.id_table = apds9802als_id,
};
module_i2c_driver(apds9802als_driver);
MODULE_AUTHOR("Anantha Narayanan <[email protected]");
MODULE_DESCRIPTION("Avago apds9802als ALS Driver");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/apds9802als.c |
// SPDX-License-Identifier: GPL-2.0-only
//
// VCPU stall detector.
// Copyright (C) Google, 2022
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/of.h>
#include <linux/param.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#define VCPU_STALL_REG_STATUS (0x00)
#define VCPU_STALL_REG_LOAD_CNT (0x04)
#define VCPU_STALL_REG_CURRENT_CNT (0x08)
#define VCPU_STALL_REG_CLOCK_FREQ_HZ (0x0C)
#define VCPU_STALL_REG_LEN (0x10)
#define VCPU_STALL_DEFAULT_CLOCK_HZ (10)
#define VCPU_STALL_MAX_CLOCK_HZ (100)
#define VCPU_STALL_DEFAULT_TIMEOUT_SEC (8)
#define VCPU_STALL_MAX_TIMEOUT_SEC (600)
struct vcpu_stall_detect_config {
u32 clock_freq_hz;
u32 stall_timeout_sec;
void __iomem *membase;
struct platform_device *dev;
enum cpuhp_state hp_online;
};
struct vcpu_stall_priv {
struct hrtimer vcpu_hrtimer;
bool is_initialized;
};
/* The vcpu stall configuration structure which applies to all the CPUs */
static struct vcpu_stall_detect_config vcpu_stall_config;
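/*
 * Each vCPU owns a VCPU_STALL_REG_LEN byte register window inside the
 * shared MMIO region, so a given register for a given CPU lives at
 * membase + cpu * VCPU_STALL_REG_LEN + reg.
 */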
#define vcpu_stall_reg_write(vcpu, reg, value) \
writel_relaxed((value), \
(void __iomem *)(vcpu_stall_config.membase + \
(vcpu) * VCPU_STALL_REG_LEN + (reg)))
static struct vcpu_stall_priv __percpu *vcpu_stall_detectors;
static enum hrtimer_restart
vcpu_stall_detect_timer_fn(struct hrtimer *hrtimer)
{
u32 ticks, ping_timeout_ms;
/* Reload the stall detector counter register every
* `ping_timeout_ms` to prevent the virtual device
* from decrementing it to 0. The virtual device decrements this
* register at 'clock_freq_hz' frequency.
*/
ticks = vcpu_stall_config.clock_freq_hz *
vcpu_stall_config.stall_timeout_sec;
vcpu_stall_reg_write(smp_processor_id(),
VCPU_STALL_REG_LOAD_CNT, ticks);
ping_timeout_ms = vcpu_stall_config.stall_timeout_sec *
MSEC_PER_SEC / 2;
hrtimer_forward_now(hrtimer,
ms_to_ktime(ping_timeout_ms));
return HRTIMER_RESTART;
}
static int start_stall_detector_cpu(unsigned int cpu)
{
u32 ticks, ping_timeout_ms;
struct vcpu_stall_priv *vcpu_stall_detector =
this_cpu_ptr(vcpu_stall_detectors);
struct hrtimer *vcpu_hrtimer = &vcpu_stall_detector->vcpu_hrtimer;
vcpu_stall_reg_write(cpu, VCPU_STALL_REG_CLOCK_FREQ_HZ,
vcpu_stall_config.clock_freq_hz);
/* Compute the number of ticks required for the stall detector
* counter register based on the internal clock frequency and the
* timeout value given from the device tree.
*/
ticks = vcpu_stall_config.clock_freq_hz *
vcpu_stall_config.stall_timeout_sec;
vcpu_stall_reg_write(cpu, VCPU_STALL_REG_LOAD_CNT, ticks);
/* Enable the internal clock and start the stall detector */
vcpu_stall_reg_write(cpu, VCPU_STALL_REG_STATUS, 1);
/* Pet the stall detector at half of its expiration timeout
* to prevent spurious resets.
*/
ping_timeout_ms = vcpu_stall_config.stall_timeout_sec *
MSEC_PER_SEC / 2;
hrtimer_init(vcpu_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
vcpu_hrtimer->function = vcpu_stall_detect_timer_fn;
vcpu_stall_detector->is_initialized = true;
hrtimer_start(vcpu_hrtimer, ms_to_ktime(ping_timeout_ms),
HRTIMER_MODE_REL_PINNED);
return 0;
}
static int stop_stall_detector_cpu(unsigned int cpu)
{
struct vcpu_stall_priv *vcpu_stall_detector =
per_cpu_ptr(vcpu_stall_detectors, cpu);
if (!vcpu_stall_detector->is_initialized)
return 0;
/* Disable the stall detector for the current CPU */
hrtimer_cancel(&vcpu_stall_detector->vcpu_hrtimer);
vcpu_stall_reg_write(cpu, VCPU_STALL_REG_STATUS, 0);
vcpu_stall_detector->is_initialized = false;
return 0;
}
static int vcpu_stall_detect_probe(struct platform_device *pdev)
{
int ret;
struct resource *r;
void __iomem *membase;
u32 clock_freq_hz = VCPU_STALL_DEFAULT_CLOCK_HZ;
u32 stall_timeout_sec = VCPU_STALL_DEFAULT_TIMEOUT_SEC;
struct device_node *np = pdev->dev.of_node;
vcpu_stall_detectors = devm_alloc_percpu(&pdev->dev,
typeof(struct vcpu_stall_priv));
if (!vcpu_stall_detectors)
return -ENOMEM;
membase = devm_platform_get_and_ioremap_resource(pdev, 0, &r);
if (IS_ERR(membase)) {
dev_err(&pdev->dev, "Failed to get memory resource\n");
return PTR_ERR(membase);
}
if (!of_property_read_u32(np, "clock-frequency", &clock_freq_hz)) {
if (!(clock_freq_hz > 0 &&
clock_freq_hz < VCPU_STALL_MAX_CLOCK_HZ)) {
dev_warn(&pdev->dev, "clk out of range\n");
clock_freq_hz = VCPU_STALL_DEFAULT_CLOCK_HZ;
}
}
if (!of_property_read_u32(np, "timeout-sec", &stall_timeout_sec)) {
if (!(stall_timeout_sec > 0 &&
stall_timeout_sec < VCPU_STALL_MAX_TIMEOUT_SEC)) {
dev_warn(&pdev->dev, "stall timeout out of range\n");
stall_timeout_sec = VCPU_STALL_DEFAULT_TIMEOUT_SEC;
}
}
vcpu_stall_config = (struct vcpu_stall_detect_config) {
.membase = membase,
.clock_freq_hz = clock_freq_hz,
.stall_timeout_sec = stall_timeout_sec
};
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"virt/vcpu_stall_detector:online",
start_stall_detector_cpu,
stop_stall_detector_cpu);
if (ret < 0) {
dev_err(&pdev->dev, "failed to install cpu hotplug");
goto err;
}
vcpu_stall_config.hp_online = ret;
return 0;
err:
return ret;
}
static int vcpu_stall_detect_remove(struct platform_device *pdev)
{
int cpu;
cpuhp_remove_state(vcpu_stall_config.hp_online);
for_each_possible_cpu(cpu)
stop_stall_detector_cpu(cpu);
return 0;
}
static const struct of_device_id vcpu_stall_detect_of_match[] = {
{ .compatible = "qemu,vcpu-stall-detector", },
{}
};
MODULE_DEVICE_TABLE(of, vcpu_stall_detect_of_match);
static struct platform_driver vcpu_stall_detect_driver = {
.probe = vcpu_stall_detect_probe,
.remove = vcpu_stall_detect_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = vcpu_stall_detect_of_match,
},
};
module_platform_driver(vcpu_stall_detect_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Ene <[email protected]>");
MODULE_DESCRIPTION("VCPU stall detector");
| linux-master | drivers/misc/vcpu_stall_detector.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of the ROHM BH1770GLC / OSRAM SFH7770 sensor driver.
* Chip is combined proximity and ambient light sensor.
*
* Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
*
* Contact: Samu Onkalo <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/platform_data/bh1770glc.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/slab.h>
#define BH1770_ALS_CONTROL 0x80 /* ALS operation mode control */
#define BH1770_PS_CONTROL 0x81 /* PS operation mode control */
#define BH1770_I_LED 0x82 /* active LED and LED1, LED2 current */
#define BH1770_I_LED3 0x83 /* LED3 current setting */
#define BH1770_ALS_PS_MEAS 0x84 /* Forced mode trigger */
#define BH1770_PS_MEAS_RATE 0x85 /* PS meas. rate at stand alone mode */
#define BH1770_ALS_MEAS_RATE 0x86 /* ALS meas. rate at stand alone mode */
#define BH1770_PART_ID 0x8a /* Part number and revision ID */
#define BH1770_MANUFACT_ID 0x8b /* Manufacturer ID */
#define BH1770_ALS_DATA_0 0x8c /* ALS DATA low byte */
#define BH1770_ALS_DATA_1 0x8d /* ALS DATA high byte */
#define BH1770_ALS_PS_STATUS 0x8e /* Measurement data and int status */
#define BH1770_PS_DATA_LED1 0x8f /* PS data from LED1 */
#define BH1770_PS_DATA_LED2 0x90 /* PS data from LED2 */
#define BH1770_PS_DATA_LED3 0x91 /* PS data from LED3 */
#define BH1770_INTERRUPT 0x92 /* Interrupt setting */
#define BH1770_PS_TH_LED1 0x93 /* PS interrupt threshold for LED1 */
#define BH1770_PS_TH_LED2 0x94 /* PS interrupt threshold for LED2 */
#define BH1770_PS_TH_LED3 0x95 /* PS interrupt threshold for LED3 */
#define BH1770_ALS_TH_UP_0 0x96 /* ALS upper threshold low byte */
#define BH1770_ALS_TH_UP_1 0x97 /* ALS upper threshold high byte */
#define BH1770_ALS_TH_LOW_0 0x98 /* ALS lower threshold low byte */
#define BH1770_ALS_TH_LOW_1 0x99 /* ALS lower threshold high byte */
/* MANUFACT_ID */
#define BH1770_MANUFACT_ROHM 0x01
#define BH1770_MANUFACT_OSRAM 0x03
/* PART_ID */
#define BH1770_PART 0x90
#define BH1770_PART_MASK 0xf0
#define BH1770_REV_MASK 0x0f
#define BH1770_REV_SHIFT 0
#define BH1770_REV_0 0x00
#define BH1770_REV_1 0x01
/* Operating modes for both */
#define BH1770_STANDBY 0x00
#define BH1770_FORCED 0x02
#define BH1770_STANDALONE 0x03
#define BH1770_SWRESET (0x01 << 2)
#define BH1770_PS_TRIG_MEAS (1 << 0)
#define BH1770_ALS_TRIG_MEAS (1 << 1)
/* Interrupt control */
#define BH1770_INT_OUTPUT_MODE (1 << 3) /* 0 = latched */
#define BH1770_INT_POLARITY (1 << 2) /* 1 = active high */
#define BH1770_INT_ALS_ENA (1 << 1)
#define BH1770_INT_PS_ENA (1 << 0)
/* Interrupt status */
#define BH1770_INT_LED1_DATA (1 << 0)
#define BH1770_INT_LED1_INT (1 << 1)
#define BH1770_INT_LED2_DATA (1 << 2)
#define BH1770_INT_LED2_INT (1 << 3)
#define BH1770_INT_LED3_DATA (1 << 4)
#define BH1770_INT_LED3_INT (1 << 5)
#define BH1770_INT_LEDS_INT ((1 << 1) | (1 << 3) | (1 << 5))
#define BH1770_INT_ALS_DATA (1 << 6)
#define BH1770_INT_ALS_INT (1 << 7)
/* Led channels */
#define BH1770_LED1 0x00
#define BH1770_DISABLE 0
#define BH1770_ENABLE 1
#define BH1770_PROX_CHANNELS 1
#define BH1770_LUX_DEFAULT_RATE 1 /* Index to lux rate table */
#define BH1770_PROX_DEFAULT_RATE 1 /* Direct HW value =~ 50Hz */
#define BH1770_PROX_DEF_RATE_THRESH 6 /* Direct HW value =~ 5 Hz */
#define BH1770_STARTUP_DELAY 50
#define BH1770_RESET_TIME 10
#define BH1770_TIMEOUT 2100 /* Timeout in 2.1 seconds */
#define BH1770_LUX_RANGE 65535
#define BH1770_PROX_RANGE 255
#define BH1770_COEF_SCALER 1024
#define BH1770_CALIB_SCALER 8192
#define BH1770_LUX_NEUTRAL_CALIB_VALUE (1 * BH1770_CALIB_SCALER)
#define BH1770_LUX_DEF_THRES 1000
#define BH1770_PROX_DEF_THRES 70
#define BH1770_PROX_DEF_ABS_THRES 100
#define BH1770_DEFAULT_PERSISTENCE 10
#define BH1770_PROX_MAX_PERSISTENCE 50
#define BH1770_LUX_GA_SCALE 16384
#define BH1770_LUX_CF_SCALE 2048 /* CF ChipFactor */
#define BH1770_NEUTRAL_CF BH1770_LUX_CF_SCALE
#define BH1770_LUX_CORR_SCALE 4096
#define PROX_ABOVE_THRESHOLD 1
#define PROX_BELOW_THRESHOLD 0
#define PROX_IGNORE_LUX_LIMIT 500
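/*
 * Reported lux = raw * lux_corr / BH1770_LUX_CORR_SCALE, where lux_corr
 * combines the glass attenuation factor (lux_ga / BH1770_LUX_GA_SCALE),
 * the chip factor (lux_cf / BH1770_LUX_CF_SCALE) and the per-device
 * calibration value (lux_calib / BH1770_CALIB_SCALER).
 */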
struct bh1770_chip {
struct bh1770_platform_data *pdata;
char chipname[10];
u8 revision;
struct i2c_client *client;
struct regulator_bulk_data regs[2];
struct mutex mutex; /* avoid parallel access */
wait_queue_head_t wait;
bool int_mode_prox;
bool int_mode_lux;
struct delayed_work prox_work;
u32 lux_cf; /* Chip specific factor */
u32 lux_ga;
u32 lux_calib;
int lux_rate_index;
u32 lux_corr;
u16 lux_data_raw;
u16 lux_threshold_hi;
u16 lux_threshold_lo;
u16 lux_thres_hi_onchip;
u16 lux_thres_lo_onchip;
bool lux_wait_result;
int prox_enable_count;
u16 prox_coef;
u16 prox_const;
int prox_rate;
int prox_rate_threshold;
u8 prox_persistence;
u8 prox_persistence_counter;
u8 prox_data;
u8 prox_threshold;
u8 prox_threshold_hw;
bool prox_force_update;
u8 prox_abs_thres;
u8 prox_led;
};
static const char reg_vcc[] = "Vcc";
static const char reg_vleds[] = "Vleds";
/*
* Supported stand alone rates in ms from chip data sheet
* {10, 20, 30, 40, 70, 100, 200, 500, 1000, 2000};
*/
static const s16 prox_rates_hz[] = {100, 50, 33, 25, 14, 10, 5, 2};
static const s16 prox_rates_ms[] = {10, 20, 30, 40, 70, 100, 200, 500};
/*
* Supported stand alone rates in ms from chip data sheet
* {100, 200, 500, 1000, 2000};
*/
static const s16 lux_rates_hz[] = {10, 5, 2, 1, 0};
/*
* interrupt control functions are called while keeping chip->mutex
* excluding module probe / remove
*/
static inline int bh1770_lux_interrupt_control(struct bh1770_chip *chip,
int lux)
{
chip->int_mode_lux = lux;
/* Set interrupt modes, interrupt active low, latched */
return i2c_smbus_write_byte_data(chip->client,
BH1770_INTERRUPT,
(lux << 1) | chip->int_mode_prox);
}
static inline int bh1770_prox_interrupt_control(struct bh1770_chip *chip,
int ps)
{
chip->int_mode_prox = ps;
return i2c_smbus_write_byte_data(chip->client,
BH1770_INTERRUPT,
(chip->int_mode_lux << 1) | (ps << 0));
}
/* chip->mutex is always kept here */
static int bh1770_lux_rate(struct bh1770_chip *chip, int rate_index)
{
/* sysfs may call this when the chip is powered off */
if (pm_runtime_suspended(&chip->client->dev))
return 0;
/* Proper proximity response needs fastest lux rate (100ms) */
if (chip->prox_enable_count)
rate_index = 0;
return i2c_smbus_write_byte_data(chip->client,
BH1770_ALS_MEAS_RATE,
rate_index);
}
static int bh1770_prox_rate(struct bh1770_chip *chip, int mode)
{
int rate;
rate = (mode == PROX_ABOVE_THRESHOLD) ?
chip->prox_rate_threshold : chip->prox_rate;
return i2c_smbus_write_byte_data(chip->client,
BH1770_PS_MEAS_RATE,
rate);
}
/* InfraredLED is controlled by the chip during proximity scanning */
static inline int bh1770_led_cfg(struct bh1770_chip *chip)
{
/* LED cfg, current for leds 1 and 2 */
return i2c_smbus_write_byte_data(chip->client,
BH1770_I_LED,
(BH1770_LED1 << 6) |
(BH1770_LED_5mA << 3) |
chip->prox_led);
}
/*
 * The following two functions convert raw ps values from HW to normalized
 * values. The purpose is to compensate for differences between sensor
 * versions and variants so that the result means about the same across
 * versions.
*/
static inline u8 bh1770_psraw_to_adjusted(struct bh1770_chip *chip, u8 psraw)
{
u16 adjusted;
adjusted = (u16)(((u32)(psraw + chip->prox_const) * chip->prox_coef) /
BH1770_COEF_SCALER);
if (adjusted > BH1770_PROX_RANGE)
adjusted = BH1770_PROX_RANGE;
return adjusted;
}
static inline u8 bh1770_psadjusted_to_raw(struct bh1770_chip *chip, u8 ps)
{
u16 raw;
raw = (((u32)ps * BH1770_COEF_SCALER) / chip->prox_coef);
if (raw > chip->prox_const)
raw = raw - chip->prox_const;
else
raw = 0;
return raw;
}
/*
 * bh1770_lux_raw_to_adjusted() and bh1770_lux_adjusted_to_raw() below
 * convert raw lux values from HW to normalized values. The purpose is to
 * compensate for differences between sensor versions and variants so that
 * the result means about the same across versions. chip->mutex is kept
 * when these are called.
*/
static int bh1770_prox_set_threshold(struct bh1770_chip *chip)
{
u8 tmp = 0;
/* sysfs may call this when the chip is powered off */
if (pm_runtime_suspended(&chip->client->dev))
return 0;
tmp = bh1770_psadjusted_to_raw(chip, chip->prox_threshold);
chip->prox_threshold_hw = tmp;
return i2c_smbus_write_byte_data(chip->client, BH1770_PS_TH_LED1,
tmp);
}
static inline u16 bh1770_lux_raw_to_adjusted(struct bh1770_chip *chip, u16 raw)
{
u32 lux;
lux = ((u32)raw * chip->lux_corr) / BH1770_LUX_CORR_SCALE;
return min(lux, (u32)BH1770_LUX_RANGE);
}
static inline u16 bh1770_lux_adjusted_to_raw(struct bh1770_chip *chip,
u16 adjusted)
{
return (u32)adjusted * BH1770_LUX_CORR_SCALE / chip->lux_corr;
}
/* chip->mutex is kept when this is called */
static int bh1770_lux_update_thresholds(struct bh1770_chip *chip,
u16 threshold_hi, u16 threshold_lo)
{
u8 data[4];
int ret;
/* sysfs may call this when the chip is powered off */
if (pm_runtime_suspended(&chip->client->dev))
return 0;
/*
* Compensate threshold values with the correction factors if not
* set to minimum or maximum.
* Min & max values disables interrupts.
*/
if (threshold_hi != BH1770_LUX_RANGE && threshold_hi != 0)
threshold_hi = bh1770_lux_adjusted_to_raw(chip, threshold_hi);
if (threshold_lo != BH1770_LUX_RANGE && threshold_lo != 0)
threshold_lo = bh1770_lux_adjusted_to_raw(chip, threshold_lo);
if (chip->lux_thres_hi_onchip == threshold_hi &&
chip->lux_thres_lo_onchip == threshold_lo)
return 0;
chip->lux_thres_hi_onchip = threshold_hi;
chip->lux_thres_lo_onchip = threshold_lo;
data[0] = threshold_hi;
data[1] = threshold_hi >> 8;
data[2] = threshold_lo;
data[3] = threshold_lo >> 8;
ret = i2c_smbus_write_i2c_block_data(chip->client,
BH1770_ALS_TH_UP_0,
ARRAY_SIZE(data),
data);
return ret;
}
static int bh1770_lux_get_result(struct bh1770_chip *chip)
{
u16 data;
int ret;
ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_0);
if (ret < 0)
return ret;
data = ret & 0xff;
ret = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_DATA_1);
if (ret < 0)
return ret;
chip->lux_data_raw = data | ((ret & 0xff) << 8);
return 0;
}
/* Calculate correction value which contains chip and device specific parts */
static u32 bh1770_get_corr_value(struct bh1770_chip *chip)
{
u32 tmp;
/* Impact of glass attenuation correction */
tmp = (BH1770_LUX_CORR_SCALE * chip->lux_ga) / BH1770_LUX_GA_SCALE;
/* Impact of chip factor correction */
tmp = (tmp * chip->lux_cf) / BH1770_LUX_CF_SCALE;
/* Impact of Device specific calibration correction */
tmp = (tmp * chip->lux_calib) / BH1770_CALIB_SCALER;
return tmp;
}
static int bh1770_lux_read_result(struct bh1770_chip *chip)
{
bh1770_lux_get_result(chip);
return bh1770_lux_raw_to_adjusted(chip, chip->lux_data_raw);
}
/*
* Chip on / off functions are called while keeping mutex except probe
* or remove phase
*/
static int bh1770_chip_on(struct bh1770_chip *chip)
{
int ret = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
chip->regs);
if (ret < 0)
return ret;
usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
/* Reset the chip */
i2c_smbus_write_byte_data(chip->client, BH1770_ALS_CONTROL,
BH1770_SWRESET);
usleep_range(BH1770_RESET_TIME, BH1770_RESET_TIME * 2);
/*
 * ALS is always started since proximity needs ALS results
 * for reliability estimation.
* Let's assume dark until the first ALS measurement is ready.
*/
chip->lux_data_raw = 0;
chip->prox_data = 0;
ret = i2c_smbus_write_byte_data(chip->client,
BH1770_ALS_CONTROL, BH1770_STANDALONE);
/* Assume reset defaults */
chip->lux_thres_hi_onchip = BH1770_LUX_RANGE;
chip->lux_thres_lo_onchip = 0;
return ret;
}
static void bh1770_chip_off(struct bh1770_chip *chip)
{
i2c_smbus_write_byte_data(chip->client,
BH1770_INTERRUPT, BH1770_DISABLE);
i2c_smbus_write_byte_data(chip->client,
BH1770_ALS_CONTROL, BH1770_STANDBY);
i2c_smbus_write_byte_data(chip->client,
BH1770_PS_CONTROL, BH1770_STANDBY);
regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
}
/* chip->mutex is kept when this is called */
static int bh1770_prox_mode_control(struct bh1770_chip *chip)
{
if (chip->prox_enable_count) {
chip->prox_force_update = true; /* Force immediate update */
bh1770_lux_rate(chip, chip->lux_rate_index);
bh1770_prox_set_threshold(chip);
bh1770_led_cfg(chip);
bh1770_prox_rate(chip, PROX_BELOW_THRESHOLD);
bh1770_prox_interrupt_control(chip, BH1770_ENABLE);
i2c_smbus_write_byte_data(chip->client,
BH1770_PS_CONTROL, BH1770_STANDALONE);
} else {
chip->prox_data = 0;
bh1770_lux_rate(chip, chip->lux_rate_index);
bh1770_prox_interrupt_control(chip, BH1770_DISABLE);
i2c_smbus_write_byte_data(chip->client,
BH1770_PS_CONTROL, BH1770_STANDBY);
}
return 0;
}
/* chip->mutex is kept when this is called */
static int bh1770_prox_read_result(struct bh1770_chip *chip)
{
int ret;
bool above;
u8 mode;
ret = i2c_smbus_read_byte_data(chip->client, BH1770_PS_DATA_LED1);
if (ret < 0)
goto out;
if (ret > chip->prox_threshold_hw)
above = true;
else
above = false;
/*
 * When the ALS level goes above the limit, the proximity result may be
 * a false proximity, so ignore the result. With real proximity there is
 * a shadow causing low ALS levels.
*/
if (chip->lux_data_raw > PROX_IGNORE_LUX_LIMIT)
ret = 0;
chip->prox_data = bh1770_psraw_to_adjusted(chip, ret);
/* Strong proximity level or force mode requires immediate response */
if (chip->prox_data >= chip->prox_abs_thres ||
chip->prox_force_update)
chip->prox_persistence_counter = chip->prox_persistence;
chip->prox_force_update = false;
/* Persistence filtering to reduce false proximity events */
if (likely(above)) {
if (chip->prox_persistence_counter < chip->prox_persistence) {
chip->prox_persistence_counter++;
ret = -ENODATA;
} else {
mode = PROX_ABOVE_THRESHOLD;
ret = 0;
}
} else {
chip->prox_persistence_counter = 0;
mode = PROX_BELOW_THRESHOLD;
chip->prox_data = 0;
ret = 0;
}
/* Set proximity detection rate based on above or below value */
if (ret == 0) {
bh1770_prox_rate(chip, mode);
sysfs_notify(&chip->client->dev.kobj, NULL, "prox0_raw");
}
out:
return ret;
}
static int bh1770_detect(struct bh1770_chip *chip)
{
struct i2c_client *client = chip->client;
s32 ret;
u8 manu, part;
ret = i2c_smbus_read_byte_data(client, BH1770_MANUFACT_ID);
if (ret < 0)
goto error;
manu = (u8)ret;
ret = i2c_smbus_read_byte_data(client, BH1770_PART_ID);
if (ret < 0)
goto error;
part = (u8)ret;
chip->revision = (part & BH1770_REV_MASK) >> BH1770_REV_SHIFT;
chip->prox_coef = BH1770_COEF_SCALER;
chip->prox_const = 0;
chip->lux_cf = BH1770_NEUTRAL_CF;
if ((manu == BH1770_MANUFACT_ROHM) &&
((part & BH1770_PART_MASK) == BH1770_PART)) {
snprintf(chip->chipname, sizeof(chip->chipname), "BH1770GLC");
return 0;
}
if ((manu == BH1770_MANUFACT_OSRAM) &&
((part & BH1770_PART_MASK) == BH1770_PART)) {
snprintf(chip->chipname, sizeof(chip->chipname), "SFH7770");
/* Values selected by comparing different versions */
chip->prox_coef = 819; /* 0.8 * BH1770_COEF_SCALER */
chip->prox_const = 40;
return 0;
}
ret = -ENODEV;
error:
dev_dbg(&client->dev, "BH1770 or SFH7770 not found\n");
return ret;
}
/*
* This work is re-scheduled at every proximity interrupt.
 * If this work runs, it means that there hasn't been a proximity
 * interrupt in time, and the situation is handled as no-proximity.
 * It would be nice to have a low-threshold interrupt, or an interrupt
 * when the measurement and hi-threshold are both 0, but neither exists.
 * This is a workaround for the missing HW feature.
*/
static void bh1770_prox_work(struct work_struct *work)
{
struct bh1770_chip *chip =
container_of(work, struct bh1770_chip, prox_work.work);
mutex_lock(&chip->mutex);
bh1770_prox_read_result(chip);
mutex_unlock(&chip->mutex);
}
/* This is threaded irq handler */
static irqreturn_t bh1770_irq(int irq, void *data)
{
struct bh1770_chip *chip = data;
int status;
int rate = 0;
mutex_lock(&chip->mutex);
status = i2c_smbus_read_byte_data(chip->client, BH1770_ALS_PS_STATUS);
/* Acknowledge interrupt by reading this register */
i2c_smbus_read_byte_data(chip->client, BH1770_INTERRUPT);
/*
* Check if there is fresh data available for als.
* If this is the very first data, update thresholds after that.
*/
if (status & BH1770_INT_ALS_DATA) {
bh1770_lux_get_result(chip);
if (unlikely(chip->lux_wait_result)) {
chip->lux_wait_result = false;
wake_up(&chip->wait);
bh1770_lux_update_thresholds(chip,
chip->lux_threshold_hi,
chip->lux_threshold_lo);
}
}
/* Disable interrupt logic to guarantee acknowledgement */
i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
(0 << 1) | (0 << 0));
if ((status & BH1770_INT_ALS_INT))
sysfs_notify(&chip->client->dev.kobj, NULL, "lux0_input");
if (chip->int_mode_prox && (status & BH1770_INT_LEDS_INT)) {
rate = prox_rates_ms[chip->prox_rate_threshold];
bh1770_prox_read_result(chip);
}
/* Re-enable interrupt logic */
i2c_smbus_write_byte_data(chip->client, BH1770_INTERRUPT,
(chip->int_mode_lux << 1) |
(chip->int_mode_prox << 0));
mutex_unlock(&chip->mutex);
/*
* Can't cancel work while keeping mutex since the work uses the
* same mutex.
*/
if (rate) {
/*
* Simulate missing no-proximity interrupt 50ms after the
* next expected interrupt time.
*/
cancel_delayed_work_sync(&chip->prox_work);
schedule_delayed_work(&chip->prox_work,
msecs_to_jiffies(rate + 50));
}
return IRQ_HANDLED;
}
static ssize_t bh1770_power_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
ssize_t ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
if (value) {
pm_runtime_get_sync(dev);
ret = bh1770_lux_rate(chip, chip->lux_rate_index);
if (ret < 0) {
pm_runtime_put(dev);
goto leave;
}
ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
if (ret < 0) {
pm_runtime_put(dev);
goto leave;
}
/* This causes interrupt after the next measurement cycle */
bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
BH1770_LUX_DEF_THRES);
/* Inform that we are waiting for a result from ALS */
chip->lux_wait_result = true;
bh1770_prox_mode_control(chip);
} else if (!pm_runtime_suspended(dev)) {
pm_runtime_put(dev);
}
ret = count;
leave:
mutex_unlock(&chip->mutex);
return ret;
}
static ssize_t bh1770_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
}
static ssize_t bh1770_lux_result_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
ssize_t ret;
long timeout;
if (pm_runtime_suspended(dev))
return -EIO; /* Chip is not enabled at all */
timeout = wait_event_interruptible_timeout(chip->wait,
!chip->lux_wait_result,
msecs_to_jiffies(BH1770_TIMEOUT));
if (!timeout)
return -EIO;
mutex_lock(&chip->mutex);
ret = sprintf(buf, "%d\n", bh1770_lux_read_result(chip));
mutex_unlock(&chip->mutex);
return ret;
}
static ssize_t bh1770_lux_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", BH1770_LUX_RANGE);
}
static ssize_t bh1770_prox_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
/* Assume no proximity. Sensor will tell real state soon */
if (!chip->prox_enable_count)
chip->prox_data = 0;
if (value)
chip->prox_enable_count++;
else if (chip->prox_enable_count > 0)
chip->prox_enable_count--;
else
goto leave;
/* Run control only when chip is powered on */
if (!pm_runtime_suspended(dev))
bh1770_prox_mode_control(chip);
leave:
mutex_unlock(&chip->mutex);
return count;
}
static ssize_t bh1770_prox_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
ssize_t len;
mutex_lock(&chip->mutex);
len = sprintf(buf, "%d\n", chip->prox_enable_count);
mutex_unlock(&chip->mutex);
return len;
}
static ssize_t bh1770_prox_result_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
ssize_t ret;
mutex_lock(&chip->mutex);
if (chip->prox_enable_count && !pm_runtime_suspended(dev))
ret = sprintf(buf, "%d\n", chip->prox_data);
else
ret = -EIO;
mutex_unlock(&chip->mutex);
return ret;
}
static ssize_t bh1770_prox_range_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", BH1770_PROX_RANGE);
}
static ssize_t bh1770_get_prox_rate_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i;
int pos = 0;
for (i = 0; i < ARRAY_SIZE(prox_rates_hz); i++)
pos += sprintf(buf + pos, "%d ", prox_rates_hz[i]);
sprintf(buf + pos - 1, "\n");
return pos;
}
static ssize_t bh1770_get_prox_rate_above(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate_threshold]);
}
static ssize_t bh1770_get_prox_rate_below(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", prox_rates_hz[chip->prox_rate]);
}
static int bh1770_prox_rate_validate(int rate)
{
int i;
for (i = 0; i < ARRAY_SIZE(prox_rates_hz) - 1; i++)
if (rate >= prox_rates_hz[i])
break;
return i;
}
static ssize_t bh1770_set_prox_rate_above(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
chip->prox_rate_threshold = bh1770_prox_rate_validate(value);
mutex_unlock(&chip->mutex);
return count;
}
static ssize_t bh1770_set_prox_rate_below(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
chip->prox_rate = bh1770_prox_rate_validate(value);
mutex_unlock(&chip->mutex);
return count;
}
static ssize_t bh1770_get_prox_thres(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", chip->prox_threshold);
}
static ssize_t bh1770_set_prox_thres(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
if (value > BH1770_PROX_RANGE)
return -EINVAL;
mutex_lock(&chip->mutex);
chip->prox_threshold = value;
ret = bh1770_prox_set_threshold(chip);
mutex_unlock(&chip->mutex);
if (ret < 0)
return ret;
return count;
}
static ssize_t bh1770_prox_persistence_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", chip->prox_persistence);
}
static ssize_t bh1770_prox_persistence_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
if (value > BH1770_PROX_MAX_PERSISTENCE)
return -EINVAL;
chip->prox_persistence = value;
return len;
}
static ssize_t bh1770_prox_abs_thres_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", chip->prox_abs_thres);
}
static ssize_t bh1770_prox_abs_thres_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
if (value > BH1770_PROX_RANGE)
return -EINVAL;
chip->prox_abs_thres = value;
return len;
}
static ssize_t bh1770_chip_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%s rev %d\n", chip->chipname, chip->revision);
}
static ssize_t bh1770_lux_calib_default_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", BH1770_CALIB_SCALER);
}
static ssize_t bh1770_lux_calib_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
ssize_t len;
mutex_lock(&chip->mutex);
len = sprintf(buf, "%u\n", chip->lux_calib);
mutex_unlock(&chip->mutex);
return len;
}
static ssize_t bh1770_lux_calib_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long value;
u32 old_calib;
u32 new_corr;
int ret;
ret = kstrtoul(buf, 0, &value);
if (ret)
return ret;
mutex_lock(&chip->mutex);
old_calib = chip->lux_calib;
chip->lux_calib = value;
new_corr = bh1770_get_corr_value(chip);
if (new_corr == 0) {
chip->lux_calib = old_calib;
mutex_unlock(&chip->mutex);
return -EINVAL;
}
chip->lux_corr = new_corr;
/* Refresh thresholds on HW after changing correction value */
bh1770_lux_update_thresholds(chip, chip->lux_threshold_hi,
chip->lux_threshold_lo);
mutex_unlock(&chip->mutex);
return len;
}
static ssize_t bh1770_get_lux_rate_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i;
int pos = 0;
for (i = 0; i < ARRAY_SIZE(lux_rates_hz); i++)
pos += sprintf(buf + pos, "%d ", lux_rates_hz[i]);
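/* Overwrite the trailing space left by the loop with a newline */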
sprintf(buf + pos - 1, "\n");
return pos;
}
static ssize_t bh1770_get_lux_rate(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", lux_rates_hz[chip->lux_rate_index]);
}
static ssize_t bh1770_set_lux_rate(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
unsigned long rate_hz;
int ret, i;
ret = kstrtoul(buf, 0, &rate_hz);
if (ret)
return ret;
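/* Pick the first table entry that the requested rate meets or exceeds */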
for (i = 0; i < ARRAY_SIZE(lux_rates_hz) - 1; i++)
if (rate_hz >= lux_rates_hz[i])
break;
mutex_lock(&chip->mutex);
chip->lux_rate_index = i;
ret = bh1770_lux_rate(chip, i);
mutex_unlock(&chip->mutex);
if (ret < 0)
return ret;
return count;
}
static ssize_t bh1770_get_lux_thresh_above(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", chip->lux_threshold_hi);
}
static ssize_t bh1770_get_lux_thresh_below(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", chip->lux_threshold_lo);
}
static ssize_t bh1770_set_lux_thresh(struct bh1770_chip *chip, u16 *target,
const char *buf)
{
unsigned long thresh;
int ret;
ret = kstrtoul(buf, 0, &thresh);
if (ret)
return ret;
if (thresh > BH1770_LUX_RANGE)
return -EINVAL;
mutex_lock(&chip->mutex);
*target = thresh;
/*
* Don't update values in HW if we are still waiting for
* first interrupt to come after device handle open call.
*/
if (!chip->lux_wait_result)
ret = bh1770_lux_update_thresholds(chip,
chip->lux_threshold_hi,
chip->lux_threshold_lo);
mutex_unlock(&chip->mutex);
return ret;
}
static ssize_t bh1770_set_lux_thresh_above(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_hi, buf);
if (ret < 0)
return ret;
return len;
}
static ssize_t bh1770_set_lux_thresh_below(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
struct bh1770_chip *chip = dev_get_drvdata(dev);
int ret = bh1770_set_lux_thresh(chip, &chip->lux_threshold_lo, buf);
if (ret < 0)
return ret;
return len;
}
static DEVICE_ATTR(prox0_raw_en, S_IRUGO | S_IWUSR, bh1770_prox_enable_show,
bh1770_prox_enable_store);
static DEVICE_ATTR(prox0_thresh_above1_value, S_IRUGO | S_IWUSR,
bh1770_prox_abs_thres_show,
bh1770_prox_abs_thres_store);
static DEVICE_ATTR(prox0_thresh_above0_value, S_IRUGO | S_IWUSR,
bh1770_get_prox_thres,
bh1770_set_prox_thres);
static DEVICE_ATTR(prox0_raw, S_IRUGO, bh1770_prox_result_show, NULL);
static DEVICE_ATTR(prox0_sensor_range, S_IRUGO, bh1770_prox_range_show, NULL);
static DEVICE_ATTR(prox0_thresh_above_count, S_IRUGO | S_IWUSR,
bh1770_prox_persistence_show,
bh1770_prox_persistence_store);
static DEVICE_ATTR(prox0_rate_above, S_IRUGO | S_IWUSR,
bh1770_get_prox_rate_above,
bh1770_set_prox_rate_above);
static DEVICE_ATTR(prox0_rate_below, S_IRUGO | S_IWUSR,
bh1770_get_prox_rate_below,
bh1770_set_prox_rate_below);
static DEVICE_ATTR(prox0_rate_avail, S_IRUGO, bh1770_get_prox_rate_avail, NULL);
static DEVICE_ATTR(lux0_calibscale, S_IRUGO | S_IWUSR, bh1770_lux_calib_show,
bh1770_lux_calib_store);
static DEVICE_ATTR(lux0_calibscale_default, S_IRUGO,
bh1770_lux_calib_default_show,
NULL);
static DEVICE_ATTR(lux0_input, S_IRUGO, bh1770_lux_result_show, NULL);
static DEVICE_ATTR(lux0_sensor_range, S_IRUGO, bh1770_lux_range_show, NULL);
static DEVICE_ATTR(lux0_rate, S_IRUGO | S_IWUSR, bh1770_get_lux_rate,
bh1770_set_lux_rate);
static DEVICE_ATTR(lux0_rate_avail, S_IRUGO, bh1770_get_lux_rate_avail, NULL);
static DEVICE_ATTR(lux0_thresh_above_value, S_IRUGO | S_IWUSR,
bh1770_get_lux_thresh_above,
bh1770_set_lux_thresh_above);
static DEVICE_ATTR(lux0_thresh_below_value, S_IRUGO | S_IWUSR,
bh1770_get_lux_thresh_below,
bh1770_set_lux_thresh_below);
static DEVICE_ATTR(chip_id, S_IRUGO, bh1770_chip_id_show, NULL);
static DEVICE_ATTR(power_state, S_IRUGO | S_IWUSR, bh1770_power_state_show,
bh1770_power_state_store);
static struct attribute *sysfs_attrs[] = {
&dev_attr_lux0_calibscale.attr,
&dev_attr_lux0_calibscale_default.attr,
&dev_attr_lux0_input.attr,
&dev_attr_lux0_sensor_range.attr,
&dev_attr_lux0_rate.attr,
&dev_attr_lux0_rate_avail.attr,
&dev_attr_lux0_thresh_above_value.attr,
&dev_attr_lux0_thresh_below_value.attr,
&dev_attr_prox0_raw.attr,
&dev_attr_prox0_sensor_range.attr,
&dev_attr_prox0_raw_en.attr,
&dev_attr_prox0_thresh_above_count.attr,
&dev_attr_prox0_rate_above.attr,
&dev_attr_prox0_rate_below.attr,
&dev_attr_prox0_rate_avail.attr,
&dev_attr_prox0_thresh_above0_value.attr,
&dev_attr_prox0_thresh_above1_value.attr,
&dev_attr_chip_id.attr,
&dev_attr_power_state.attr,
NULL
};
static const struct attribute_group bh1770_attribute_group = {
.attrs = sysfs_attrs
};
static int bh1770_probe(struct i2c_client *client)
{
struct bh1770_chip *chip;
int err;
chip = devm_kzalloc(&client->dev, sizeof *chip, GFP_KERNEL);
if (!chip)
return -ENOMEM;
i2c_set_clientdata(client, chip);
chip->client = client;
mutex_init(&chip->mutex);
init_waitqueue_head(&chip->wait);
INIT_DELAYED_WORK(&chip->prox_work, bh1770_prox_work);
if (client->dev.platform_data == NULL) {
dev_err(&client->dev, "platform data is mandatory\n");
return -EINVAL;
}
chip->pdata = client->dev.platform_data;
chip->lux_calib = BH1770_LUX_NEUTRAL_CALIB_VALUE;
chip->lux_rate_index = BH1770_LUX_DEFAULT_RATE;
chip->lux_threshold_lo = BH1770_LUX_DEF_THRES;
chip->lux_threshold_hi = BH1770_LUX_DEF_THRES;
if (chip->pdata->glass_attenuation == 0)
chip->lux_ga = BH1770_NEUTRAL_GA;
else
chip->lux_ga = chip->pdata->glass_attenuation;
chip->prox_threshold = BH1770_PROX_DEF_THRES;
chip->prox_led = chip->pdata->led_def_curr;
chip->prox_abs_thres = BH1770_PROX_DEF_ABS_THRES;
chip->prox_persistence = BH1770_DEFAULT_PERSISTENCE;
chip->prox_rate_threshold = BH1770_PROX_DEF_RATE_THRESH;
chip->prox_rate = BH1770_PROX_DEFAULT_RATE;
chip->prox_data = 0;
chip->regs[0].supply = reg_vcc;
chip->regs[1].supply = reg_vleds;
err = devm_regulator_bulk_get(&client->dev,
ARRAY_SIZE(chip->regs), chip->regs);
if (err < 0) {
dev_err(&client->dev, "Cannot get regulators\n");
return err;
}
err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
chip->regs);
if (err < 0) {
dev_err(&client->dev, "Cannot enable regulators\n");
return err;
}
usleep_range(BH1770_STARTUP_DELAY, BH1770_STARTUP_DELAY * 2);
err = bh1770_detect(chip);
if (err < 0)
goto fail0;
/* Start chip */
bh1770_chip_on(chip);
pm_runtime_set_active(&client->dev);
pm_runtime_enable(&client->dev);
chip->lux_corr = bh1770_get_corr_value(chip);
if (chip->lux_corr == 0) {
dev_err(&client->dev, "Improper correction values\n");
err = -EINVAL;
goto fail0;
}
if (chip->pdata->setup_resources) {
err = chip->pdata->setup_resources();
if (err) {
err = -EINVAL;
goto fail0;
}
}
err = sysfs_create_group(&chip->client->dev.kobj,
&bh1770_attribute_group);
if (err < 0) {
dev_err(&chip->client->dev, "Sysfs registration failed\n");
goto fail1;
}
/*
* The chip needs a level-triggered interrupt to work. However,
* level triggering does not always behave correctly with power
* management, so request both edge and level triggering.
*/
err = request_threaded_irq(client->irq, NULL,
bh1770_irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
IRQF_TRIGGER_LOW,
"bh1770", chip);
if (err) {
dev_err(&client->dev, "could not get IRQ %d\n",
client->irq);
goto fail2;
}
regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
return err;
fail2:
sysfs_remove_group(&chip->client->dev.kobj,
&bh1770_attribute_group);
fail1:
if (chip->pdata->release_resources)
chip->pdata->release_resources();
fail0:
regulator_bulk_disable(ARRAY_SIZE(chip->regs), chip->regs);
return err;
}
static void bh1770_remove(struct i2c_client *client)
{
struct bh1770_chip *chip = i2c_get_clientdata(client);
free_irq(client->irq, chip);
sysfs_remove_group(&chip->client->dev.kobj,
&bh1770_attribute_group);
if (chip->pdata->release_resources)
chip->pdata->release_resources();
cancel_delayed_work_sync(&chip->prox_work);
if (!pm_runtime_suspended(&client->dev))
bh1770_chip_off(chip);
pm_runtime_disable(&client->dev);
pm_runtime_set_suspended(&client->dev);
}
#ifdef CONFIG_PM_SLEEP
static int bh1770_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_off(chip);
return 0;
}
static int bh1770_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
int ret = 0;
bh1770_chip_on(chip);
if (!pm_runtime_suspended(dev)) {
/*
* If the device was enabled at suspend time, everything is
* expected to come back up smoothly.
*/
ret = bh1770_lux_rate(chip, chip->lux_rate_index);
ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
/* This causes interrupt after the next measurement cycle */
bh1770_lux_update_thresholds(chip, BH1770_LUX_DEF_THRES,
BH1770_LUX_DEF_THRES);
/* Inform that we are waiting for a result from ALS */
chip->lux_wait_result = true;
bh1770_prox_mode_control(chip);
}
return ret;
}
#endif
#ifdef CONFIG_PM
static int bh1770_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_off(chip);
return 0;
}
static int bh1770_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct bh1770_chip *chip = i2c_get_clientdata(client);
bh1770_chip_on(chip);
return 0;
}
#endif
static const struct i2c_device_id bh1770_id[] = {
{"bh1770glc", 0 },
{"sfh7770", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, bh1770_id);
static const struct dev_pm_ops bh1770_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(bh1770_suspend, bh1770_resume)
SET_RUNTIME_PM_OPS(bh1770_runtime_suspend, bh1770_runtime_resume, NULL)
};
static struct i2c_driver bh1770_driver = {
.driver = {
.name = "bh1770glc",
.pm = &bh1770_pm_ops,
},
.probe = bh1770_probe,
.remove = bh1770_remove,
.id_table = bh1770_id,
};
module_i2c_driver(bh1770_driver);
MODULE_DESCRIPTION("BH1770GLC / SFH7770 combined ALS and proximity sensor");
MODULE_AUTHOR("Samu Onkalo, Nokia Corporation");
MODULE_LICENSE("GPL v2");
| linux-master | drivers/misc/bh1770glc.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Rui FENG <[email protected]>
* Wei WANG <[email protected]>
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/rtsx_pci.h>
#include "rts5261.h"
#include "rtsx_pcr.h"
static u8 rts5261_get_ic_version(struct rtsx_pcr *pcr)
{
u8 val;
rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
return val & IC_VERSION_MASK;
}
static void rts5261_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
{
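/* Pad driving strength tables indexed by drive select; entries are {CLK, CMD, DAT} */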
u8 driving_3v3[4][3] = {
{0x96, 0x96, 0x96},
{0x96, 0x96, 0x96},
{0x7F, 0x7F, 0x7F},
{0x13, 0x13, 0x13},
};
u8 driving_1v8[4][3] = {
{0xB3, 0xB3, 0xB3},
{0x3A, 0x3A, 0x3A},
{0xE6, 0xE6, 0xE6},
{0x99, 0x99, 0x99},
};
u8 (*driving)[3], drive_sel;
if (voltage == OUTPUT_3V3) {
driving = driving_3v3;
drive_sel = pcr->sd30_drive_sel_3v3;
} else {
driving = driving_1v8;
drive_sel = pcr->sd30_drive_sel_1v8;
}
rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
0xFF, driving[drive_sel][0]);
rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
0xFF, driving[drive_sel][1]);
rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
0xFF, driving[drive_sel][2]);
}
static void rts5261_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
/* Set relink_time to 0 */
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
RELINK_TIME_MASK, 0);
if (pm_state == HOST_ENTER_S3)
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
if (!runtime) {
rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, 0);
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
} else {
rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
FORCE_PM_CONTROL | FORCE_PM_VALUE, 0);
rtsx_pci_write_register(pcr, RTS5261_FW_CTL,
RTS5261_INFORM_RTD3_COLD, RTS5261_INFORM_RTD3_COLD);
rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
RTS5261_FORCE_PRSNT_LOW, RTS5261_FORCE_PRSNT_LOW);
}
rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
SSC_POWER_DOWN, SSC_POWER_DOWN);
}
static int rts5261_enable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL,
LED_SHINE_MASK, LED_SHINE_EN);
}
static int rts5261_disable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL,
LED_SHINE_MASK, LED_SHINE_DISABLE);
}
static int rts5261_turn_on_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL,
0x02, 0x02);
}
static int rts5261_turn_off_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL,
0x02, 0x00);
}
/* SD Pull Control Enable:
* SD_DAT[3:0] ==> pull up
* SD_CD ==> pull up
* SD_WP ==> pull up
* SD_CMD ==> pull up
* SD_CLK ==> pull down
*/
static const u32 rts5261_sd_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
0,
};
/* SD Pull Control Disable:
* SD_DAT[3:0] ==> pull down
* SD_CD ==> pull up
* SD_WP ==> pull down
* SD_CMD ==> pull down
* SD_CLK ==> pull down
*/
static const u32 rts5261_sd_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
0,
};
static int rts5261_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
{
rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
| SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
return 0;
}
static int rts5261_card_power_on(struct rtsx_pcr *pcr, int card)
{
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en)
rtsx_pci_enable_ocp(pcr);
rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD);
rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG1,
RTS5261_LDO1_TUNE_MASK, RTS5261_LDO1_33);
rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
RTS5261_LDO1_POWERON, RTS5261_LDO1_POWERON);
rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
RTS5261_LDO3318_POWERON, RTS5261_LDO3318_POWERON);
msleep(20);
rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
/* Initialize SD_CFG1 register */
rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
0xFF, SD20_RX_POS_EDGE);
rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
SD_STOP | SD_CLR_ERR);
/* Reset SD_CFG3 register */
rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
SD30_CLK_STOP_CFG0, 0);
if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
rts5261_sd_set_sample_push_timing_sd30(pcr);
return 0;
}
static int rts5261_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
int err;
u16 val = 0;
rtsx_pci_write_register(pcr, RTS5261_CARD_PWR_CTL,
RTS5261_PUPDC, RTS5261_PUPDC);
switch (voltage) {
case OUTPUT_3V3:
rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
val |= PHY_TUNE_SDBUS_33;
err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
if (err < 0)
return err;
rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG,
RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_33);
rtsx_pci_write_register(pcr, SD_PAD_CTL,
SD_IO_USING_1V8, 0);
break;
case OUTPUT_1V8:
rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
val &= ~PHY_TUNE_SDBUS_33;
err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
if (err < 0)
return err;
rtsx_pci_write_register(pcr, RTS5261_DV3318_CFG,
RTS5261_DV3318_TUNE_MASK, RTS5261_DV3318_18);
rtsx_pci_write_register(pcr, SD_PAD_CTL,
SD_IO_USING_1V8, SD_IO_USING_1V8);
break;
default:
return -EINVAL;
}
/* set pad drive */
rts5261_fill_driving(pcr, voltage);
return 0;
}
static void rts5261_stop_cmd(struct rtsx_pcr *pcr)
{
rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
RTS5260_DMA_RST | RTS5260_ADMA3_RST,
RTS5260_DMA_RST | RTS5260_ADMA3_RST);
rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
}
static void rts5261_card_before_power_off(struct rtsx_pcr *pcr)
{
rts5261_stop_cmd(pcr);
rts5261_switch_output_voltage(pcr, OUTPUT_3V3);
}
static void rts5261_enable_ocp(struct rtsx_pcr *pcr)
{
u8 val = 0;
val = SD_OCP_INT_EN | SD_DETECT_EN;
rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN,
RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN);
rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
}
static void rts5261_disable_ocp(struct rtsx_pcr *pcr)
{
u8 mask = 0;
mask = SD_OCP_INT_EN | SD_DETECT_EN;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0);
}
static int rts5261_card_power_off(struct rtsx_pcr *pcr, int card)
{
int err = 0;
rts5261_card_before_power_off(pcr);
err = rtsx_pci_write_register(pcr, RTS5261_LDO1233318_POW_CTL,
RTS5261_LDO_POWERON_MASK, 0);
rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
CFG_SD_POW_AUTO_PD, 0);
if (pcr->option.ocp_en)
rtsx_pci_disable_ocp(pcr);
return err;
}
static void rts5261_init_ocp(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en) {
u8 mask, val;
rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN,
RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN);
rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
RTS5261_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
RTS5261_LDO1_OCP_LMT_THD_MASK,
RTS5261_LDO1_LMT_THD_2000);
mask = SD_OCP_GLITCH_MASK;
val = pcr->hw_param.ocp_glitch;
rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
rts5261_enable_ocp(pcr);
} else {
rtsx_pci_write_register(pcr, RTS5261_LDO1_CFG0,
RTS5261_LDO1_OCP_EN | RTS5261_LDO1_OCP_LMT_EN, 0);
}
}
static void rts5261_clear_ocpstat(struct rtsx_pcr *pcr)
{
u8 mask = 0;
u8 val = 0;
mask = SD_OCP_INT_CLR | SD_OC_CLR;
val = SD_OCP_INT_CLR | SD_OC_CLR;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
udelay(1000);
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
}
static void rts5261_process_ocp(struct rtsx_pcr *pcr)
{
if (!pcr->option.ocp_en)
return;
rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
rts5261_clear_ocpstat(pcr);
rts5261_card_power_off(pcr, RTSX_SD_CARD);
rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
pcr->ocp_stat = 0;
}
}
static void rts5261_init_from_hw(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
u32 lval1, lval2, i;
u16 setting_reg1, setting_reg2;
u8 valid, efuse_valid, tmp;
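/* Power up the efuse block before reading the stored configuration */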
rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
REG_EFUSE_POR | REG_EFUSE_POWER_MASK,
REG_EFUSE_POR | REG_EFUSE_POWERON);
udelay(1);
rtsx_pci_write_register(pcr, RTS5261_EFUSE_ADDR,
RTS5261_EFUSE_ADDR_MASK, 0x00);
rtsx_pci_write_register(pcr, RTS5261_EFUSE_CTL,
RTS5261_EFUSE_ENABLE | RTS5261_EFUSE_MODE_MASK,
RTS5261_EFUSE_ENABLE);
/* Wait transfer end */
for (i = 0; i < MAX_RW_REG_CNT; i++) {
rtsx_pci_read_register(pcr, RTS5261_EFUSE_CTL, &tmp);
if ((tmp & 0x80) == 0)
break;
}
rtsx_pci_read_register(pcr, RTS5261_EFUSE_READ_DATA, &tmp);
efuse_valid = ((tmp & 0x0C) >> 2);
pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid);
pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval2);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, lval2);
/* 0x816 */
valid = (u8)((lval2 >> 16) & 0x03);
rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
REG_EFUSE_POR, 0);
pcr_dbg(pcr, "Disable efuse por!\n");
if (efuse_valid == 2 || efuse_valid == 3) {
if (valid == 3) {
/* Bypass efuse */
setting_reg1 = PCR_SETTING_REG1;
setting_reg2 = PCR_SETTING_REG2;
} else {
/* Use efuse data */
setting_reg1 = PCR_SETTING_REG4;
setting_reg2 = PCR_SETTING_REG5;
}
} else if (efuse_valid == 0) {
// default
setting_reg1 = PCR_SETTING_REG1;
setting_reg2 = PCR_SETTING_REG2;
} else {
return;
}
pci_read_config_dword(pdev, setting_reg2, &lval2);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg2, lval2);
if (!rts5261_vendor_setting_valid(lval2)) {
/* Vendor setting invalid, default to no MMC support */
pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
pcr_dbg(pcr, "skip fetch vendor setting\n");
return;
}
if (!rts5261_reg_check_mmc_support(lval2))
pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
pcr->rtd3_en = rts5261_reg_to_rtd3(lval2);
if (rts5261_reg_check_reverse_socket(lval2))
pcr->flags |= PCR_REVERSE_SOCKET;
pci_read_config_dword(pdev, setting_reg1, &lval1);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1);
pcr->aspm_en = rts5261_reg_to_aspm(lval1);
pcr->sd30_drive_sel_1v8 = rts5261_reg_to_sd30_drive_sel_1v8(lval1);
pcr->sd30_drive_sel_3v3 = rts5261_reg_to_sd30_drive_sel_3v3(lval1);
if (setting_reg1 == PCR_SETTING_REG1) {
/* store setting */
rtsx_pci_write_register(pcr, 0xFF0C, 0xFF, (u8)(lval1 & 0xFF));
rtsx_pci_write_register(pcr, 0xFF0D, 0xFF, (u8)((lval1 >> 8) & 0xFF));
rtsx_pci_write_register(pcr, 0xFF0E, 0xFF, (u8)((lval1 >> 16) & 0xFF));
rtsx_pci_write_register(pcr, 0xFF0F, 0xFF, (u8)((lval1 >> 24) & 0xFF));
rtsx_pci_write_register(pcr, 0xFF10, 0xFF, (u8)(lval2 & 0xFF));
rtsx_pci_write_register(pcr, 0xFF11, 0xFF, (u8)((lval2 >> 8) & 0xFF));
rtsx_pci_write_register(pcr, 0xFF12, 0xFF, (u8)((lval2 >> 16) & 0xFF));
pci_write_config_dword(pdev, PCR_SETTING_REG4, lval1);
lval2 = lval2 & 0x00FFFFFF;
pci_write_config_dword(pdev, PCR_SETTING_REG5, lval2);
}
}
static void rts5261_init_from_cfg(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
int l1ss;
u32 lval;
struct rtsx_cr_option *option = &pcr->option;
l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
if (!l1ss)
return;
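/* Mirror the enabled L1 substates from PCI_L1SS_CTL1 into device flags */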
pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
if (option->ltr_en) {
u16 val;
pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
} else {
option->ltr_enabled = false;
}
}
}
static int rts5261_extra_init_hw(struct rtsx_pcr *pcr)
{
u32 val;
rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
rts5261_init_from_cfg(pcr);
rts5261_init_from_hw(pcr);
/* power off efuse */
rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF);
rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
if (is_version_higher_than(pcr, PID_5261, IC_VER_B)) {
val = rtsx_pci_readl(pcr, RTSX_DUM_REG);
rtsx_pci_writel(pcr, RTSX_DUM_REG, val | 0x1);
}
rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
RTS5261_AUX_CLK_16M_EN, 0);
/* Release PRSNT# */
rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG4,
RTS5261_FORCE_PRSNT_LOW, 0);
rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG);
rtsx_pci_write_register(pcr, PCLK_CTL,
PCLK_MODE_SEL, PCLK_MODE_SEL);
rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN);
/* LED shine disabled, set initial shine cycle period */
rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
/* Configure driving */
rts5261_fill_driving(pcr, OUTPUT_3V3);
if (pcr->flags & PCR_REVERSE_SOCKET)
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
if (pcr->rtd3_en) {
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x01);
rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
FORCE_PM_CONTROL | FORCE_PM_VALUE,
FORCE_PM_CONTROL | FORCE_PM_VALUE);
} else {
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL,
FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
}
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, D3_DELINK_MODE_EN, 0x00);
/* Clear the RTD3_cold entry information */
rtsx_pci_write_register(pcr, RTS5261_FW_CTL,
RTS5261_INFORM_RTD3_COLD, 0);
return 0;
}
static void rts5261_enable_aspm(struct rtsx_pcr *pcr, bool enable)
{
u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
if (pcr->aspm_enabled == enable)
return;
val |= (pcr->aspm_en & 0x02);
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en);
pcr->aspm_enabled = enable;
}
static void rts5261_disable_aspm(struct rtsx_pcr *pcr, bool enable)
{
u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
if (pcr->aspm_enabled == enable)
return;
pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC, 0);
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
udelay(10);
pcr->aspm_enabled = enable;
}
static void rts5261_set_aspm(struct rtsx_pcr *pcr, bool enable)
{
if (enable)
rts5261_enable_aspm(pcr, true);
else
rts5261_disable_aspm(pcr, false);
}
static void rts5261_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
{
struct rtsx_cr_option *option = &pcr->option;
int aspm_L1_1, aspm_L1_2;
u8 val = 0;
aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
if (active) {
/* run, latency: 60us */
if (aspm_L1_1)
val = option->ltr_l1off_snooze_sspwrgate;
} else {
/* l1off, latency: 300us */
if (aspm_L1_2)
val = option->ltr_l1off_sspwrgate;
}
rtsx_set_l1off_sub(pcr, val);
}
static const struct pcr_ops rts5261_pcr_ops = {
.turn_on_led = rts5261_turn_on_led,
.turn_off_led = rts5261_turn_off_led,
.extra_init_hw = rts5261_extra_init_hw,
.enable_auto_blink = rts5261_enable_auto_blink,
.disable_auto_blink = rts5261_disable_auto_blink,
.card_power_on = rts5261_card_power_on,
.card_power_off = rts5261_card_power_off,
.switch_output_voltage = rts5261_switch_output_voltage,
.force_power_down = rts5261_force_power_down,
.stop_cmd = rts5261_stop_cmd,
.set_aspm = rts5261_set_aspm,
.set_l1off_cfg_sub_d0 = rts5261_set_l1off_cfg_sub_d0,
.enable_ocp = rts5261_enable_ocp,
.disable_ocp = rts5261_disable_ocp,
.init_ocp = rts5261_init_ocp,
.process_ocp = rts5261_process_ocp,
.clear_ocpstat = rts5261_clear_ocpstat,
};
static inline u8 double_ssc_depth(u8 depth)
{
return ((depth > 1) ? (depth - 1) : depth);
}
int rts5261_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
int err, clk;
u16 n;
u8 clk_divider, mcu_cnt, div;
static const u8 depth[] = {
[RTSX_SSC_DEPTH_4M] = RTS5261_SSC_DEPTH_4M,
[RTSX_SSC_DEPTH_2M] = RTS5261_SSC_DEPTH_2M,
[RTSX_SSC_DEPTH_1M] = RTS5261_SSC_DEPTH_1M,
[RTSX_SSC_DEPTH_500K] = RTS5261_SSC_DEPTH_512K,
};
if (initial_mode) {
/* Use roughly 250 kHz in the initial stage */
if (is_version_higher_than(pcr, PID_5261, IC_VER_C)) {
clk_divider = SD_CLK_DIVIDE_256;
card_clock = 60000000;
} else {
clk_divider = SD_CLK_DIVIDE_128;
card_clock = 30000000;
}
} else {
clk_divider = SD_CLK_DIVIDE_0;
}
err = rtsx_pci_write_register(pcr, SD_CFG1,
SD_CLK_DIVIDE_MASK, clk_divider);
if (err < 0)
return err;
card_clock /= 1000000;
pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
clk = card_clock;
if (!initial_mode && double_clk)
clk = card_clock * 2;
pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
clk, pcr->cur_clock);
if (clk == pcr->cur_clock)
return 0;
if (pcr->ops->conv_clk_and_div_n)
n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
else
n = clk - 4;
if ((clk <= 4) || (n > 396))
return -EINVAL;
mcu_cnt = 125/clk + 3;
if (mcu_cnt > 15)
mcu_cnt = 15;
div = CLK_DIV_1;
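/* While N is too small, double the internal clock and step up the output divider */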
while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
if (pcr->ops->conv_clk_and_div_n) {
int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
DIV_N_TO_CLK) * 2;
n = pcr->ops->conv_clk_and_div_n(dbl_clk,
CLK_TO_DIV_N);
} else {
n = (n + 4) * 2 - 4;
}
div++;
}
n = (n / 2) - 1;
pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
ssc_depth = depth[ssc_depth];
if (double_clk)
ssc_depth = double_ssc_depth(ssc_depth);
if (ssc_depth) {
if (div == CLK_DIV_2) {
if (ssc_depth > 1)
ssc_depth -= 1;
else
ssc_depth = RTS5261_SSC_DEPTH_8M;
} else if (div == CLK_DIV_4) {
if (ssc_depth > 2)
ssc_depth -= 2;
else
ssc_depth = RTS5261_SSC_DEPTH_8M;
} else if (div == CLK_DIV_8) {
if (ssc_depth > 3)
ssc_depth -= 3;
else
ssc_depth = RTS5261_SSC_DEPTH_8M;
}
} else {
ssc_depth = 0;
}
pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
CLK_LOW_FREQ, CLK_LOW_FREQ);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
0xFF, (div << 4) | mcu_cnt);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
SSC_DEPTH_MASK, ssc_depth);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
if (vpclk) {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
PHASE_NOT_RESET, 0);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
PHASE_NOT_RESET, 0);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
PHASE_NOT_RESET, PHASE_NOT_RESET);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
PHASE_NOT_RESET, PHASE_NOT_RESET);
}
err = rtsx_pci_send_cmd(pcr, 2000);
if (err < 0)
return err;
/* Wait SSC clock stable */
udelay(SSC_CLOCK_STABLE_WAIT);
err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
if (err < 0)
return err;
pcr->cur_clock = clk;
return 0;
}
void rts5261_init_params(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &pcr->option;
struct rtsx_hw_param *hw_param = &pcr->hw_param;
u8 val;
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
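/* Advertise SD Express only if firmware reports a usable express link */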
rtsx_pci_read_register(pcr, RTS5261_FW_STATUS, &val);
if (!(val & RTS5261_EXPRESS_LINK_FAIL_MASK))
pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
pcr->num_slots = 1;
pcr->ops = &rts5261_pcr_ops;
pcr->flags = 0;
pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
pcr->sd30_drive_sel_1v8 = 0x00;
pcr->sd30_drive_sel_3v3 = 0x00;
pcr->aspm_en = ASPM_L1_EN;
pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
pcr->ic_version = rts5261_get_ic_version(pcr);
pcr->sd_pull_ctl_enable_tbl = rts5261_sd_pull_ctl_enable_tbl;
pcr->sd_pull_ctl_disable_tbl = rts5261_sd_pull_ctl_disable_tbl;
pcr->reg_pm_ctrl3 = RTS5261_AUTOLOAD_CFG3;
option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
| LTR_L1SS_PWR_GATE_EN);
option->ltr_en = true;
/* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
option->ltr_l1off_sspwrgate = 0x7F;
option->ltr_l1off_snooze_sspwrgate = 0x78;
option->ocp_en = 1;
hw_param->interrupt_en |= SD_OC_INT_EN;
hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
option->sd_800mA_ocp_thd = RTS5261_LDO1_OCP_THD_1040;
}
| linux-master | drivers/misc/cardreader/rts5261.c |
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 Oleksij Rempel <[email protected]>
*
* Driver for Alcor Micro AU6601 and AU6621 controllers
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/alcor_pci.h>
#define DRV_NAME_ALCOR_PCI "alcor_pci"
static DEFINE_IDA(alcor_pci_idr);
static struct mfd_cell alcor_pci_cells[] = {
[ALCOR_SD_CARD] = {
.name = DRV_NAME_ALCOR_PCI_SDMMC,
},
[ALCOR_MS_CARD] = {
.name = DRV_NAME_ALCOR_PCI_MS,
},
};
static const struct alcor_dev_cfg alcor_cfg = {
.dma = 0,
};
static const struct alcor_dev_cfg au6621_cfg = {
.dma = 1,
};
static const struct alcor_dev_cfg au6625_cfg = {
.dma = 0,
};
static const struct pci_device_id pci_ids[] = {
{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6601),
.driver_data = (kernel_ulong_t)&alcor_cfg },
{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6621),
.driver_data = (kernel_ulong_t)&au6621_cfg },
{ PCI_DEVICE(PCI_ID_ALCOR_MICRO, PCI_ID_AU6625),
.driver_data = (kernel_ulong_t)&au6625_cfg },
{},
};
MODULE_DEVICE_TABLE(pci, pci_ids);
void alcor_write8(struct alcor_pci_priv *priv, u8 val, unsigned int addr)
{
writeb(val, priv->iobase + addr);
}
EXPORT_SYMBOL_GPL(alcor_write8);
void alcor_write16(struct alcor_pci_priv *priv, u16 val, unsigned int addr)
{
writew(val, priv->iobase + addr);
}
EXPORT_SYMBOL_GPL(alcor_write16);
void alcor_write32(struct alcor_pci_priv *priv, u32 val, unsigned int addr)
{
writel(val, priv->iobase + addr);
}
EXPORT_SYMBOL_GPL(alcor_write32);
void alcor_write32be(struct alcor_pci_priv *priv, u32 val, unsigned int addr)
{
iowrite32be(val, priv->iobase + addr);
}
EXPORT_SYMBOL_GPL(alcor_write32be);
u8 alcor_read8(struct alcor_pci_priv *priv, unsigned int addr)
{
return readb(priv->iobase + addr);
}
EXPORT_SYMBOL_GPL(alcor_read8);
u32 alcor_read32(struct alcor_pci_priv *priv, unsigned int addr)
{
return readl(priv->iobase + addr);
}
EXPORT_SYMBOL_GPL(alcor_read32);
u32 alcor_read32be(struct alcor_pci_priv *priv, unsigned int addr)
{
return ioread32be(priv->iobase + addr);
}
EXPORT_SYMBOL_GPL(alcor_read32be);
static int alcor_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct alcor_dev_cfg *cfg;
struct alcor_pci_priv *priv;
int ret, i, bar = 0;
cfg = (void *)ent->driver_data;
ret = pcim_enable_device(pdev);
if (ret)
return ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
ret = ida_alloc(&alcor_pci_idr, GFP_KERNEL);
if (ret < 0)
return ret;
priv->id = ret;
priv->pdev = pdev;
priv->parent_pdev = pdev->bus->self;
priv->dev = &pdev->dev;
priv->cfg = cfg;
priv->irq = pdev->irq;
ret = pci_request_regions(pdev, DRV_NAME_ALCOR_PCI);
if (ret) {
dev_err(&pdev->dev, "Cannot request region\n");
ret = -ENOMEM;
goto error_free_ida;
}
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
ret = -ENODEV;
goto error_release_regions;
}
priv->iobase = pcim_iomap(pdev, bar, 0);
if (!priv->iobase) {
ret = -ENOMEM;
goto error_release_regions;
}
/* make sure irqs are disabled */
alcor_write32(priv, 0, AU6601_REG_INT_ENABLE);
alcor_write32(priv, 0, AU6601_MS_INT_ENABLE);
ret = dma_set_mask_and_coherent(priv->dev, AU6601_SDMA_MASK);
if (ret) {
dev_err(priv->dev, "Failed to set DMA mask\n");
goto error_release_regions;
}
pci_set_master(pdev);
pci_set_drvdata(pdev, priv);
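/* Hand each MFD cell a pointer to the shared private data */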
for (i = 0; i < ARRAY_SIZE(alcor_pci_cells); i++) {
alcor_pci_cells[i].platform_data = priv;
alcor_pci_cells[i].pdata_size = sizeof(*priv);
}
ret = mfd_add_devices(&pdev->dev, priv->id, alcor_pci_cells,
ARRAY_SIZE(alcor_pci_cells), NULL, 0, NULL);
if (ret < 0)
goto error_clear_drvdata;
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
return 0;
error_clear_drvdata:
pci_clear_master(pdev);
pci_set_drvdata(pdev, NULL);
error_release_regions:
pci_release_regions(pdev);
error_free_ida:
ida_free(&alcor_pci_idr, priv->id);
return ret;
}
static void alcor_pci_remove(struct pci_dev *pdev)
{
struct alcor_pci_priv *priv;
priv = pci_get_drvdata(pdev);
mfd_remove_devices(&pdev->dev);
ida_free(&alcor_pci_idr, priv->id);
pci_release_regions(pdev);
pci_clear_master(pdev);
pci_set_drvdata(pdev, NULL);
}
#ifdef CONFIG_PM_SLEEP
static int alcor_suspend(struct device *dev)
{
return 0;
}
static int alcor_resume(struct device *dev)
{
struct alcor_pci_priv *priv = dev_get_drvdata(dev);
pci_disable_link_state(priv->pdev,
PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
return 0;
}
#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(alcor_pci_pm_ops, alcor_suspend, alcor_resume);
static struct pci_driver alcor_driver = {
.name = DRV_NAME_ALCOR_PCI,
.id_table = pci_ids,
.probe = alcor_pci_probe,
.remove = alcor_pci_remove,
.driver = {
.pm = &alcor_pci_pm_ops
},
};
module_pci_driver(alcor_driver);
MODULE_AUTHOR("Oleksij Rempel <[email protected]>");
MODULE_DESCRIPTION("PCI driver for Alcor Micro AU6601 Secure Digital Host Controller Interface");
MODULE_LICENSE("GPL");
| linux-master | drivers/misc/cardreader/alcor_pci.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Ricky WU <[email protected]>
* Rui FENG <[email protected]>
* Wei WANG <[email protected]>
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/rtsx_pci.h>
#include "rts5228.h"
#include "rtsx_pcr.h"
static u8 rts5228_get_ic_version(struct rtsx_pcr *pcr)
{
u8 val;
rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
return val & IC_VERSION_MASK;
}
static void rts5228_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
{
u8 driving_3v3[4][3] = {
{0x13, 0x13, 0x13},
{0x96, 0x96, 0x96},
{0x7F, 0x7F, 0x7F},
{0x96, 0x96, 0x96},
};
u8 driving_1v8[4][3] = {
{0x99, 0x99, 0x99},
{0xB5, 0xB5, 0xB5},
{0xE6, 0x7E, 0xFE},
{0x6B, 0x6B, 0x6B},
};
u8 (*driving)[3], drive_sel;
if (voltage == OUTPUT_3V3) {
driving = driving_3v3;
drive_sel = pcr->sd30_drive_sel_3v3;
} else {
driving = driving_1v8;
drive_sel = pcr->sd30_drive_sel_1v8;
}
rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
0xFF, driving[drive_sel][0]);
rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
0xFF, driving[drive_sel][1]);
rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
0xFF, driving[drive_sel][2]);
}
static void rtsx5228_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
u32 reg;
/* 0x724~0x727 */
pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg)) {
pcr_dbg(pcr, "skip fetch vendor setting\n");
return;
}
pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
pcr->aspm_en = rtsx_reg_to_aspm(reg);
/* 0x814~0x817 */
pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->rtd3_en = rtsx_reg_to_rtd3(reg);
if (rtsx_check_mmc_support(reg))
pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
}
static int rts5228_optimize_phy(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_phy_register(pcr, 0x07, 0x8F40);
}
static void rts5228_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
/* Set relink_time to 0 */
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
RELINK_TIME_MASK, 0);
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
if (!runtime) {
rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, 0);
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL,
FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
}
rtsx_pci_write_register(pcr, FPDCTL,
SSC_POWER_DOWN, SSC_POWER_DOWN);
}
static int rts5228_enable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL,
LED_SHINE_MASK, LED_SHINE_EN);
}
static int rts5228_disable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL,
LED_SHINE_MASK, LED_SHINE_DISABLE);
}
static int rts5228_turn_on_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL,
0x02, 0x02);
}
static int rts5228_turn_off_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL,
0x02, 0x00);
}
/* SD Pull Control Enable:
* SD_DAT[3:0] ==> pull up
* SD_CD ==> pull up
* SD_WP ==> pull up
* SD_CMD ==> pull up
* SD_CLK ==> pull down
*/
static const u32 rts5228_sd_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
0,
};
/* SD Pull Control Disable:
* SD_DAT[3:0] ==> pull down
* SD_CD ==> pull up
* SD_WP ==> pull down
* SD_CMD ==> pull down
* SD_CLK ==> pull down
*/
static const u32 rts5228_sd_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
0,
};
static int rts5228_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
{
rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
| SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
return 0;
}
static int rts5228_card_power_on(struct rtsx_pcr *pcr, int card)
{
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en)
rtsx_pci_enable_ocp(pcr);
rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0,
CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD);
rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1,
RTS5228_LDO1_TUNE_MASK, RTS5228_LDO1_33);
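/* Bring LDO1 up via soft start, then switch to full power after a short delay */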
rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_SOFTSTART);
mdelay(2);
rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
RTS5228_LDO1_POWERON_MASK, RTS5228_LDO1_FULLON);
rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
RTS5228_LDO3318_POWERON, RTS5228_LDO3318_POWERON);
msleep(20);
rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN);
/* Initialize SD_CFG1 register */
rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT);
rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
0xFF, SD20_RX_POS_EDGE);
rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
SD_STOP | SD_CLR_ERR);
/* Reset SD_CFG3 register */
rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
SD30_CLK_STOP_CFG0, 0);
if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
rts5228_sd_set_sample_push_timing_sd30(pcr);
return 0;
}
static int rts5228_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
int err;
u16 val = 0;
rtsx_pci_write_register(pcr, RTS5228_CARD_PWR_CTL,
RTS5228_PUPDC, RTS5228_PUPDC);
switch (voltage) {
case OUTPUT_3V3:
rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
val |= PHY_TUNE_SDBUS_33;
err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
if (err < 0)
return err;
rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG,
RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_33);
rtsx_pci_write_register(pcr, SD_PAD_CTL,
SD_IO_USING_1V8, 0);
break;
case OUTPUT_1V8:
rtsx_pci_read_phy_register(pcr, PHY_TUNE, &val);
val &= ~PHY_TUNE_SDBUS_33;
err = rtsx_pci_write_phy_register(pcr, PHY_TUNE, val);
if (err < 0)
return err;
rtsx_pci_write_register(pcr, RTS5228_DV3318_CFG,
RTS5228_DV3318_TUNE_MASK, RTS5228_DV3318_18);
rtsx_pci_write_register(pcr, SD_PAD_CTL,
SD_IO_USING_1V8, SD_IO_USING_1V8);
break;
default:
return -EINVAL;
}
/* set pad drive */
rts5228_fill_driving(pcr, voltage);
return 0;
}
static void rts5228_stop_cmd(struct rtsx_pcr *pcr)
{
rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
RTS5260_DMA_RST | RTS5260_ADMA3_RST,
RTS5260_DMA_RST | RTS5260_ADMA3_RST);
rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
}
static void rts5228_card_before_power_off(struct rtsx_pcr *pcr)
{
rts5228_stop_cmd(pcr);
rts5228_switch_output_voltage(pcr, OUTPUT_3V3);
}
static void rts5228_enable_ocp(struct rtsx_pcr *pcr)
{
u8 val = 0;
val = SD_OCP_INT_EN | SD_DETECT_EN;
rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN,
RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN);
}
static void rts5228_disable_ocp(struct rtsx_pcr *pcr)
{
u8 mask = 0;
mask = SD_OCP_INT_EN | SD_DETECT_EN;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0);
}
static int rts5228_card_power_off(struct rtsx_pcr *pcr, int card)
{
int err = 0;
rts5228_card_before_power_off(pcr);
err = rtsx_pci_write_register(pcr, RTS5228_LDO1233318_POW_CTL,
RTS5228_LDO_POWERON_MASK, 0);
rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, CFG_SD_POW_AUTO_PD, 0);
if (pcr->option.ocp_en)
rtsx_pci_disable_ocp(pcr);
return err;
}
static void rts5228_init_ocp(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en) {
u8 mask, val;
rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN,
RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN);
rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
RTS5228_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd);
rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
RTS5228_LDO1_OCP_LMT_THD_MASK,
RTS5228_LDO1_LMT_THD_1500);
rtsx_pci_read_register(pcr, RTS5228_LDO1_CFG0, &val);
mask = SD_OCP_GLITCH_MASK;
val = pcr->hw_param.ocp_glitch;
rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
rts5228_enable_ocp(pcr);
} else {
rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG0,
RTS5228_LDO1_OCP_EN | RTS5228_LDO1_OCP_LMT_EN, 0);
}
}
static void rts5228_clear_ocpstat(struct rtsx_pcr *pcr)
{
u8 mask = 0;
u8 val = 0;
mask = SD_OCP_INT_CLR | SD_OC_CLR;
val = SD_OCP_INT_CLR | SD_OC_CLR;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
udelay(1000);
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
}
static void rts5228_process_ocp(struct rtsx_pcr *pcr)
{
if (!pcr->option.ocp_en)
return;
rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
rts5228_clear_ocpstat(pcr);
rts5228_card_power_off(pcr, RTSX_SD_CARD);
rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
pcr->ocp_stat = 0;
}
}
static void rts5228_init_from_cfg(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
int l1ss;
u32 lval;
struct rtsx_cr_option *option = &pcr->option;
l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
if (!l1ss)
return;
pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
if (0 == (lval & 0x0F))
rtsx_pci_enable_oobs_polling(pcr);
else
rtsx_pci_disable_oobs_polling(pcr);
if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
if (option->ltr_en) {
u16 val;
pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
} else {
option->ltr_enabled = false;
}
}
}
static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
{
rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
rts5228_init_from_cfg(pcr);
rtsx_pci_write_register(pcr, L1SUB_CONFIG1,
AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0);
rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
FUNC_FORCE_UPME_XMT_DBG, FUNC_FORCE_UPME_XMT_DBG);
rtsx_pci_write_register(pcr, PCLK_CTL,
PCLK_MODE_SEL, PCLK_MODE_SEL);
rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, CLK_PM_EN, CLK_PM_EN);
/* LED shine disabled, set initial shine cycle period */
rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02);
/* Configure driving */
rts5228_fill_driving(pcr, OUTPUT_3V3);
if (pcr->flags & PCR_REVERSE_SOCKET)
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
else
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB);
if (pcr->rtd3_en) {
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x01);
rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL,
FORCE_PM_CONTROL | FORCE_PM_VALUE,
FORCE_PM_CONTROL | FORCE_PM_VALUE);
} else {
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00);
rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL,
FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL);
}
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, D3_DELINK_MODE_EN, 0x00);
return 0;
}
static void rts5228_enable_aspm(struct rtsx_pcr *pcr, bool enable)
{
u8 mask, val;
if (pcr->aspm_enabled == enable)
return;
mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
val |= (pcr->aspm_en & 0x02);
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en);
pcr->aspm_enabled = enable;
}
static void rts5228_disable_aspm(struct rtsx_pcr *pcr, bool enable)
{
u8 mask, val;
if (pcr->aspm_enabled == enable)
return;
pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC, 0);
mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1;
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val);
rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0);
mdelay(10);
pcr->aspm_enabled = enable;
}
static void rts5228_set_aspm(struct rtsx_pcr *pcr, bool enable)
{
if (enable)
rts5228_enable_aspm(pcr, true);
else
rts5228_disable_aspm(pcr, false);
}
static void rts5228_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
{
struct rtsx_cr_option *option = &pcr->option;
int aspm_L1_1, aspm_L1_2;
u8 val = 0;
aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
if (active) {
/* run, latency: 60us */
if (aspm_L1_1)
val = option->ltr_l1off_snooze_sspwrgate;
} else {
/* l1off, latency: 300us */
if (aspm_L1_2)
val = option->ltr_l1off_sspwrgate;
}
rtsx_set_l1off_sub(pcr, val);
}
static const struct pcr_ops rts5228_pcr_ops = {
.fetch_vendor_settings = rtsx5228_fetch_vendor_settings,
.turn_on_led = rts5228_turn_on_led,
.turn_off_led = rts5228_turn_off_led,
.extra_init_hw = rts5228_extra_init_hw,
.enable_auto_blink = rts5228_enable_auto_blink,
.disable_auto_blink = rts5228_disable_auto_blink,
.card_power_on = rts5228_card_power_on,
.card_power_off = rts5228_card_power_off,
.switch_output_voltage = rts5228_switch_output_voltage,
.force_power_down = rts5228_force_power_down,
.stop_cmd = rts5228_stop_cmd,
.set_aspm = rts5228_set_aspm,
.set_l1off_cfg_sub_d0 = rts5228_set_l1off_cfg_sub_d0,
.enable_ocp = rts5228_enable_ocp,
.disable_ocp = rts5228_disable_ocp,
.init_ocp = rts5228_init_ocp,
.process_ocp = rts5228_process_ocp,
.clear_ocpstat = rts5228_clear_ocpstat,
.optimize_phy = rts5228_optimize_phy,
};
static inline u8 double_ssc_depth(u8 depth)
{
return ((depth > 1) ? (depth - 1) : depth);
}
int rts5228_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
int err, clk;
u16 n;
u8 clk_divider, mcu_cnt, div;
static const u8 depth[] = {
[RTSX_SSC_DEPTH_4M] = RTS5228_SSC_DEPTH_4M,
[RTSX_SSC_DEPTH_2M] = RTS5228_SSC_DEPTH_2M,
[RTSX_SSC_DEPTH_1M] = RTS5228_SSC_DEPTH_1M,
[RTSX_SSC_DEPTH_500K] = RTS5228_SSC_DEPTH_512K,
};
if (initial_mode) {
/* Use roughly 250 kHz in the initial stage */
clk_divider = SD_CLK_DIVIDE_128;
card_clock = 30000000;
} else {
clk_divider = SD_CLK_DIVIDE_0;
}
err = rtsx_pci_write_register(pcr, SD_CFG1,
SD_CLK_DIVIDE_MASK, clk_divider);
if (err < 0)
return err;
card_clock /= 1000000;
pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
clk = card_clock;
if (!initial_mode && double_clk)
clk = card_clock * 2;
pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
clk, pcr->cur_clock);
if (clk == pcr->cur_clock)
return 0;
if (pcr->ops->conv_clk_and_div_n)
n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
else
n = clk - 4;
if ((clk <= 4) || (n > 396))
return -EINVAL;
mcu_cnt = 125/clk + 3;
if (mcu_cnt > 15)
mcu_cnt = 15;
div = CLK_DIV_1;
while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) {
if (pcr->ops->conv_clk_and_div_n) {
int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
DIV_N_TO_CLK) * 2;
n = pcr->ops->conv_clk_and_div_n(dbl_clk,
CLK_TO_DIV_N);
} else {
n = (n + 4) * 2 - 4;
}
div++;
}
n = (n / 2) - 1;
pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
ssc_depth = depth[ssc_depth];
if (double_clk)
ssc_depth = double_ssc_depth(ssc_depth);
if (ssc_depth) {
if (div == CLK_DIV_2) {
if (ssc_depth > 1)
ssc_depth -= 1;
else
ssc_depth = RTS5228_SSC_DEPTH_8M;
} else if (div == CLK_DIV_4) {
if (ssc_depth > 2)
ssc_depth -= 2;
else
ssc_depth = RTS5228_SSC_DEPTH_8M;
} else if (div == CLK_DIV_8) {
if (ssc_depth > 3)
ssc_depth -= 3;
else
ssc_depth = RTS5228_SSC_DEPTH_8M;
}
} else {
ssc_depth = 0;
}
pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
CLK_LOW_FREQ, CLK_LOW_FREQ);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
0xFF, (div << 4) | mcu_cnt);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
SSC_DEPTH_MASK, ssc_depth);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
if (vpclk) {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
PHASE_NOT_RESET, 0);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
PHASE_NOT_RESET, 0);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
PHASE_NOT_RESET, PHASE_NOT_RESET);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL,
PHASE_NOT_RESET, PHASE_NOT_RESET);
}
err = rtsx_pci_send_cmd(pcr, 2000);
if (err < 0)
return err;
/* Wait SSC clock stable */
udelay(SSC_CLOCK_STABLE_WAIT);
err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
if (err < 0)
return err;
pcr->cur_clock = clk;
return 0;
}
void rts5228_init_params(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &pcr->option;
struct rtsx_hw_param *hw_param = &pcr->hw_param;
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
pcr->num_slots = 1;
pcr->ops = &rts5228_pcr_ops;
pcr->flags = 0;
pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(28, 27, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
pcr->ic_version = rts5228_get_ic_version(pcr);
pcr->sd_pull_ctl_enable_tbl = rts5228_sd_pull_ctl_enable_tbl;
pcr->sd_pull_ctl_disable_tbl = rts5228_sd_pull_ctl_disable_tbl;
pcr->reg_pm_ctrl3 = RTS5228_AUTOLOAD_CFG3;
option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
| LTR_L1SS_PWR_GATE_EN);
option->ltr_en = true;
/* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
option->ltr_l1off_sspwrgate = 0x7F;
option->ltr_l1off_snooze_sspwrgate = 0x78;
option->ocp_en = 1;
hw_param->interrupt_en |= SD_OC_INT_EN;
hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
option->sd_800mA_ocp_thd = RTS5228_LDO1_OCP_THD_930;
}
| linux-master | drivers/misc/cardreader/rts5228.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG <[email protected]>
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
static u8 rts5249_get_ic_version(struct rtsx_pcr *pcr)
{
u8 val;
rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
return val & 0x0F;
}
static void rts5249_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
{
u8 driving_3v3[4][3] = {
{0x11, 0x11, 0x18},
{0x55, 0x55, 0x5C},
{0xFF, 0xFF, 0xFF},
{0x96, 0x96, 0x96},
};
u8 driving_1v8[4][3] = {
{0xC4, 0xC4, 0xC4},
{0x3C, 0x3C, 0x3C},
{0xFE, 0xFE, 0xFE},
{0xB3, 0xB3, 0xB3},
};
u8 (*driving)[3], drive_sel;
if (voltage == OUTPUT_3V3) {
driving = driving_3v3;
drive_sel = pcr->sd30_drive_sel_3v3;
} else {
driving = driving_1v8;
drive_sel = pcr->sd30_drive_sel_1v8;
}
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
0xFF, driving[drive_sel][0]);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
0xFF, driving[drive_sel][1]);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
0xFF, driving[drive_sel][2]);
}
static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
u32 reg;
pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg)) {
pcr_dbg(pcr, "skip fetch vendor setting\n");
return;
}
pcr->aspm_en = rtsx_reg_to_aspm(reg);
pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A))
pcr->rtd3_en = rtsx_reg_to_rtd3_uhsii(reg);
if (rtsx_check_mmc_support(reg))
pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
}
static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
int l1ss;
struct rtsx_cr_option *option = &(pcr->option);
u32 lval;
l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
if (!l1ss)
return;
pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
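/* On RTS524A/RTS525A, use OOBS polling only while no L1 substates are enabled in L1SS_CTL1 */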
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
if (0 == (lval & 0x0F))
rtsx_pci_enable_oobs_polling(pcr);
else
rtsx_pci_disable_oobs_polling(pcr);
}
if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
if (option->ltr_en) {
u16 val;
pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
} else {
option->ltr_enabled = false;
}
}
}
static int rts5249_init_from_hw(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &(pcr->option);
if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
| PM_L1_1_EN | PM_L1_2_EN))
option->force_clkreq_0 = false;
else
option->force_clkreq_0 = true;
return 0;
}
static void rts52xa_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
/* Set relink_time to 0 */
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
RELINK_TIME_MASK, 0);
rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3,
D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
if (!runtime) {
rtsx_pci_write_register(pcr, RTS524A_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, 0);
rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00);
rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20);
}
rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
}
static void rts52xa_save_content_from_efuse(struct rtsx_pcr *pcr)
{
u8 cnt, sv;
u16 j = 0;
u8 tmp;
u8 val;
int i;
rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL,
REG_EFUSE_BYPASS | REG_EFUSE_POR, REG_EFUSE_POR);
udelay(1);
pcr_dbg(pcr, "Enable efuse por!");
pcr_dbg(pcr, "save efuse to autoload");
rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD, REG_EFUSE_ADD_MASK, 0x00);
rtsx_pci_write_register(pcr, RTS525A_EFUSE_CTL,
REG_EFUSE_ENABLE | REG_EFUSE_MODE, REG_EFUSE_ENABLE);
/* Wait for the transfer to end */
for (j = 0; j < 1024; j++) {
rtsx_pci_read_register(pcr, RTS525A_EFUSE_CTL, &tmp);
if ((tmp & 0x80) == 0)
break;
}
rtsx_pci_read_register(pcr, RTS525A_EFUSE_DATA, &val);
cnt = val & 0x0F;
sv = val & 0x10;
if (sv) {
for (i = 0; i < 4; i++) {
rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD,
REG_EFUSE_ADD_MASK, 0x04 + i);
rtsx_pci_write_register(pcr, RTS525A_EFUSE_CTL,
REG_EFUSE_ENABLE | REG_EFUSE_MODE, REG_EFUSE_ENABLE);
/* Wait for the transfer to end */
for (j = 0; j < 1024; j++) {
rtsx_pci_read_register(pcr, RTS525A_EFUSE_CTL, &tmp);
if ((tmp & 0x80) == 0)
break;
}
rtsx_pci_read_register(pcr, RTS525A_EFUSE_DATA, &val);
rtsx_pci_write_register(pcr, 0xFF04 + i, 0xFF, val);
}
} else {
rtsx_pci_write_register(pcr, 0xFF04, 0xFF, (u8)PCI_VID(pcr));
rtsx_pci_write_register(pcr, 0xFF05, 0xFF, (u8)(PCI_VID(pcr) >> 8));
rtsx_pci_write_register(pcr, 0xFF06, 0xFF, (u8)PCI_PID(pcr));
rtsx_pci_write_register(pcr, 0xFF07, 0xFF, (u8)(PCI_PID(pcr) >> 8));
}
for (i = 0; i < cnt * 4; i++) {
if (sv)
rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD,
REG_EFUSE_ADD_MASK, 0x08 + i);
else
rtsx_pci_write_register(pcr, RTS525A_EFUSE_ADD,
REG_EFUSE_ADD_MASK, 0x04 + i);
rtsx_pci_write_register(pcr, RTS525A_EFUSE_CTL,
REG_EFUSE_ENABLE | REG_EFUSE_MODE, REG_EFUSE_ENABLE);
/* Wait for the transfer to end */
for (j = 0; j < 1024; j++) {
rtsx_pci_read_register(pcr, RTS525A_EFUSE_CTL, &tmp);
if ((tmp & 0x80) == 0)
break;
}
rtsx_pci_read_register(pcr, RTS525A_EFUSE_DATA, &val);
rtsx_pci_write_register(pcr, 0xFF08 + i, 0xFF, val);
}
rtsx_pci_write_register(pcr, 0xFF00, 0xFF, (cnt & 0x7F) | 0x80);
rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL,
REG_EFUSE_BYPASS | REG_EFUSE_POR, REG_EFUSE_BYPASS);
pcr_dbg(pcr, "Disable efuse por!");
}
static void rts52xa_save_content_to_autoload_space(struct rtsx_pcr *pcr)
{
u8 val;
rtsx_pci_read_register(pcr, RESET_LOAD_REG, &val);
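/* Bit 1 of RESET_LOAD_REG selects whether the autoload space is refreshed from the efuse or filled with the PCI VID/PID */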
if (val & 0x02) {
rtsx_pci_read_register(pcr, RTS525A_BIOS_CFG, &val);
if (val & RTS525A_LOAD_BIOS_FLAG) {
rtsx_pci_write_register(pcr, RTS525A_BIOS_CFG,
RTS525A_LOAD_BIOS_FLAG, RTS525A_CLEAR_BIOS_FLAG);
rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL,
REG_EFUSE_POWER_MASK, REG_EFUSE_POWERON);
pcr_dbg(pcr, "Power ON efuse!");
mdelay(1);
rts52xa_save_content_from_efuse(pcr);
} else {
rtsx_pci_read_register(pcr, RTS524A_PME_FORCE_CTL, &val);
if (!(val & 0x08))
rts52xa_save_content_from_efuse(pcr);
}
} else {
pcr_dbg(pcr, "Load from autoload");
rtsx_pci_write_register(pcr, 0xFF00, 0xFF, 0x80);
rtsx_pci_write_register(pcr, 0xFF04, 0xFF, (u8)PCI_VID(pcr));
rtsx_pci_write_register(pcr, 0xFF05, 0xFF, (u8)(PCI_VID(pcr) >> 8));
rtsx_pci_write_register(pcr, 0xFF06, 0xFF, (u8)PCI_PID(pcr));
rtsx_pci_write_register(pcr, 0xFF07, 0xFF, (u8)(PCI_PID(pcr) >> 8));
}
}
static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &(pcr->option);
rts5249_init_from_cfg(pcr);
rts5249_init_from_hw(pcr);
rtsx_pci_init_cmd(pcr);
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A))
rts52xa_save_content_to_autoload_space(pcr);
/* Reset L1SUB Config */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
/* Configure GPIO as output */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
/* Reset ASPM state to default value */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
/* Switch LDO3318 source from DV33 to card_3v3 */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
/* LED shine disabled, set initial shine cycle period */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
/* Configure driving */
rts5249_fill_driving(pcr, OUTPUT_3V3);
if (pcr->flags & PCR_REVERSE_SOCKET)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN);
rtsx_pci_write_register(pcr, RTS524A_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
}
if (pcr->rtd3_en) {
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x01);
rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x30);
} else {
rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x01);
rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x33);
}
} else {
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3, 0x01, 0x00);
rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL, 0x30, 0x20);
} else {
rtsx_pci_write_register(pcr, PME_FORCE_CTL, 0xFF, 0x30);
rtsx_pci_write_register(pcr, PM_CTRL3, 0x01, 0x00);
}
}
/*
* If u_force_clkreq_0 is enabled, the CLKREQ# pin is forced
* to drive low and the clock is requested unconditionally.
*/
if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
if (CHK_PCI_PID(pcr, PID_524A) || CHK_PCI_PID(pcr, PID_525A)) {
rtsx_pci_write_register(pcr, RTS524A_PME_FORCE_CTL,
REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF);
pcr_dbg(pcr, "Power OFF efuse!");
}
return 0;
}
static int rts5249_optimize_phy(struct rtsx_pcr *pcr)
{
int err;
err = rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, PHY_REV,
PHY_REV_RESV | PHY_REV_RXIDLE_LATCHED |
PHY_REV_P1_EN | PHY_REV_RXIDLE_EN |
PHY_REV_CLKREQ_TX_EN | PHY_REV_RX_PWST |
PHY_REV_CLKREQ_DT_1_0 | PHY_REV_STOP_CLKRD |
PHY_REV_STOP_CLKWR);
if (err < 0)
return err;
msleep(1);
err = rtsx_pci_write_phy_register(pcr, PHY_BPCR,
PHY_BPCR_IBRXSEL | PHY_BPCR_IBTXSEL |
PHY_BPCR_IB_FILTER | PHY_BPCR_CMIRROR_EN);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, PHY_PCR,
PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 |
PHY_PCR_RSSI_EN | PHY_PCR_RX10K);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
PHY_RCR2_EMPHASE_EN | PHY_RCR2_NADJR |
PHY_RCR2_CDR_SR_2 | PHY_RCR2_FREQSEL_12 |
PHY_RCR2_CDR_SC_12P | PHY_RCR2_CALIB_LATE);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, PHY_FLD4,
PHY_FLD4_FLDEN_SEL | PHY_FLD4_REQ_REF |
PHY_FLD4_RXAMP_OFF | PHY_FLD4_REQ_ADDA |
PHY_FLD4_BER_COUNT | PHY_FLD4_BER_TIMER |
PHY_FLD4_BER_CHK_EN);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, PHY_RDR,
PHY_RDR_RXDSEL_1_9 | PHY_SSC_AUTO_PWD);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, PHY_RCR1,
PHY_RCR1_ADP_TIME_4 | PHY_RCR1_VCO_COARSE);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, PHY_FLD3,
PHY_FLD3_TIMER_4 | PHY_FLD3_TIMER_6 |
PHY_FLD3_RXDELINK);
if (err < 0)
return err;
return rtsx_pci_write_phy_register(pcr, PHY_TUNE,
PHY_TUNE_TUNEREF_1_0 | PHY_TUNE_VBGSEL_1252 |
PHY_TUNE_SDBUS_33 | PHY_TUNE_TUNED18 |
PHY_TUNE_TUNED12 | PHY_TUNE_TUNEA12);
}
static int rtsx_base_turn_on_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
}
static int rtsx_base_turn_off_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
}
static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
}
static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
}
static int rtsx_base_card_power_on(struct rtsx_pcr *pcr, int card)
{
int err;
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en)
rtsx_pci_enable_ocp(pcr);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
SD_POWER_MASK, SD_VCC_PARTIAL_POWER_ON);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x02);
err = rtsx_pci_send_cmd(pcr, 100);
if (err < 0)
return err;
msleep(5);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
SD_POWER_MASK, SD_VCC_POWER_ON);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x06);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rtsx_base_card_power_off(struct rtsx_pcr *pcr, int card)
{
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en)
rtsx_pci_disable_ocp(pcr);
rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK, SD_POWER_OFF);
rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0x00);
return 0;
}
static int rtsx_base_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
int err;
u16 append;
switch (voltage) {
case OUTPUT_3V3:
err = rtsx_pci_update_phy(pcr, PHY_TUNE, PHY_TUNE_VOLTAGE_MASK,
PHY_TUNE_VOLTAGE_3V3);
if (err < 0)
return err;
break;
case OUTPUT_1V8:
append = PHY_TUNE_D18_1V8;
if (CHK_PCI_PID(pcr, 0x5249)) {
err = rtsx_pci_update_phy(pcr, PHY_BACR,
PHY_BACR_BASIC_MASK, 0);
if (err < 0)
return err;
append = PHY_TUNE_D18_1V7;
}
err = rtsx_pci_update_phy(pcr, PHY_TUNE, PHY_TUNE_VOLTAGE_MASK,
append);
if (err < 0)
return err;
break;
default:
pcr_dbg(pcr, "unknown output voltage %d\n", voltage);
return -EINVAL;
}
/* set pad drive */
rtsx_pci_init_cmd(pcr);
rts5249_fill_driving(pcr, voltage);
return rtsx_pci_send_cmd(pcr, 100);
}
static const struct pcr_ops rts5249_pcr_ops = {
.fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
.extra_init_hw = rts5249_extra_init_hw,
.optimize_phy = rts5249_optimize_phy,
.turn_on_led = rtsx_base_turn_on_led,
.turn_off_led = rtsx_base_turn_off_led,
.enable_auto_blink = rtsx_base_enable_auto_blink,
.disable_auto_blink = rtsx_base_disable_auto_blink,
.card_power_on = rtsx_base_card_power_on,
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
};
/* SD Pull Control Enable:
* SD_DAT[3:0] ==> pull up
* SD_CD ==> pull up
* SD_WP ==> pull up
* SD_CMD ==> pull up
* SD_CLK ==> pull down
*/
static const u32 rts5249_sd_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA),
0,
};
/* SD Pull Control Disable:
* SD_DAT[3:0] ==> pull down
* SD_CD ==> pull up
* SD_WP ==> pull down
* SD_CMD ==> pull down
* SD_CLK ==> pull down
*/
static const u32 rts5249_sd_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
0,
};
/* MS Pull Control Enable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rts5249_ms_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
0,
};
/* MS Pull Control Disable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rts5249_ms_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
0,
};
void rts5249_init_params(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &(pcr->option);
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
pcr->num_slots = 2;
pcr->ops = &rts5249_pcr_ops;
pcr->flags = 0;
pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(1, 29, 16);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
pcr->ic_version = rts5249_get_ic_version(pcr);
pcr->sd_pull_ctl_enable_tbl = rts5249_sd_pull_ctl_enable_tbl;
pcr->sd_pull_ctl_disable_tbl = rts5249_sd_pull_ctl_disable_tbl;
pcr->ms_pull_ctl_enable_tbl = rts5249_ms_pull_ctl_enable_tbl;
pcr->ms_pull_ctl_disable_tbl = rts5249_ms_pull_ctl_disable_tbl;
pcr->reg_pm_ctrl3 = PM_CTRL3;
option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
| LTR_L1SS_PWR_GATE_EN);
option->ltr_en = true;
/* Init latency of active, idle, L1OFF to 60us, 300us, 3ms */
option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF;
option->ltr_l1off_snooze_sspwrgate =
LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF;
}
static int rts524a_write_phy(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
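/* Extended PHY addresses (bit 7 set) are remapped into the 0x40-0x7F range */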
addr = addr & 0x80 ? (addr & 0x7F) | 0x40 : addr;
return __rtsx_pci_write_phy_register(pcr, addr, val);
}
static int rts524a_read_phy(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
addr = addr & 0x80 ? (addr & 0x7F) | 0x40 : addr;
return __rtsx_pci_read_phy_register(pcr, addr, val);
}
static int rts524a_optimize_phy(struct rtsx_pcr *pcr)
{
int err;
err = rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3,
D3_DELINK_MODE_EN, 0x00);
if (err < 0)
return err;
rtsx_pci_write_phy_register(pcr, PHY_PCR,
PHY_PCR_FORCE_CODE | PHY_PCR_OOBS_CALI_50 |
PHY_PCR_OOBS_VCM_08 | PHY_PCR_OOBS_SEN_90 | PHY_PCR_RSSI_EN);
rtsx_pci_write_phy_register(pcr, PHY_SSCCR3,
PHY_SSCCR3_STEP_IN | PHY_SSCCR3_CHECK_DELAY);
if (is_version(pcr, 0x524A, IC_VER_A)) {
rtsx_pci_write_phy_register(pcr, PHY_SSCCR3,
PHY_SSCCR3_STEP_IN | PHY_SSCCR3_CHECK_DELAY);
rtsx_pci_write_phy_register(pcr, PHY_SSCCR2,
PHY_SSCCR2_PLL_NCODE | PHY_SSCCR2_TIME0 |
PHY_SSCCR2_TIME2_WIDTH);
rtsx_pci_write_phy_register(pcr, PHY_ANA1A,
PHY_ANA1A_TXR_LOOPBACK | PHY_ANA1A_RXT_BIST |
PHY_ANA1A_TXR_BIST | PHY_ANA1A_REV);
rtsx_pci_write_phy_register(pcr, PHY_ANA1D,
PHY_ANA1D_DEBUG_ADDR);
rtsx_pci_write_phy_register(pcr, PHY_DIG1E,
PHY_DIG1E_REV | PHY_DIG1E_D0_X_D1 |
PHY_DIG1E_RX_ON_HOST | PHY_DIG1E_RCLK_REF_HOST |
PHY_DIG1E_RCLK_TX_EN_KEEP |
PHY_DIG1E_RCLK_TX_TERM_KEEP |
PHY_DIG1E_RCLK_RX_EIDLE_ON | PHY_DIG1E_TX_TERM_KEEP |
PHY_DIG1E_RX_TERM_KEEP | PHY_DIG1E_TX_EN_KEEP |
PHY_DIG1E_RX_EN_KEEP);
}
rtsx_pci_write_phy_register(pcr, PHY_ANA08,
PHY_ANA08_RX_EQ_DCGAIN | PHY_ANA08_SEL_RX_EN |
PHY_ANA08_RX_EQ_VAL | PHY_ANA08_SCP | PHY_ANA08_SEL_IPI);
return 0;
}
static int rts524a_extra_init_hw(struct rtsx_pcr *pcr)
{
rts5249_extra_init_hw(pcr);
rtsx_pci_write_register(pcr, FUNC_FORCE_CTL,
FORCE_ASPM_L1_EN, FORCE_ASPM_L1_EN);
rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_VCC_LMT_EN,
LDO_VCC_LMT_EN);
rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
if (is_version(pcr, 0x524A, IC_VER_A)) {
rtsx_pci_write_register(pcr, LDO_DV18_CFG,
LDO_DV18_SR_MASK, LDO_DV18_SR_DF);
rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
LDO_VCC_REF_TUNE_MASK, LDO_VCC_REF_1V2);
rtsx_pci_write_register(pcr, LDO_VIO_CFG,
LDO_VIO_REF_TUNE_MASK, LDO_VIO_REF_1V2);
rtsx_pci_write_register(pcr, LDO_VIO_CFG,
LDO_VIO_SR_MASK, LDO_VIO_SR_DF);
rtsx_pci_write_register(pcr, LDO_DV12S_CFG,
LDO_REF12_TUNE_MASK, LDO_REF12_TUNE_DF);
rtsx_pci_write_register(pcr, SD40_LDO_CTL1,
SD40_VIO_TUNE_MASK, SD40_VIO_TUNE_1V7);
}
return 0;
}
static void rts5250_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
{
struct rtsx_cr_option *option = &(pcr->option);
u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
int aspm_L1_1, aspm_L1_2;
u8 val = 0;
aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
if (active) {
/* Run, latency: 60us */
if (aspm_L1_1)
val = option->ltr_l1off_snooze_sspwrgate;
} else {
/* L1off, latency: 300us */
if (aspm_L1_2)
val = option->ltr_l1off_sspwrgate;
}
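/* With card-presence checking enabled, MBIAS2 stays on only while no card is inserted */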
if (aspm_L1_1 || aspm_L1_2) {
if (rtsx_check_dev_flag(pcr,
LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
if (card_exist)
val &= ~L1OFF_MBIAS2_EN_5250;
else
val |= L1OFF_MBIAS2_EN_5250;
}
}
rtsx_set_l1off_sub(pcr, val);
}
static const struct pcr_ops rts524a_pcr_ops = {
.write_phy = rts524a_write_phy,
.read_phy = rts524a_read_phy,
.fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
.extra_init_hw = rts524a_extra_init_hw,
.optimize_phy = rts524a_optimize_phy,
.turn_on_led = rtsx_base_turn_on_led,
.turn_off_led = rtsx_base_turn_off_led,
.enable_auto_blink = rtsx_base_enable_auto_blink,
.disable_auto_blink = rtsx_base_disable_auto_blink,
.card_power_on = rtsx_base_card_power_on,
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rtsx_base_switch_output_voltage,
.force_power_down = rts52xa_force_power_down,
.set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
};
void rts524a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
pcr->option.ltr_l1off_snooze_sspwrgate =
LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts524a_pcr_ops;
pcr->option.ocp_en = 1;
if (pcr->option.ocp_en)
pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
pcr->option.sd_800mA_ocp_thd = RTS524A_OCP_THD_800;
}
static int rts525a_card_power_on(struct rtsx_pcr *pcr, int card)
{
rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
LDO_VCC_TUNE_MASK, LDO_VCC_3V3);
return rtsx_base_card_power_on(pcr, card);
}
static int rts525a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
switch (voltage) {
case OUTPUT_3V3:
rtsx_pci_write_register(pcr, LDO_CONFIG2,
LDO_D3318_MASK, LDO_D3318_33V);
rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0);
break;
case OUTPUT_1V8:
rtsx_pci_write_register(pcr, LDO_CONFIG2,
LDO_D3318_MASK, LDO_D3318_18V);
rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8,
SD_IO_USING_1V8);
break;
default:
return -EINVAL;
}
rtsx_pci_init_cmd(pcr);
rts5249_fill_driving(pcr, voltage);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rts525a_optimize_phy(struct rtsx_pcr *pcr)
{
int err;
err = rtsx_pci_write_register(pcr, RTS524A_PM_CTRL3,
D3_DELINK_MODE_EN, 0x00);
if (err < 0)
return err;
rtsx_pci_write_phy_register(pcr, _PHY_FLD0,
_PHY_FLD0_CLK_REQ_20C | _PHY_FLD0_RX_IDLE_EN |
_PHY_FLD0_BIT_ERR_RSTN | _PHY_FLD0_BER_COUNT |
_PHY_FLD0_BER_TIMER | _PHY_FLD0_CHECK_EN);
rtsx_pci_write_phy_register(pcr, _PHY_ANA03,
_PHY_ANA03_TIMER_MAX | _PHY_ANA03_OOBS_DEB_EN |
_PHY_CMU_DEBUG_EN);
if (is_version(pcr, 0x525A, IC_VER_A))
rtsx_pci_write_phy_register(pcr, _PHY_REV0,
_PHY_REV0_FILTER_OUT | _PHY_REV0_CDR_BYPASS_PFD |
_PHY_REV0_CDR_RX_IDLE_BYPASS);
return 0;
}
static int rts525a_extra_init_hw(struct rtsx_pcr *pcr)
{
rts5249_extra_init_hw(pcr);
rtsx_pci_write_register(pcr, RTS5250_CLK_CFG3, RTS525A_CFG_MEM_PD, RTS525A_CFG_MEM_PD);
rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
if (is_version(pcr, 0x525A, IC_VER_A)) {
rtsx_pci_write_register(pcr, L1SUB_CONFIG2,
L1SUB_AUTO_CFG, L1SUB_AUTO_CFG);
rtsx_pci_write_register(pcr, RREF_CFG,
RREF_VBGSEL_MASK, RREF_VBGSEL_1V25);
rtsx_pci_write_register(pcr, LDO_VIO_CFG,
LDO_VIO_TUNE_MASK, LDO_VIO_1V7);
rtsx_pci_write_register(pcr, LDO_DV12S_CFG,
LDO_D12_TUNE_MASK, LDO_D12_TUNE_DF);
rtsx_pci_write_register(pcr, LDO_AV12S_CFG,
LDO_AV12S_TUNE_MASK, LDO_AV12S_TUNE_DF);
rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
LDO_VCC_LMTVTH_MASK, LDO_VCC_LMTVTH_2A);
rtsx_pci_write_register(pcr, OOBS_CONFIG,
OOBS_AUTOK_DIS | OOBS_VAL_MASK, 0x89);
}
return 0;
}
static const struct pcr_ops rts525a_pcr_ops = {
.fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
.extra_init_hw = rts525a_extra_init_hw,
.optimize_phy = rts525a_optimize_phy,
.turn_on_led = rtsx_base_turn_on_led,
.turn_off_led = rtsx_base_turn_off_led,
.enable_auto_blink = rtsx_base_enable_auto_blink,
.disable_auto_blink = rtsx_base_disable_auto_blink,
.card_power_on = rts525a_card_power_on,
.card_power_off = rtsx_base_card_power_off,
.switch_output_voltage = rts525a_switch_output_voltage,
.force_power_down = rts52xa_force_power_down,
.set_l1off_cfg_sub_d0 = rts5250_set_l1off_cfg_sub_d0,
};
void rts525a_init_params(struct rtsx_pcr *pcr)
{
rts5249_init_params(pcr);
pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(25, 29, 11);
pcr->option.ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
pcr->option.ltr_l1off_snooze_sspwrgate =
LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts525a_pcr_ops;
pcr->option.ocp_en = 1;
if (pcr->option.ocp_en)
pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
pcr->option.sd_800mA_ocp_thd = RTS525A_OCP_THD_800;
}
| linux-master | drivers/misc/cardreader/rts5249.c |
// SPDX-License-Identifier: GPL-2.0-only
/* Driver for Realtek USB card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Roger Tseng <[email protected]>
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/rtsx_usb.h>
static int polling_pipe = 1;
module_param(polling_pipe, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(polling_pipe, "polling pipe (0: ctl, 1: bulk)");
static const struct mfd_cell rtsx_usb_cells[] = {
[RTSX_USB_SD_CARD] = {
.name = "rtsx_usb_sdmmc",
.pdata_size = 0,
},
[RTSX_USB_MS_CARD] = {
.name = "rtsx_usb_ms",
.pdata_size = 0,
},
};
static void rtsx_usb_sg_timed_out(struct timer_list *t)
{
struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer);
dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
usb_sg_cancel(&ucr->current_sg);
}
static int rtsx_usb_bulk_transfer_sglist(struct rtsx_ucr *ucr,
unsigned int pipe, struct scatterlist *sg, int num_sg,
unsigned int length, unsigned int *act_len, int timeout)
{
int ret;
dev_dbg(&ucr->pusb_intf->dev, "%s: xfer %u bytes, %d entries\n",
__func__, length, num_sg);
ret = usb_sg_init(&ucr->current_sg, ucr->pusb_dev, pipe, 0,
sg, num_sg, length, GFP_NOIO);
if (ret)
return ret;
ucr->sg_timer.expires = jiffies + msecs_to_jiffies(timeout);
add_timer(&ucr->sg_timer);
usb_sg_wait(&ucr->current_sg);
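/* A timer that is no longer pending has already fired and cancelled the transfer, so report a timeout */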
if (!del_timer_sync(&ucr->sg_timer))
ret = -ETIMEDOUT;
else
ret = ucr->current_sg.status;
if (act_len)
*act_len = ucr->current_sg.bytes;
return ret;
}
int rtsx_usb_transfer_data(struct rtsx_ucr *ucr, unsigned int pipe,
void *buf, unsigned int len, int num_sg,
unsigned int *act_len, int timeout)
{
if (timeout < 600)
timeout = 600;
if (num_sg)
return rtsx_usb_bulk_transfer_sglist(ucr, pipe,
(struct scatterlist *)buf, num_sg, len, act_len,
timeout);
else
return usb_bulk_msg(ucr->pusb_dev, pipe, buf, len, act_len,
timeout);
}
EXPORT_SYMBOL_GPL(rtsx_usb_transfer_data);
static inline void rtsx_usb_seq_cmd_hdr(struct rtsx_ucr *ucr,
u16 addr, u16 len, u8 seq_type)
{
rtsx_usb_cmd_hdr_tag(ucr);
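/* Bytes 5-6 carry the transfer length and bytes 8-9 the start address, both big-endian */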
ucr->cmd_buf[PACKET_TYPE] = seq_type;
ucr->cmd_buf[5] = (u8)(len >> 8);
ucr->cmd_buf[6] = (u8)len;
ucr->cmd_buf[8] = (u8)(addr >> 8);
ucr->cmd_buf[9] = (u8)addr;
if (seq_type == SEQ_WRITE)
ucr->cmd_buf[STAGE_FLAG] = 0;
else
ucr->cmd_buf[STAGE_FLAG] = STAGE_R;
}
static int rtsx_usb_seq_write_register(struct rtsx_ucr *ucr,
u16 addr, u16 len, u8 *data)
{
u16 cmd_len = ALIGN(SEQ_WRITE_DATA_OFFSET + len, 4);
if (!data)
return -EINVAL;
if (cmd_len > IOBUF_SIZE)
return -EINVAL;
rtsx_usb_seq_cmd_hdr(ucr, addr, len, SEQ_WRITE);
memcpy(ucr->cmd_buf + SEQ_WRITE_DATA_OFFSET, data, len);
return rtsx_usb_transfer_data(ucr,
usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
ucr->cmd_buf, cmd_len, 0, NULL, 100);
}
static int rtsx_usb_seq_read_register(struct rtsx_ucr *ucr,
u16 addr, u16 len, u8 *data)
{
int i, ret;
u16 rsp_len = round_down(len, 4);
u16 res_len = len - rsp_len;
if (!data)
return -EINVAL;
/* 4-byte aligned part */
if (rsp_len) {
rtsx_usb_seq_cmd_hdr(ucr, addr, len, SEQ_READ);
ret = rtsx_usb_transfer_data(ucr,
usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
ucr->cmd_buf, 12, 0, NULL, 100);
if (ret)
return ret;
ret = rtsx_usb_transfer_data(ucr,
usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN),
data, rsp_len, 0, NULL, 100);
if (ret)
return ret;
}
/* unaligned part */
for (i = 0; i < res_len; i++) {
ret = rtsx_usb_read_register(ucr, addr + rsp_len + i,
data + rsp_len + i);
if (ret)
return ret;
}
return 0;
}
int rtsx_usb_read_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len)
{
return rtsx_usb_seq_read_register(ucr, PPBUF_BASE2, (u16)buf_len, buf);
}
EXPORT_SYMBOL_GPL(rtsx_usb_read_ppbuf);
int rtsx_usb_write_ppbuf(struct rtsx_ucr *ucr, u8 *buf, int buf_len)
{
return rtsx_usb_seq_write_register(ucr, PPBUF_BASE2, (u16)buf_len, buf);
}
EXPORT_SYMBOL_GPL(rtsx_usb_write_ppbuf);
int rtsx_usb_ep0_write_register(struct rtsx_ucr *ucr, u16 addr,
u8 mask, u8 data)
{
u16 value, index;
addr |= EP0_WRITE_REG_CMD << EP0_OP_SHIFT;
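/* The opcode and register address travel byte-swapped in wValue; wIndex holds the mask in its low byte and the data in its high byte */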
value = swab16(addr);
index = mask | data << 8;
return usb_control_msg(ucr->pusb_dev,
usb_sndctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, NULL, 0, 100);
}
EXPORT_SYMBOL_GPL(rtsx_usb_ep0_write_register);
int rtsx_usb_ep0_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
{
u16 value;
u8 *buf;
int ret;
if (!data)
return -EINVAL;
buf = kzalloc(sizeof(u8), GFP_KERNEL);
if (!buf)
return -ENOMEM;
addr |= EP0_READ_REG_CMD << EP0_OP_SHIFT;
value = swab16(addr);
ret = usb_control_msg(ucr->pusb_dev,
usb_rcvctrlpipe(ucr->pusb_dev, 0), RTSX_USB_REQ_REG_OP,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, 0, buf, 1, 100);
*data = *buf;
kfree(buf);
return ret;
}
EXPORT_SYMBOL_GPL(rtsx_usb_ep0_read_register);
void rtsx_usb_add_cmd(struct rtsx_ucr *ucr, u8 cmd_type, u16 reg_addr,
u8 mask, u8 data)
{
int i;
if (ucr->cmd_idx < (IOBUF_SIZE - CMD_OFFSET) / 4) {
i = CMD_OFFSET + ucr->cmd_idx * 4;
ucr->cmd_buf[i++] = ((cmd_type & 0x03) << 6) |
(u8)((reg_addr >> 8) & 0x3F);
ucr->cmd_buf[i++] = (u8)reg_addr;
ucr->cmd_buf[i++] = mask;
ucr->cmd_buf[i++] = data;
ucr->cmd_idx++;
}
}
EXPORT_SYMBOL_GPL(rtsx_usb_add_cmd);
int rtsx_usb_send_cmd(struct rtsx_ucr *ucr, u8 flag, int timeout)
{
int ret;
ucr->cmd_buf[CNT_H] = (u8)(ucr->cmd_idx >> 8);
ucr->cmd_buf[CNT_L] = (u8)(ucr->cmd_idx);
ucr->cmd_buf[STAGE_FLAG] = flag;
ret = rtsx_usb_transfer_data(ucr,
usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT),
ucr->cmd_buf, ucr->cmd_idx * 4 + CMD_OFFSET,
0, NULL, timeout);
if (ret) {
rtsx_usb_clear_fsm_err(ucr);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_usb_send_cmd);
int rtsx_usb_get_rsp(struct rtsx_ucr *ucr, int rsp_len, int timeout)
{
if (rsp_len <= 0)
return -EINVAL;
rsp_len = ALIGN(rsp_len, 4);
return rtsx_usb_transfer_data(ucr,
usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN),
ucr->rsp_buf, rsp_len, 0, NULL, timeout);
}
EXPORT_SYMBOL_GPL(rtsx_usb_get_rsp);
static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
{
int ret;
rtsx_usb_init_cmd(ucr);
rtsx_usb_add_cmd(ucr, READ_REG_CMD, CARD_EXIST, 0x00, 0x00);
rtsx_usb_add_cmd(ucr, READ_REG_CMD, OCPSTAT, 0x00, 0x00);
ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
if (ret)
return ret;
ret = rtsx_usb_get_rsp(ucr, 2, 100);
if (ret)
return ret;
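/* Pack the card-detect bits from CARD_EXIST and the two OCP status bits into one status word */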
*status = ((ucr->rsp_buf[0] >> 2) & 0x0f) |
((ucr->rsp_buf[1] & 0x03) << 4);
return 0;
}
int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
{
int ret;
u16 *buf;
if (!status)
return -EINVAL;
if (polling_pipe == 0) {
buf = kzalloc(sizeof(u16), GFP_KERNEL);
if (!buf)
return -ENOMEM;
ret = usb_control_msg(ucr->pusb_dev,
usb_rcvctrlpipe(ucr->pusb_dev, 0),
RTSX_USB_REQ_POLL,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, 0, buf, 2, 100);
*status = *buf;
kfree(buf);
} else {
ret = rtsx_usb_get_status_with_bulk(ucr, status);
}
/* usb_control_msg may return a positive value on success */
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_usb_get_card_status);
static int rtsx_usb_write_phy_register(struct rtsx_ucr *ucr, u8 addr, u8 val)
{
dev_dbg(&ucr->pusb_intf->dev, "Write 0x%x to phy register 0x%x\n",
val, addr);
rtsx_usb_init_cmd(ucr);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VSTAIN, 0xFF, val);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VCONTROL, 0xFF, addr & 0x0F);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VCONTROL,
0xFF, (addr >> 4) & 0x0F);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x00);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, HS_VLOADM, 0xFF, 0x01);
return rtsx_usb_send_cmd(ucr, MODE_C, 100);
}
int rtsx_usb_write_register(struct rtsx_ucr *ucr, u16 addr, u8 mask, u8 data)
{
rtsx_usb_init_cmd(ucr);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, addr, mask, data);
return rtsx_usb_send_cmd(ucr, MODE_C, 100);
}
EXPORT_SYMBOL_GPL(rtsx_usb_write_register);
int rtsx_usb_read_register(struct rtsx_ucr *ucr, u16 addr, u8 *data)
{
int ret;
if (data != NULL)
*data = 0;
rtsx_usb_init_cmd(ucr);
rtsx_usb_add_cmd(ucr, READ_REG_CMD, addr, 0, 0);
ret = rtsx_usb_send_cmd(ucr, MODE_CR, 100);
if (ret)
return ret;
ret = rtsx_usb_get_rsp(ucr, 1, 100);
if (ret)
return ret;
if (data != NULL)
*data = ucr->rsp_buf[0];
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_usb_read_register);
static inline u8 double_ssc_depth(u8 depth)
{
return (depth > 1) ? (depth - 1) : depth;
}
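/* Each additional clock-divider step lowers the SSC depth code by one, bottoming out at SSC_DEPTH_2M */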
static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
{
if (div > CLK_DIV_1) {
if (ssc_depth > div - 1)
ssc_depth -= (div - 1);
else
ssc_depth = SSC_DEPTH_2M;
}
return ssc_depth;
}
int rtsx_usb_switch_clock(struct rtsx_ucr *ucr, unsigned int card_clock,
u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
int ret;
u8 n, clk_divider, mcu_cnt, div;
if (!card_clock) {
ucr->cur_clk = 0;
return 0;
}
if (initial_mode) {
/* Use around 250 kHz in the initial stage */
clk_divider = SD_CLK_DIVIDE_128;
card_clock = 30000000;
} else {
clk_divider = SD_CLK_DIVIDE_0;
}
ret = rtsx_usb_write_register(ucr, SD_CFG1,
SD_CLK_DIVIDE_MASK, clk_divider);
if (ret < 0)
return ret;
card_clock /= 1000000;
dev_dbg(&ucr->pusb_intf->dev,
"Switch card clock to %dMHz\n", card_clock);
if (!initial_mode && double_clk)
card_clock *= 2;
dev_dbg(&ucr->pusb_intf->dev,
"Internal SSC clock: %dMHz (cur_clk = %d)\n",
card_clock, ucr->cur_clk);
if (card_clock == ucr->cur_clk)
return 0;
/* Converting clock value into internal settings: n and div */
n = card_clock - 2;
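/* The divider code n encodes the internal SSC clock as n + 2 MHz */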
if ((card_clock <= 2) || (n > MAX_DIV_N))
return -EINVAL;
mcu_cnt = 60/card_clock + 3;
if (mcu_cnt > 15)
mcu_cnt = 15;
/* Make sure that the SSC clock div_n is not less than MIN_DIV_N */
div = CLK_DIV_1;
while (n < MIN_DIV_N && div < CLK_DIV_4) {
n = (n + 2) * 2 - 2;
div++;
}
dev_dbg(&ucr->pusb_intf->dev, "n = %d, div = %d\n", n, div);
if (double_clk)
ssc_depth = double_ssc_depth(ssc_depth);
ssc_depth = revise_ssc_depth(ssc_depth, div);
dev_dbg(&ucr->pusb_intf->dev, "ssc_depth = %d\n", ssc_depth);
rtsx_usb_init_cmd(ucr);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, CLK_CHANGE);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV,
0x3F, (div << 4) | mcu_cnt);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL2,
SSC_DEPTH_MASK, ssc_depth);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
if (vpclk) {
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL,
PHASE_NOT_RESET, 0);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL,
PHASE_NOT_RESET, PHASE_NOT_RESET);
}
ret = rtsx_usb_send_cmd(ucr, MODE_C, 2000);
if (ret < 0)
return ret;
ret = rtsx_usb_write_register(ucr, SSC_CTL1, 0xff,
SSC_RSTB | SSC_8X_EN | SSC_SEL_4M);
if (ret < 0)
return ret;
/* Wait for the SSC clock to stabilize */
usleep_range(100, 1000);
ret = rtsx_usb_write_register(ucr, CLK_DIV, CLK_CHANGE, 0);
if (ret < 0)
return ret;
ucr->cur_clk = card_clock;
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_usb_switch_clock);
int rtsx_usb_card_exclusive_check(struct rtsx_ucr *ucr, int card)
{
int ret;
u16 val;
u16 cd_mask[] = {
[RTSX_USB_SD_CARD] = (CD_MASK & ~SD_CD),
[RTSX_USB_MS_CARD] = (CD_MASK & ~MS_CD)
};
ret = rtsx_usb_get_card_status(ucr, &val);
/*
* If reading the status fails, return 0 (ok) for the exclusive check
* and let the flow fail somewhere else.
*/
if (ret)
return 0;
if (val & cd_mask[card])
return -EIO;
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_usb_card_exclusive_check);
static int rtsx_usb_reset_chip(struct rtsx_ucr *ucr)
{
int ret;
u8 val;
rtsx_usb_init_cmd(ucr);
if (CHECK_PKG(ucr, LQFP48)) {
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
LDO3318_PWR_MASK, LDO_SUSPEND);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL,
FORCE_LDO_POWERB, FORCE_LDO_POWERB);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1,
0x30, 0x10);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5,
0x03, 0x01);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6,
0x0C, 0x04);
}
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SYS_DUMMY0, NYET_MSAK, NYET_EN);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CD_DEGLITCH_WIDTH, 0xFF, 0x08);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
CD_DEGLITCH_EN, XD_CD_DEGLITCH_EN, 0x0);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD30_DRIVE_SEL,
SD30_DRIVE_MASK, DRIVER_TYPE_D);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
CARD_DRIVE_SEL, SD20_DRIVE_MASK, 0x0);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG, 0xE0, 0x0);
if (ucr->is_rts5179)
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD,
CARD_PULL_CTL5, 0x03, 0x01);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DMA1_CTL,
EXTEND_DMA1_ASYNC_SIGNAL, EXTEND_DMA1_ASYNC_SIGNAL);
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_INT_PEND,
XD_INT | MS_INT | SD_INT,
XD_INT | MS_INT | SD_INT);
ret = rtsx_usb_send_cmd(ucr, MODE_C, 100);
if (ret)
return ret;
/* configure non-crystal mode */
rtsx_usb_read_register(ucr, CFG_MODE, &val);
if ((val & XTAL_FREE) || ((val & CLK_MODE_MASK) == CLK_MODE_NON_XTAL)) {
ret = rtsx_usb_write_phy_register(ucr, 0xC2, 0x7C);
if (ret)
return ret;
}
return 0;
}
static int rtsx_usb_init_chip(struct rtsx_ucr *ucr)
{
int ret;
u8 val;
rtsx_usb_clear_fsm_err(ucr);
/* power on SSC */
ret = rtsx_usb_write_register(ucr,
FPDCTL, SSC_POWER_MASK, SSC_POWER_ON);
if (ret)
return ret;
usleep_range(100, 1000);
ret = rtsx_usb_write_register(ucr, CLK_DIV, CLK_CHANGE, 0x00);
if (ret)
return ret;
/* determine IC version */
ret = rtsx_usb_read_register(ucr, HW_VERSION, &val);
if (ret)
return ret;
ucr->ic_version = val & HW_VER_MASK;
/* determine package */
ret = rtsx_usb_read_register(ucr, CARD_SHARE_MODE, &val);
if (ret)
return ret;
if (val & CARD_SHARE_LQFP_SEL) {
ucr->package = LQFP48;
dev_dbg(&ucr->pusb_intf->dev, "Package: LQFP48\n");
} else {
ucr->package = QFN24;
dev_dbg(&ucr->pusb_intf->dev, "Package: QFN24\n");
}
/* determine IC variations */
rtsx_usb_read_register(ucr, CFG_MODE_1, &val);
if (val & RTS5179) {
ucr->is_rts5179 = true;
dev_dbg(&ucr->pusb_intf->dev, "Device is rts5179\n");
} else {
ucr->is_rts5179 = false;
}
return rtsx_usb_reset_chip(ucr);
}
static int rtsx_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct rtsx_ucr *ucr;
int ret;
dev_dbg(&intf->dev,
": Realtek USB Card Reader found at bus %03d address %03d\n",
usb_dev->bus->busnum, usb_dev->devnum);
ucr = devm_kzalloc(&intf->dev, sizeof(*ucr), GFP_KERNEL);
if (!ucr)
return -ENOMEM;
ucr->pusb_dev = usb_dev;
ucr->cmd_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL);
if (!ucr->cmd_buf)
return -ENOMEM;
ucr->rsp_buf = kmalloc(IOBUF_SIZE, GFP_KERNEL);
if (!ucr->rsp_buf) {
ret = -ENOMEM;
goto out_free_cmd_buf;
}
usb_set_intfdata(intf, ucr);
ucr->vendor_id = id->idVendor;
ucr->product_id = id->idProduct;
mutex_init(&ucr->dev_mutex);
ucr->pusb_intf = intf;
/* initialize */
ret = rtsx_usb_init_chip(ucr);
if (ret)
goto out_init_fail;
/* initialize USB SG transfer timer */
timer_setup(&ucr->sg_timer, rtsx_usb_sg_timed_out, 0);
ret = mfd_add_hotplug_devices(&intf->dev, rtsx_usb_cells,
ARRAY_SIZE(rtsx_usb_cells));
if (ret)
goto out_init_fail;
#ifdef CONFIG_PM
intf->needs_remote_wakeup = 1;
usb_enable_autosuspend(usb_dev);
#endif
return 0;
out_init_fail:
usb_set_intfdata(ucr->pusb_intf, NULL);
kfree(ucr->rsp_buf);
ucr->rsp_buf = NULL;
out_free_cmd_buf:
kfree(ucr->cmd_buf);
ucr->cmd_buf = NULL;
return ret;
}
static void rtsx_usb_disconnect(struct usb_interface *intf)
{
struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
dev_dbg(&intf->dev, "%s called\n", __func__);
mfd_remove_devices(&intf->dev);
usb_set_intfdata(ucr->pusb_intf, NULL);
kfree(ucr->cmd_buf);
ucr->cmd_buf = NULL;
kfree(ucr->rsp_buf);
ucr->rsp_buf = NULL;
}
#ifdef CONFIG_PM
static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
{
struct rtsx_ucr *ucr =
(struct rtsx_ucr *)usb_get_intfdata(intf);
u16 val = 0;
dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
__func__, message.event);
if (PMSG_IS_AUTO(message)) {
if (mutex_trylock(&ucr->dev_mutex)) {
rtsx_usb_get_card_status(ucr, &val);
mutex_unlock(&ucr->dev_mutex);
/* Defer the autosuspend if a card is present */
if (val & (SD_CD | MS_CD))
return -EAGAIN;
} else {
/* There is an ongoing operation */
return -EAGAIN;
}
}
return 0;
}
static int rtsx_usb_resume_child(struct device *dev, void *data)
{
pm_request_resume(dev);
return 0;
}
static int rtsx_usb_resume(struct usb_interface *intf)
{
device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child);
return 0;
}
static int rtsx_usb_reset_resume(struct usb_interface *intf)
{
struct rtsx_ucr *ucr =
(struct rtsx_ucr *)usb_get_intfdata(intf);
rtsx_usb_reset_chip(ucr);
device_for_each_child(&intf->dev, NULL, rtsx_usb_resume_child);
return 0;
}
#else /* CONFIG_PM */
#define rtsx_usb_suspend NULL
#define rtsx_usb_resume NULL
#define rtsx_usb_reset_resume NULL
#endif /* CONFIG_PM */
static int rtsx_usb_pre_reset(struct usb_interface *intf)
{
struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
mutex_lock(&ucr->dev_mutex);
return 0;
}
static int rtsx_usb_post_reset(struct usb_interface *intf)
{
struct rtsx_ucr *ucr = (struct rtsx_ucr *)usb_get_intfdata(intf);
mutex_unlock(&ucr->dev_mutex);
return 0;
}
static const struct usb_device_id rtsx_usb_usb_ids[] = {
{ USB_DEVICE(0x0BDA, 0x0129) },
{ USB_DEVICE(0x0BDA, 0x0139) },
{ USB_DEVICE(0x0BDA, 0x0140) },
{ }
};
MODULE_DEVICE_TABLE(usb, rtsx_usb_usb_ids);
static struct usb_driver rtsx_usb_driver = {
.name = "rtsx_usb",
.probe = rtsx_usb_probe,
.disconnect = rtsx_usb_disconnect,
.suspend = rtsx_usb_suspend,
.resume = rtsx_usb_resume,
.reset_resume = rtsx_usb_reset_resume,
.pre_reset = rtsx_usb_pre_reset,
.post_reset = rtsx_usb_post_reset,
.id_table = rtsx_usb_usb_ids,
.supports_autosuspend = 1,
.soft_unbind = 1,
};
module_usb_driver(rtsx_usb_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Roger Tseng <[email protected]>");
MODULE_DESCRIPTION("Realtek USB Card Reader Driver");
| linux-master | drivers/misc/cardreader/rtsx_usb.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG <[email protected]>
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
static u8 rts5209_get_ic_version(struct rtsx_pcr *pcr)
{
u8 val;
val = rtsx_pci_readb(pcr, 0x1C);
return val & 0x0F;
}
static void rts5209_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
u32 reg;
pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (rts5209_vendor_setting1_valid(reg)) {
if (rts5209_reg_check_ms_pmos(reg))
pcr->flags |= PCR_MS_PMOS;
pcr->aspm_en = rts5209_reg_to_aspm(reg);
}
pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
if (rts5209_vendor_setting2_valid(reg)) {
pcr->sd30_drive_sel_1v8 =
rts5209_reg_to_sd30_drive_sel_1v8(reg);
pcr->sd30_drive_sel_3v3 =
rts5209_reg_to_sd30_drive_sel_3v3(reg);
pcr->card_drive_sel = rts5209_reg_to_card_drive_sel(reg);
}
}
static void rts5209_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
}
static int rts5209_extra_init_hw(struct rtsx_pcr *pcr)
{
rtsx_pci_init_cmd(pcr);
/* Turn off LED */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO, 0xFF, 0x03);
/* Reset ASPM state to default value */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
/* Force CLKREQ# PIN to drive 0 to request clock */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
/* Configure GPIO as output */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_GPIO_DIR, 0xFF, 0x03);
/* Configure driving */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
0xFF, pcr->sd30_drive_sel_3v3);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rts5209_optimize_phy(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_phy_register(pcr, 0x00, 0xB966);
}
static int rts5209_turn_on_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x00);
}
static int rts5209_turn_off_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x01);
}
static int rts5209_enable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0xFF, 0x0D);
}
static int rts5209_disable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0x08, 0x00);
}
static int rts5209_card_power_on(struct rtsx_pcr *pcr, int card)
{
int err;
u8 pwr_mask, partial_pwr_on, pwr_on;
pwr_mask = SD_POWER_MASK;
partial_pwr_on = SD_PARTIAL_POWER_ON;
pwr_on = SD_POWER_ON;
if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) {
pwr_mask = MS_POWER_MASK;
partial_pwr_on = MS_PARTIAL_POWER_ON;
pwr_on = MS_POWER_ON;
}
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
pwr_mask, partial_pwr_on);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x04);
err = rtsx_pci_send_cmd(pcr, 100);
if (err < 0)
return err;
/* Avoid an excessively large in-rush current */
udelay(150);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL, pwr_mask, pwr_on);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x00);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rts5209_card_power_off(struct rtsx_pcr *pcr, int card)
{
u8 pwr_mask, pwr_off;
pwr_mask = SD_POWER_MASK;
pwr_off = SD_POWER_OFF;
if ((pcr->flags & PCR_MS_PMOS) && (card == RTSX_MS_CARD)) {
pwr_mask = MS_POWER_MASK;
pwr_off = MS_POWER_OFF;
}
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
pwr_mask | PMOS_STRG_MASK, pwr_off | PMOS_STRG_400mA);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x06);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rts5209_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
int err;
if (voltage == OUTPUT_3V3) {
err = rtsx_pci_write_register(pcr,
SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
if (err < 0)
return err;
} else if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_register(pcr,
SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
if (err < 0)
return err;
} else {
return -EINVAL;
}
return 0;
}
static const struct pcr_ops rts5209_pcr_ops = {
.fetch_vendor_settings = rts5209_fetch_vendor_settings,
.extra_init_hw = rts5209_extra_init_hw,
.optimize_phy = rts5209_optimize_phy,
.turn_on_led = rts5209_turn_on_led,
.turn_off_led = rts5209_turn_off_led,
.enable_auto_blink = rts5209_enable_auto_blink,
.disable_auto_blink = rts5209_disable_auto_blink,
.card_power_on = rts5209_card_power_on,
.card_power_off = rts5209_card_power_off,
.switch_output_voltage = rts5209_switch_output_voltage,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
.force_power_down = rts5209_force_power_down,
};
/* SD Pull Control Enable:
* SD_DAT[3:0] ==> pull up
* SD_CD ==> pull up
* SD_WP ==> pull up
* SD_CMD ==> pull up
* SD_CLK ==> pull down
*/
static const u32 rts5209_sd_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
0,
};
/* SD Pull Control Disable:
* SD_DAT[3:0] ==> pull down
* SD_CD ==> pull up
* SD_WP ==> pull down
* SD_CMD ==> pull down
* SD_CLK ==> pull down
*/
static const u32 rts5209_sd_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
0,
};
/* MS Pull Control Enable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rts5209_ms_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
0,
};
/* MS Pull Control Disable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rts5209_ms_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
0,
};
void rts5209_init_params(struct rtsx_pcr *pcr)
{
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 |
EXTRA_CAPS_SD_SDR104 | EXTRA_CAPS_MMC_8BIT;
pcr->num_slots = 2;
pcr->ops = &rts5209_pcr_ops;
pcr->flags = 0;
pcr->card_drive_sel = RTS5209_CARD_DRIVE_DEFAULT;
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 16);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
pcr->ic_version = rts5209_get_ic_version(pcr);
pcr->sd_pull_ctl_enable_tbl = rts5209_sd_pull_ctl_enable_tbl;
pcr->sd_pull_ctl_disable_tbl = rts5209_sd_pull_ctl_disable_tbl;
pcr->ms_pull_ctl_enable_tbl = rts5209_ms_pull_ctl_enable_tbl;
pcr->ms_pull_ctl_disable_tbl = rts5209_ms_pull_ctl_disable_tbl;
}
| linux-master | drivers/misc/cardreader/rts5209.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG <[email protected]>
*/
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/idr.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/rtsx_pci.h>
#include <linux/mmc/card.h>
#include <asm/unaligned.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include "rtsx_pcr.h"
#include "rts5261.h"
#include "rts5228.h"
static bool msi_en = true;
module_param(msi_en, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi_en, "Enable MSI");
static DEFINE_IDR(rtsx_pci_idr);
static DEFINE_SPINLOCK(rtsx_pci_lock);
static struct mfd_cell rtsx_pcr_cells[] = {
[RTSX_SD_CARD] = {
.name = DRV_NAME_RTSX_PCI_SDMMC,
},
};
static const struct pci_device_id rtsx_pci_ids[] = {
{ PCI_DEVICE(0x10EC, 0x5209), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5229), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5289), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5227), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x522A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5249), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5287), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5286), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x524A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x525A), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, rtsx_pci_ids);
static int rtsx_comm_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
rtsx_pci_write_register(pcr, MSGTXDATA0,
MASK_8_BIT_DEF, (u8) (latency & 0xFF));
rtsx_pci_write_register(pcr, MSGTXDATA1,
MASK_8_BIT_DEF, (u8)((latency >> 8) & 0xFF));
rtsx_pci_write_register(pcr, MSGTXDATA2,
MASK_8_BIT_DEF, (u8)((latency >> 16) & 0xFF));
rtsx_pci_write_register(pcr, MSGTXDATA3,
MASK_8_BIT_DEF, (u8)((latency >> 24) & 0xFF));
rtsx_pci_write_register(pcr, LTR_CTL, LTR_TX_EN_MASK |
LTR_LATENCY_MODE_MASK, LTR_TX_EN_1 | LTR_LATENCY_MODE_SW);
return 0;
}
int rtsx_set_ltr_latency(struct rtsx_pcr *pcr, u32 latency)
{
return rtsx_comm_set_ltr_latency(pcr, latency);
}
static void rtsx_comm_set_aspm(struct rtsx_pcr *pcr, bool enable)
{
if (pcr->aspm_enabled == enable)
return;
if (pcr->aspm_mode == ASPM_MODE_CFG) {
pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
PCI_EXP_LNKCTL_ASPMC,
enable ? pcr->aspm_en : 0);
} else if (pcr->aspm_mode == ASPM_MODE_REG) {
if (pcr->aspm_en & 0x02)
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
FORCE_ASPM_CTL1, enable ? 0 : FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
else
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, FORCE_ASPM_CTL0 |
FORCE_ASPM_CTL1, FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1);
}
if (!enable && (pcr->aspm_en & 0x02))
mdelay(10);
pcr->aspm_enabled = enable;
}
static void rtsx_disable_aspm(struct rtsx_pcr *pcr)
{
if (pcr->ops->set_aspm)
pcr->ops->set_aspm(pcr, false);
else
rtsx_comm_set_aspm(pcr, false);
}
int rtsx_set_l1off_sub(struct rtsx_pcr *pcr, u8 val)
{
rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, val);
return 0;
}
static void rtsx_set_l1off_sub_cfg_d0(struct rtsx_pcr *pcr, int active)
{
if (pcr->ops->set_l1off_cfg_sub_d0)
pcr->ops->set_l1off_cfg_sub_d0(pcr, active);
}
static void rtsx_comm_pm_full_on(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &pcr->option;
rtsx_disable_aspm(pcr);
/* Fixes DMA transfer timeout issue after disabling ASPM on RTS5260 */
msleep(1);
if (option->ltr_enabled)
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
rtsx_set_l1off_sub_cfg_d0(pcr, 1);
}
static void rtsx_pm_full_on(struct rtsx_pcr *pcr)
{
rtsx_comm_pm_full_on(pcr);
}
void rtsx_pci_start_run(struct rtsx_pcr *pcr)
{
/* If the PCI device has been removed, don't queue idle work any more */
if (pcr->remove_pci)
return;
if (pcr->state != PDEV_STAT_RUN) {
pcr->state = PDEV_STAT_RUN;
if (pcr->ops->enable_auto_blink)
pcr->ops->enable_auto_blink(pcr);
rtsx_pm_full_on(pcr);
}
}
EXPORT_SYMBOL_GPL(rtsx_pci_start_run);
int rtsx_pci_write_register(struct rtsx_pcr *pcr, u16 addr, u8 mask, u8 data)
{
int i;
u32 val = HAIMR_WRITE_START;
val |= (u32)(addr & 0x3FFF) << 16;
val |= (u32)mask << 8;
val |= (u32)data;
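/* One HAIMR word carries address, mask and data; the hardware clears the transfer-end flag and echoes the written data byte when the access completes */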
rtsx_pci_writel(pcr, RTSX_HAIMR, val);
for (i = 0; i < MAX_RW_REG_CNT; i++) {
val = rtsx_pci_readl(pcr, RTSX_HAIMR);
if ((val & HAIMR_TRANS_END) == 0) {
if (data != (u8)val)
return -EIO;
return 0;
}
}
return -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_register);
int rtsx_pci_read_register(struct rtsx_pcr *pcr, u16 addr, u8 *data)
{
u32 val = HAIMR_READ_START;
int i;
val |= (u32)(addr & 0x3FFF) << 16;
rtsx_pci_writel(pcr, RTSX_HAIMR, val);
for (i = 0; i < MAX_RW_REG_CNT; i++) {
val = rtsx_pci_readl(pcr, RTSX_HAIMR);
if ((val & HAIMR_TRANS_END) == 0)
break;
}
if (i >= MAX_RW_REG_CNT)
return -ETIMEDOUT;
if (data)
*data = (u8)(val & 0xFF);
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_register);
int __rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
int err, i, finished = 0;
u8 tmp;
rtsx_pci_write_register(pcr, PHYDATA0, 0xFF, (u8)val);
rtsx_pci_write_register(pcr, PHYDATA1, 0xFF, (u8)(val >> 8));
rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x81);
for (i = 0; i < 100000; i++) {
err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
if (err < 0)
return err;
if (!(tmp & 0x80)) {
finished = 1;
break;
}
}
if (!finished)
return -ETIMEDOUT;
return 0;
}
int rtsx_pci_write_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 val)
{
if (pcr->ops->write_phy)
return pcr->ops->write_phy(pcr, addr, val);
return __rtsx_pci_write_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_phy_register);
int __rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
int err, i, finished = 0;
u16 data;
u8 tmp, val1, val2;
rtsx_pci_write_register(pcr, PHYADDR, 0xFF, addr);
rtsx_pci_write_register(pcr, PHYRWCTL, 0xFF, 0x80);
for (i = 0; i < 100000; i++) {
err = rtsx_pci_read_register(pcr, PHYRWCTL, &tmp);
if (err < 0)
return err;
if (!(tmp & 0x80)) {
finished = 1;
break;
}
}
if (!finished)
return -ETIMEDOUT;
rtsx_pci_read_register(pcr, PHYDATA0, &val1);
rtsx_pci_read_register(pcr, PHYDATA1, &val2);
data = val1 | (val2 << 8);
if (val)
*val = data;
return 0;
}
int rtsx_pci_read_phy_register(struct rtsx_pcr *pcr, u8 addr, u16 *val)
{
if (pcr->ops->read_phy)
return pcr->ops->read_phy(pcr, addr, val);
return __rtsx_pci_read_phy_register(pcr, addr, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_phy_register);
void rtsx_pci_stop_cmd(struct rtsx_pcr *pcr)
{
if (pcr->ops->stop_cmd)
return pcr->ops->stop_cmd(pcr);
rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
rtsx_pci_write_register(pcr, DMACTL, 0x80, 0x80);
rtsx_pci_write_register(pcr, RBCTL, 0x80, 0x80);
}
EXPORT_SYMBOL_GPL(rtsx_pci_stop_cmd);
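/*
 * Queue one command in the host command buffer.  Each entry is a 32-bit
 * little-endian word: bits 31:30 hold the command type, bits 29:16 the
 * register address, bits 15:8 the mask and bits 7:0 the data.  The command
 * index (pcr->ci) is advanced under pcr->lock.
 */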
void rtsx_pci_add_cmd(struct rtsx_pcr *pcr,
u8 cmd_type, u16 reg_addr, u8 mask, u8 data)
{
unsigned long flags;
u32 val = 0;
u32 *ptr = (u32 *)(pcr->host_cmds_ptr);
val |= (u32)(cmd_type & 0x03) << 30;
val |= (u32)(reg_addr & 0x3FFF) << 16;
val |= (u32)mask << 8;
val |= (u32)data;
spin_lock_irqsave(&pcr->lock, flags);
ptr += pcr->ci;
if (pcr->ci < (HOST_CMDS_BUF_LEN / 4)) {
put_unaligned_le32(val, ptr);
ptr++;
pcr->ci++;
}
spin_unlock_irqrestore(&pcr->lock, flags);
}
EXPORT_SYMBOL_GPL(rtsx_pci_add_cmd);
void rtsx_pci_send_cmd_no_wait(struct rtsx_pcr *pcr)
{
u32 val = 1 << 31;
rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
/* Hardware Auto Response */
val |= 0x40000000;
rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd_no_wait);
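/*
 * Kick off the queued host commands and wait for the completion signalled
 * from the ISR.  A timeout or a failed transfer (other than device removal)
 * stops the command engine via rtsx_pci_stop_cmd().
 */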
int rtsx_pci_send_cmd(struct rtsx_pcr *pcr, int timeout)
{
struct completion trans_done;
u32 val = 1 << 31;
long timeleft;
unsigned long flags;
int err = 0;
spin_lock_irqsave(&pcr->lock, flags);
/* set up data structures for the wakeup system */
pcr->done = &trans_done;
pcr->trans_result = TRANS_NOT_READY;
init_completion(&trans_done);
rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
val |= (u32)(pcr->ci * 4) & 0x00FFFFFF;
/* Hardware Auto Response */
val |= 0x40000000;
rtsx_pci_writel(pcr, RTSX_HCBCTLR, val);
spin_unlock_irqrestore(&pcr->lock, flags);
/* Wait for TRANS_OK_INT */
timeleft = wait_for_completion_interruptible_timeout(
&trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
err = -ETIMEDOUT;
goto finish_send_cmd;
}
spin_lock_irqsave(&pcr->lock, flags);
if (pcr->trans_result == TRANS_RESULT_FAIL)
err = -EINVAL;
else if (pcr->trans_result == TRANS_RESULT_OK)
err = 0;
else if (pcr->trans_result == TRANS_NO_DEVICE)
err = -ENODEV;
spin_unlock_irqrestore(&pcr->lock, flags);
finish_send_cmd:
spin_lock_irqsave(&pcr->lock, flags);
pcr->done = NULL;
spin_unlock_irqrestore(&pcr->lock, flags);
if ((err < 0) && (err != -ENODEV))
rtsx_pci_stop_cmd(pcr);
if (pcr->finish_me)
complete(pcr->finish_me);
return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_send_cmd);
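/*
 * Append one entry to the host scatter-gather table.  The 64-bit entry
 * packs the DMA address in the upper 32 bits and the segment length plus
 * the VALID/TRANS_DATA/END option bits in the lower 32 bits; RTS5261 and
 * RTS5228 use a wider length encoding for segments above 64 KiB.
 */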
static void rtsx_pci_add_sg_tbl(struct rtsx_pcr *pcr,
dma_addr_t addr, unsigned int len, int end)
{
u64 *ptr = (u64 *)(pcr->host_sg_tbl_ptr) + pcr->sgi;
u64 val;
u8 option = RTSX_SG_VALID | RTSX_SG_TRANS_DATA;
pcr_dbg(pcr, "DMA addr: 0x%x, Len: 0x%x\n", (unsigned int)addr, len);
if (end)
option |= RTSX_SG_END;
if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5228)) {
if (len > 0xFFFF)
val = ((u64)addr << 32) | (((u64)len & 0xFFFF) << 16)
| (((u64)len >> 16) << 6) | option;
else
val = ((u64)addr << 32) | ((u64)len << 16) | option;
} else {
val = ((u64)addr << 32) | ((u64)len << 12) | option;
}
put_unaligned_le64(val, ptr);
pcr->sgi++;
}
int rtsx_pci_transfer_data(struct rtsx_pcr *pcr, struct scatterlist *sglist,
int num_sg, bool read, int timeout)
{
int err = 0, count;
pcr_dbg(pcr, "--> %s: num_sg = %d\n", __func__, num_sg);
count = rtsx_pci_dma_map_sg(pcr, sglist, num_sg, read);
if (count < 1)
return -EINVAL;
pcr_dbg(pcr, "DMA mapping count: %d\n", count);
err = rtsx_pci_dma_transfer(pcr, sglist, count, read, timeout);
rtsx_pci_dma_unmap_sg(pcr, sglist, num_sg, read);
return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_transfer_data);
int rtsx_pci_dma_map_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
int num_sg, bool read)
{
enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (pcr->remove_pci)
return -EINVAL;
if ((sglist == NULL) || (num_sg <= 0))
return -EINVAL;
return dma_map_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_map_sg);
void rtsx_pci_dma_unmap_sg(struct rtsx_pcr *pcr, struct scatterlist *sglist,
int num_sg, bool read)
{
enum dma_data_direction dir = read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
dma_unmap_sg(&(pcr->pci->dev), sglist, num_sg, dir);
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_unmap_sg);
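/*
 * Run an ADMA transfer over an already mapped scatterlist: build the SG
 * table, program the table address and the trigger register, then sleep
 * until the ISR reports success or failure, or the timeout expires.
 */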
int rtsx_pci_dma_transfer(struct rtsx_pcr *pcr, struct scatterlist *sglist,
int count, bool read, int timeout)
{
struct completion trans_done;
struct scatterlist *sg;
dma_addr_t addr;
long timeleft;
unsigned long flags;
unsigned int len;
int i, err = 0;
u32 val;
u8 dir = read ? DEVICE_TO_HOST : HOST_TO_DEVICE;
if (pcr->remove_pci)
return -ENODEV;
if ((sglist == NULL) || (count < 1))
return -EINVAL;
val = ((u32)(dir & 0x01) << 29) | TRIG_DMA | ADMA_MODE;
pcr->sgi = 0;
for_each_sg(sglist, sg, count, i) {
addr = sg_dma_address(sg);
len = sg_dma_len(sg);
rtsx_pci_add_sg_tbl(pcr, addr, len, i == count - 1);
}
spin_lock_irqsave(&pcr->lock, flags);
pcr->done = &trans_done;
pcr->trans_result = TRANS_NOT_READY;
init_completion(&trans_done);
rtsx_pci_writel(pcr, RTSX_HDBAR, pcr->host_sg_tbl_addr);
rtsx_pci_writel(pcr, RTSX_HDBCTLR, val);
spin_unlock_irqrestore(&pcr->lock, flags);
timeleft = wait_for_completion_interruptible_timeout(
&trans_done, msecs_to_jiffies(timeout));
if (timeleft <= 0) {
pcr_dbg(pcr, "Timeout (%s %d)\n", __func__, __LINE__);
err = -ETIMEDOUT;
goto out;
}
spin_lock_irqsave(&pcr->lock, flags);
if (pcr->trans_result == TRANS_RESULT_FAIL) {
err = -EILSEQ;
if (pcr->dma_error_count < RTS_MAX_TIMES_FREQ_REDUCTION)
pcr->dma_error_count++;
}
else if (pcr->trans_result == TRANS_NO_DEVICE)
err = -ENODEV;
spin_unlock_irqrestore(&pcr->lock, flags);
out:
spin_lock_irqsave(&pcr->lock, flags);
pcr->done = NULL;
spin_unlock_irqrestore(&pcr->lock, flags);
if ((err < 0) && (err != -ENODEV))
rtsx_pci_stop_cmd(pcr);
if (pcr->finish_me)
complete(pcr->finish_me);
return err;
}
EXPORT_SYMBOL_GPL(rtsx_pci_dma_transfer);
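/*
 * Read up to 512 bytes from the ping-pong buffer in 256-byte chunks by
 * queueing one READ_REG_CMD per byte and copying the command response data
 * back to the caller's buffer.
 */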
int rtsx_pci_read_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
int err;
int i, j;
u16 reg;
u8 *ptr;
if (buf_len > 512)
buf_len = 512;
ptr = buf;
reg = PPBUF_BASE2;
for (i = 0; i < buf_len / 256; i++) {
rtsx_pci_init_cmd(pcr);
for (j = 0; j < 256; j++)
rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
err = rtsx_pci_send_cmd(pcr, 250);
if (err < 0)
return err;
memcpy(ptr, rtsx_pci_get_cmd_data(pcr), 256);
ptr += 256;
}
if (buf_len % 256) {
rtsx_pci_init_cmd(pcr);
for (j = 0; j < buf_len % 256; j++)
rtsx_pci_add_cmd(pcr, READ_REG_CMD, reg++, 0, 0);
err = rtsx_pci_send_cmd(pcr, 250);
if (err < 0)
return err;
}
memcpy(ptr, rtsx_pci_get_cmd_data(pcr), buf_len % 256);
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_read_ppbuf);
int rtsx_pci_write_ppbuf(struct rtsx_pcr *pcr, u8 *buf, int buf_len)
{
int err;
int i, j;
u16 reg;
u8 *ptr;
if (buf_len > 512)
buf_len = 512;
ptr = buf;
reg = PPBUF_BASE2;
for (i = 0; i < buf_len / 256; i++) {
rtsx_pci_init_cmd(pcr);
for (j = 0; j < 256; j++) {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
reg++, 0xFF, *ptr);
ptr++;
}
err = rtsx_pci_send_cmd(pcr, 250);
if (err < 0)
return err;
}
if (buf_len % 256) {
rtsx_pci_init_cmd(pcr);
for (j = 0; j < buf_len % 256; j++) {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
reg++, 0xFF, *ptr);
ptr++;
}
err = rtsx_pci_send_cmd(pcr, 250);
if (err < 0)
return err;
}
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_write_ppbuf);
static int rtsx_pci_set_pull_ctl(struct rtsx_pcr *pcr, const u32 *tbl)
{
rtsx_pci_init_cmd(pcr);
while (*tbl & 0xFFFF0000) {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
(u16)(*tbl >> 16), 0xFF, (u8)(*tbl));
tbl++;
}
return rtsx_pci_send_cmd(pcr, 100);
}
int rtsx_pci_card_pull_ctl_enable(struct rtsx_pcr *pcr, int card)
{
const u32 *tbl;
if (card == RTSX_SD_CARD)
tbl = pcr->sd_pull_ctl_enable_tbl;
else if (card == RTSX_MS_CARD)
tbl = pcr->ms_pull_ctl_enable_tbl;
else
return -EINVAL;
return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_enable);
int rtsx_pci_card_pull_ctl_disable(struct rtsx_pcr *pcr, int card)
{
const u32 *tbl;
if (card == RTSX_SD_CARD)
tbl = pcr->sd_pull_ctl_disable_tbl;
else if (card == RTSX_MS_CARD)
tbl = pcr->ms_pull_ctl_disable_tbl;
else
return -EINVAL;
return rtsx_pci_set_pull_ctl(pcr, tbl);
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_pull_ctl_disable);
static void rtsx_pci_enable_bus_int(struct rtsx_pcr *pcr)
{
struct rtsx_hw_param *hw_param = &pcr->hw_param;
pcr->bier = TRANS_OK_INT_EN | TRANS_FAIL_INT_EN | SD_INT_EN
| hw_param->interrupt_en;
if (pcr->num_slots > 1)
pcr->bier |= MS_INT_EN;
/* Enable Bus Interrupt */
rtsx_pci_writel(pcr, RTSX_BIER, pcr->bier);
pcr_dbg(pcr, "RTSX_BIER: 0x%08x\n", pcr->bier);
}
static inline u8 double_ssc_depth(u8 depth)
{
return ((depth > 1) ? (depth - 1) : depth);
}
static u8 revise_ssc_depth(u8 ssc_depth, u8 div)
{
if (div > CLK_DIV_1) {
if (ssc_depth > (div - 1))
ssc_depth -= (div - 1);
else
ssc_depth = SSC_DEPTH_4M;
}
return ssc_depth;
}
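/*
 * Switch the card clock.  The requested clock is converted to an SSC
 * divider value n (clk = n + 2 by default, or via conv_clk_and_div_n),
 * the divider is doubled while n is below MIN_DIV_N_PCR, and the SSC
 * depth is adjusted to match before the new clock is programmed.
 */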
int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock,
u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk)
{
int err, clk;
u8 n, clk_divider, mcu_cnt, div;
static const u8 depth[] = {
[RTSX_SSC_DEPTH_4M] = SSC_DEPTH_4M,
[RTSX_SSC_DEPTH_2M] = SSC_DEPTH_2M,
[RTSX_SSC_DEPTH_1M] = SSC_DEPTH_1M,
[RTSX_SSC_DEPTH_500K] = SSC_DEPTH_500K,
[RTSX_SSC_DEPTH_250K] = SSC_DEPTH_250K,
};
if (PCI_PID(pcr) == PID_5261)
return rts5261_pci_switch_clock(pcr, card_clock,
ssc_depth, initial_mode, double_clk, vpclk);
if (PCI_PID(pcr) == PID_5228)
return rts5228_pci_switch_clock(pcr, card_clock,
ssc_depth, initial_mode, double_clk, vpclk);
if (initial_mode) {
/* Use roughly 250 kHz in the initial stage */
clk_divider = SD_CLK_DIVIDE_128;
card_clock = 30000000;
} else {
clk_divider = SD_CLK_DIVIDE_0;
}
err = rtsx_pci_write_register(pcr, SD_CFG1,
SD_CLK_DIVIDE_MASK, clk_divider);
if (err < 0)
return err;
/* Reduce card clock by 20MHz each time a DMA transfer error occurs */
if (card_clock == UHS_SDR104_MAX_DTR &&
pcr->dma_error_count &&
PCI_PID(pcr) == RTS5227_DEVICE_ID)
card_clock = UHS_SDR104_MAX_DTR -
(pcr->dma_error_count * 20000000);
card_clock /= 1000000;
pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock);
clk = card_clock;
if (!initial_mode && double_clk)
clk = card_clock * 2;
pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n",
clk, pcr->cur_clock);
if (clk == pcr->cur_clock)
return 0;
if (pcr->ops->conv_clk_and_div_n)
n = (u8)pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N);
else
n = (u8)(clk - 2);
if ((clk <= 2) || (n > MAX_DIV_N_PCR))
return -EINVAL;
mcu_cnt = (u8)(125/clk + 3);
if (mcu_cnt > 15)
mcu_cnt = 15;
/* Make sure that the SSC clock div_n is not less than MIN_DIV_N_PCR */
div = CLK_DIV_1;
while ((n < MIN_DIV_N_PCR) && (div < CLK_DIV_8)) {
if (pcr->ops->conv_clk_and_div_n) {
int dbl_clk = pcr->ops->conv_clk_and_div_n(n,
DIV_N_TO_CLK) * 2;
n = (u8)pcr->ops->conv_clk_and_div_n(dbl_clk,
CLK_TO_DIV_N);
} else {
n = (n + 2) * 2 - 2;
}
div++;
}
pcr_dbg(pcr, "n = %d, div = %d\n", n, div);
ssc_depth = depth[ssc_depth];
if (double_clk)
ssc_depth = double_ssc_depth(ssc_depth);
ssc_depth = revise_ssc_depth(ssc_depth, div);
pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL,
CLK_LOW_FREQ, CLK_LOW_FREQ);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV,
0xFF, (div << 4) | mcu_cnt);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2,
SSC_DEPTH_MASK, ssc_depth);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB);
if (vpclk) {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
PHASE_NOT_RESET, 0);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL,
PHASE_NOT_RESET, PHASE_NOT_RESET);
}
err = rtsx_pci_send_cmd(pcr, 2000);
if (err < 0)
return err;
/* Wait for the SSC clock to stabilize */
udelay(SSC_CLOCK_STABLE_WAIT);
err = rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
if (err < 0)
return err;
pcr->cur_clock = clk;
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_clock);
int rtsx_pci_card_power_on(struct rtsx_pcr *pcr, int card)
{
if (pcr->ops->card_power_on)
return pcr->ops->card_power_on(pcr, card);
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_on);
int rtsx_pci_card_power_off(struct rtsx_pcr *pcr, int card)
{
if (pcr->ops->card_power_off)
return pcr->ops->card_power_off(pcr, card);
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_power_off);
int rtsx_pci_card_exclusive_check(struct rtsx_pcr *pcr, int card)
{
static const unsigned int cd_mask[] = {
[RTSX_SD_CARD] = SD_EXIST,
[RTSX_MS_CARD] = MS_EXIST
};
if (!(pcr->flags & PCR_MS_PMOS)) {
/* When using single PMOS, accessing card is not permitted
* if the existing card is not the designated one.
*/
if (pcr->card_exist & (~cd_mask[card]))
return -EIO;
}
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exclusive_check);
int rtsx_pci_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
if (pcr->ops->switch_output_voltage)
return pcr->ops->switch_output_voltage(pcr, voltage);
return 0;
}
EXPORT_SYMBOL_GPL(rtsx_pci_switch_output_voltage);
unsigned int rtsx_pci_card_exist(struct rtsx_pcr *pcr)
{
unsigned int val;
val = rtsx_pci_readl(pcr, RTSX_BIPR);
if (pcr->ops->cd_deglitch)
val = pcr->ops->cd_deglitch(pcr);
return val;
}
EXPORT_SYMBOL_GPL(rtsx_pci_card_exist);
void rtsx_pci_complete_unfinished_transfer(struct rtsx_pcr *pcr)
{
struct completion finish;
pcr->finish_me = &finish;
init_completion(&finish);
if (pcr->done)
complete(pcr->done);
if (!pcr->remove_pci)
rtsx_pci_stop_cmd(pcr);
wait_for_completion_interruptible_timeout(&finish,
msecs_to_jiffies(2));
pcr->finish_me = NULL;
}
EXPORT_SYMBOL_GPL(rtsx_pci_complete_unfinished_transfer);
static void rtsx_pci_card_detect(struct work_struct *work)
{
struct delayed_work *dwork;
struct rtsx_pcr *pcr;
unsigned long flags;
unsigned int card_detect = 0, card_inserted, card_removed;
u32 irq_status;
dwork = to_delayed_work(work);
pcr = container_of(dwork, struct rtsx_pcr, carddet_work);
pcr_dbg(pcr, "--> %s\n", __func__);
mutex_lock(&pcr->pcr_mutex);
spin_lock_irqsave(&pcr->lock, flags);
irq_status = rtsx_pci_readl(pcr, RTSX_BIPR);
pcr_dbg(pcr, "irq_status: 0x%08x\n", irq_status);
irq_status &= CARD_EXIST;
card_inserted = pcr->card_inserted & irq_status;
card_removed = pcr->card_removed;
pcr->card_inserted = 0;
pcr->card_removed = 0;
spin_unlock_irqrestore(&pcr->lock, flags);
if (card_inserted || card_removed) {
pcr_dbg(pcr, "card_inserted: 0x%x, card_removed: 0x%x\n",
card_inserted, card_removed);
if (pcr->ops->cd_deglitch)
card_inserted = pcr->ops->cd_deglitch(pcr);
card_detect = card_inserted | card_removed;
pcr->card_exist |= card_inserted;
pcr->card_exist &= ~card_removed;
}
mutex_unlock(&pcr->pcr_mutex);
if ((card_detect & SD_EXIST) && pcr->slots[RTSX_SD_CARD].card_event)
pcr->slots[RTSX_SD_CARD].card_event(
pcr->slots[RTSX_SD_CARD].p_dev);
if ((card_detect & MS_EXIST) && pcr->slots[RTSX_MS_CARD].card_event)
pcr->slots[RTSX_MS_CARD].card_event(
pcr->slots[RTSX_MS_CARD].p_dev);
}
static void rtsx_pci_process_ocp(struct rtsx_pcr *pcr)
{
if (pcr->ops->process_ocp) {
pcr->ops->process_ocp(pcr);
} else {
if (!pcr->option.ocp_en)
return;
rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
if (pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
rtsx_pci_clear_ocpstat(pcr);
pcr->ocp_stat = 0;
}
}
}
static int rtsx_pci_process_ocp_interrupt(struct rtsx_pcr *pcr)
{
if (pcr->option.ocp_en)
rtsx_pci_process_ocp(pcr);
return 0;
}
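/*
 * Top-half interrupt handler: reads and clears RTSX_BIPR, records card
 * insert/remove events, completes pending command/DMA waiters on
 * TRANS_OK/TRANS_FAIL, and schedules the delayed card-detect work.
 */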
static irqreturn_t rtsx_pci_isr(int irq, void *dev_id)
{
struct rtsx_pcr *pcr = dev_id;
u32 int_reg;
if (!pcr)
return IRQ_NONE;
spin_lock(&pcr->lock);
int_reg = rtsx_pci_readl(pcr, RTSX_BIPR);
/* Clear interrupt flag */
rtsx_pci_writel(pcr, RTSX_BIPR, int_reg);
if ((int_reg & pcr->bier) == 0) {
spin_unlock(&pcr->lock);
return IRQ_NONE;
}
if (int_reg == 0xFFFFFFFF) {
spin_unlock(&pcr->lock);
return IRQ_HANDLED;
}
int_reg &= (pcr->bier | 0x7FFFFF);
if (int_reg & SD_OC_INT)
rtsx_pci_process_ocp_interrupt(pcr);
if (int_reg & SD_INT) {
if (int_reg & SD_EXIST) {
pcr->card_inserted |= SD_EXIST;
} else {
pcr->card_removed |= SD_EXIST;
pcr->card_inserted &= ~SD_EXIST;
if (PCI_PID(pcr) == PID_5261) {
rtsx_pci_write_register(pcr, RTS5261_FW_STATUS,
RTS5261_EXPRESS_LINK_FAIL_MASK, 0);
pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS;
}
}
pcr->dma_error_count = 0;
}
if (int_reg & MS_INT) {
if (int_reg & MS_EXIST) {
pcr->card_inserted |= MS_EXIST;
} else {
pcr->card_removed |= MS_EXIST;
pcr->card_inserted &= ~MS_EXIST;
}
}
if (int_reg & (NEED_COMPLETE_INT | DELINK_INT)) {
if (int_reg & (TRANS_FAIL_INT | DELINK_INT)) {
pcr->trans_result = TRANS_RESULT_FAIL;
if (pcr->done)
complete(pcr->done);
} else if (int_reg & TRANS_OK_INT) {
pcr->trans_result = TRANS_RESULT_OK;
if (pcr->done)
complete(pcr->done);
}
}
if ((pcr->card_inserted || pcr->card_removed) && !(int_reg & SD_OC_INT))
schedule_delayed_work(&pcr->carddet_work,
msecs_to_jiffies(200));
spin_unlock(&pcr->lock);
return IRQ_HANDLED;
}
static int rtsx_pci_acquire_irq(struct rtsx_pcr *pcr)
{
pcr_dbg(pcr, "%s: pcr->msi_en = %d, pci->irq = %d\n",
__func__, pcr->msi_en, pcr->pci->irq);
if (request_irq(pcr->pci->irq, rtsx_pci_isr,
pcr->msi_en ? 0 : IRQF_SHARED,
DRV_NAME_RTSX_PCI, pcr)) {
dev_err(&(pcr->pci->dev),
"rtsx_sdmmc: unable to grab IRQ %d, disabling device\n",
pcr->pci->irq);
return -1;
}
pcr->irq = pcr->pci->irq;
pci_intx(pcr->pci, !pcr->msi_en);
return 0;
}
static void rtsx_base_force_power_down(struct rtsx_pcr *pcr)
{
/* Set relink_time to 0 */
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
RELINK_TIME_MASK, 0);
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3,
D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
}
static void __maybe_unused rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
if (pcr->ops->turn_off_led)
pcr->ops->turn_off_led(pcr);
rtsx_pci_writel(pcr, RTSX_BIER, 0);
pcr->bier = 0;
rtsx_pci_write_register(pcr, PETXCFG, 0x08, 0x08);
rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, pm_state);
if (pcr->ops->force_power_down)
pcr->ops->force_power_down(pcr, pm_state, runtime);
else
rtsx_base_force_power_down(pcr);
}
void rtsx_pci_enable_ocp(struct rtsx_pcr *pcr)
{
u8 val = SD_OCP_INT_EN | SD_DETECT_EN;
if (pcr->ops->enable_ocp) {
pcr->ops->enable_ocp(pcr);
} else {
rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
}
}
void rtsx_pci_disable_ocp(struct rtsx_pcr *pcr)
{
u8 mask = SD_OCP_INT_EN | SD_DETECT_EN;
if (pcr->ops->disable_ocp) {
pcr->ops->disable_ocp(pcr);
} else {
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
OC_POWER_DOWN);
}
}
void rtsx_pci_init_ocp(struct rtsx_pcr *pcr)
{
if (pcr->ops->init_ocp) {
pcr->ops->init_ocp(pcr);
} else {
struct rtsx_cr_option *option = &(pcr->option);
if (option->ocp_en) {
u8 val = option->sd_800mA_ocp_thd;
rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN, 0);
rtsx_pci_write_register(pcr, REG_OCPPARA1,
SD_OCP_TIME_MASK, SD_OCP_TIME_800);
rtsx_pci_write_register(pcr, REG_OCPPARA2,
SD_OCP_THD_MASK, val);
rtsx_pci_write_register(pcr, REG_OCPGLITCH,
SD_OCP_GLITCH_MASK, pcr->hw_param.ocp_glitch);
rtsx_pci_enable_ocp(pcr);
}
}
}
int rtsx_pci_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
{
if (pcr->ops->get_ocpstat)
return pcr->ops->get_ocpstat(pcr, val);
else
return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
}
void rtsx_pci_clear_ocpstat(struct rtsx_pcr *pcr)
{
if (pcr->ops->clear_ocpstat) {
pcr->ops->clear_ocpstat(pcr);
} else {
u8 mask = SD_OCP_INT_CLR | SD_OC_CLR;
u8 val = SD_OCP_INT_CLR | SD_OC_CLR;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
udelay(100);
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
}
}
void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr)
{
u16 val;
if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
rtsx_pci_read_phy_register(pcr, 0x01, &val);
val |= 1<<9;
rtsx_pci_write_phy_register(pcr, 0x01, val);
}
rtsx_pci_write_register(pcr, REG_CFG_OOBS_OFF_TIMER, 0xFF, 0x32);
rtsx_pci_write_register(pcr, REG_CFG_OOBS_ON_TIMER, 0xFF, 0x05);
rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x83);
rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0xDE);
}
void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr)
{
u16 val;
if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) {
rtsx_pci_read_phy_register(pcr, 0x01, &val);
val &= ~(1<<9);
rtsx_pci_write_phy_register(pcr, 0x01, val);
}
rtsx_pci_write_register(pcr, REG_CFG_VCM_ON_TIMER, 0xFF, 0x03);
rtsx_pci_write_register(pcr, REG_CFG_OOBS_POLLING, 0xFF, 0x00);
}
int rtsx_sd_power_off_card3v3(struct rtsx_pcr *pcr)
{
rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
MS_CLK_EN | SD40_CLK_EN, 0);
rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
msleep(50);
rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD);
return 0;
}
int rtsx_ms_power_off_card3v3(struct rtsx_pcr *pcr)
{
rtsx_pci_write_register(pcr, CARD_CLK_EN, SD_CLK_EN |
MS_CLK_EN | SD40_CLK_EN, 0);
rtsx_pci_card_pull_ctl_disable(pcr, RTSX_MS_CARD);
rtsx_pci_write_register(pcr, CARD_OE, MS_OUTPUT_EN, 0);
rtsx_pci_card_power_off(pcr, RTSX_MS_CARD);
return 0;
}
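/*
 * Bring the reader to a known state: power up the SSC clock, run any
 * chip-specific PHY optimization, program the common register defaults
 * through the host command queue and enable clock power management.
 */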
static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
int err;
if (PCI_PID(pcr) == PID_5228)
rtsx_pci_write_register(pcr, RTS5228_LDO1_CFG1, RTS5228_LDO1_SR_TIME_MASK,
RTS5228_LDO1_SR_0_5);
rtsx_pci_writel(pcr, RTSX_HCBAR, pcr->host_cmds_addr);
rtsx_pci_enable_bus_int(pcr);
/* Power on SSC */
if (PCI_PID(pcr) == PID_5261) {
/* Gating real mcu clock */
err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1,
RTS5261_MCU_CLOCK_GATING, 0);
err = rtsx_pci_write_register(pcr, RTS5261_REG_FPDCTL,
SSC_POWER_DOWN, 0);
} else {
err = rtsx_pci_write_register(pcr, FPDCTL, SSC_POWER_DOWN, 0);
}
if (err < 0)
return err;
/* Wait for SSC power to stabilize */
udelay(200);
rtsx_disable_aspm(pcr);
if (pcr->ops->optimize_phy) {
err = pcr->ops->optimize_phy(pcr);
if (err < 0)
return err;
}
rtsx_pci_init_cmd(pcr);
/* Set mcu_cnt to 7 to ensure data can be sampled properly */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, 0x07, 0x07);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, HOST_SLEEP_STATE, 0x03, 0x00);
/* Disable card clock */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, 0x1E, 0);
/* Reset delink mode */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x0A, 0);
/* Card driving select */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DRIVE_SEL,
0xFF, pcr->card_drive_sel);
/* Enable SSC Clock */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1,
0xFF, SSC_8X_EN | SSC_SEL_4M);
if (PCI_PID(pcr) == PID_5261)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
RTS5261_SSC_DEPTH_2M);
else if (PCI_PID(pcr) == PID_5228)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF,
RTS5228_SSC_DEPTH_2M);
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12);
/* Disable cd_pwr_save */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CHANGE_LINK_STATE, 0x16, 0x10);
/* Clear Link Ready Interrupt */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0,
LINK_RDY_INT, LINK_RDY_INT);
/* Enlarge the estimation window of PERST# glitch
* to reduce the chance of invalid card interrupt
*/
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PERST_GLITCH_WIDTH, 0xFF, 0x80);
/* Update RC oscillator to 400k
* bit[0] F_HIGH: for RC oscillator, Rst_value is 1'b1
* 1: 2M 0: 400k
*/
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RCCTL, 0x01, 0x00);
/* Set interrupt write clear
* bit 1: U_elbi_if_rd_clr_en
* 1: Enable ELBI interrupt[31:22] & [7:0] flag read clear
* 0: ELBI interrupt flag[31:22] & [7:0] only can be write clear
*/
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, NFTS_TX_CTRL, 0x02, 0);
err = rtsx_pci_send_cmd(pcr, 100);
if (err < 0)
return err;
switch (PCI_PID(pcr)) {
case PID_5250:
case PID_524A:
case PID_525A:
case PID_5260:
case PID_5261:
case PID_5228:
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1);
break;
default:
break;
}
/* Initialize over-current protection (OCP) */
rtsx_pci_init_ocp(pcr);
/* Enable clk_request_n to enable clock power management */
pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL,
0, PCI_EXP_LNKCTL_CLKREQ_EN);
/* Enter L1 when host tx idle */
pci_write_config_byte(pdev, 0x70F, 0x5B);
if (pcr->ops->extra_init_hw) {
err = pcr->ops->extra_init_hw(pcr);
if (err < 0)
return err;
}
if (pcr->aspm_mode == ASPM_MODE_REG) {
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30);
rtsx_pci_write_register(pcr, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
}
/* No card-detect interrupt is raised when the driver is probed with a
 * card already inserted, so pcr->card_exist must be initialized here.
*/
if (pcr->ops->cd_deglitch)
pcr->card_exist = pcr->ops->cd_deglitch(pcr);
else
pcr->card_exist = rtsx_pci_readl(pcr, RTSX_BIPR) & CARD_EXIST;
return 0;
}
static int rtsx_pci_init_chip(struct rtsx_pcr *pcr)
{
int err;
u16 cfg_val;
u8 val;
spin_lock_init(&pcr->lock);
mutex_init(&pcr->pcr_mutex);
switch (PCI_PID(pcr)) {
default:
case 0x5209:
rts5209_init_params(pcr);
break;
case 0x5229:
rts5229_init_params(pcr);
break;
case 0x5289:
rtl8411_init_params(pcr);
break;
case 0x5227:
rts5227_init_params(pcr);
break;
case 0x522A:
rts522a_init_params(pcr);
break;
case 0x5249:
rts5249_init_params(pcr);
break;
case 0x524A:
rts524a_init_params(pcr);
break;
case 0x525A:
rts525a_init_params(pcr);
break;
case 0x5287:
rtl8411b_init_params(pcr);
break;
case 0x5286:
rtl8402_init_params(pcr);
break;
case 0x5260:
rts5260_init_params(pcr);
break;
case 0x5261:
rts5261_init_params(pcr);
break;
case 0x5228:
rts5228_init_params(pcr);
break;
}
pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n",
PCI_PID(pcr), pcr->ic_version);
pcr->slots = kcalloc(pcr->num_slots, sizeof(struct rtsx_slot),
GFP_KERNEL);
if (!pcr->slots)
return -ENOMEM;
if (pcr->aspm_mode == ASPM_MODE_CFG) {
pcie_capability_read_word(pcr->pci, PCI_EXP_LNKCTL, &cfg_val);
if (cfg_val & PCI_EXP_LNKCTL_ASPM_L1)
pcr->aspm_enabled = true;
else
pcr->aspm_enabled = false;
} else if (pcr->aspm_mode == ASPM_MODE_REG) {
rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val);
if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1)
pcr->aspm_enabled = false;
else
pcr->aspm_enabled = true;
}
if (pcr->ops->fetch_vendor_settings)
pcr->ops->fetch_vendor_settings(pcr);
pcr_dbg(pcr, "pcr->aspm_en = 0x%x\n", pcr->aspm_en);
pcr_dbg(pcr, "pcr->sd30_drive_sel_1v8 = 0x%x\n",
pcr->sd30_drive_sel_1v8);
pcr_dbg(pcr, "pcr->sd30_drive_sel_3v3 = 0x%x\n",
pcr->sd30_drive_sel_3v3);
pcr_dbg(pcr, "pcr->card_drive_sel = 0x%x\n",
pcr->card_drive_sel);
pcr_dbg(pcr, "pcr->flags = 0x%x\n", pcr->flags);
pcr->state = PDEV_STAT_IDLE;
err = rtsx_pci_init_hw(pcr);
if (err < 0) {
kfree(pcr->slots);
return err;
}
return 0;
}
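/*
 * PCI probe: map the register BAR, allocate the shared command/SG buffer,
 * set up MSI and the interrupt handler, initialize the chip and finally
 * register the SD/MS child devices as MFD cells.
 */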
static int rtsx_pci_probe(struct pci_dev *pcidev,
const struct pci_device_id *id)
{
struct rtsx_pcr *pcr;
struct pcr_handle *handle;
u32 base, len;
int ret, i, bar = 0;
dev_dbg(&(pcidev->dev),
": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n",
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
(int)pcidev->revision);
ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
if (ret < 0)
return ret;
ret = pci_enable_device(pcidev);
if (ret)
return ret;
ret = pci_request_regions(pcidev, DRV_NAME_RTSX_PCI);
if (ret)
goto disable;
pcr = kzalloc(sizeof(*pcr), GFP_KERNEL);
if (!pcr) {
ret = -ENOMEM;
goto release_pci;
}
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle) {
ret = -ENOMEM;
goto free_pcr;
}
handle->pcr = pcr;
idr_preload(GFP_KERNEL);
spin_lock(&rtsx_pci_lock);
ret = idr_alloc(&rtsx_pci_idr, pcr, 0, 0, GFP_NOWAIT);
if (ret >= 0)
pcr->id = ret;
spin_unlock(&rtsx_pci_lock);
idr_preload_end();
if (ret < 0)
goto free_handle;
pcr->pci = pcidev;
dev_set_drvdata(&pcidev->dev, handle);
if (CHK_PCI_PID(pcr, 0x525A))
bar = 1;
len = pci_resource_len(pcidev, bar);
base = pci_resource_start(pcidev, bar);
pcr->remap_addr = ioremap(base, len);
if (!pcr->remap_addr) {
ret = -ENOMEM;
goto free_idr;
}
pcr->rtsx_resv_buf = dma_alloc_coherent(&(pcidev->dev),
RTSX_RESV_BUF_LEN, &(pcr->rtsx_resv_buf_addr),
GFP_KERNEL);
if (pcr->rtsx_resv_buf == NULL) {
ret = -ENXIO;
goto unmap;
}
pcr->host_cmds_ptr = pcr->rtsx_resv_buf;
pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr;
pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;
pcr->card_inserted = 0;
pcr->card_removed = 0;
INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect);
pcr->msi_en = msi_en;
if (pcr->msi_en) {
ret = pci_enable_msi(pcidev);
if (ret)
pcr->msi_en = false;
}
ret = rtsx_pci_acquire_irq(pcr);
if (ret < 0)
goto disable_msi;
pci_set_master(pcidev);
synchronize_irq(pcr->irq);
ret = rtsx_pci_init_chip(pcr);
if (ret < 0)
goto disable_irq;
for (i = 0; i < ARRAY_SIZE(rtsx_pcr_cells); i++) {
rtsx_pcr_cells[i].platform_data = handle;
rtsx_pcr_cells[i].pdata_size = sizeof(*handle);
}
ret = mfd_add_devices(&pcidev->dev, pcr->id, rtsx_pcr_cells,
ARRAY_SIZE(rtsx_pcr_cells), NULL, 0, NULL);
if (ret < 0)
goto free_slots;
pm_runtime_allow(&pcidev->dev);
pm_runtime_put(&pcidev->dev);
return 0;
free_slots:
kfree(pcr->slots);
disable_irq:
free_irq(pcr->irq, (void *)pcr);
disable_msi:
if (pcr->msi_en)
pci_disable_msi(pcr->pci);
dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
unmap:
iounmap(pcr->remap_addr);
free_idr:
spin_lock(&rtsx_pci_lock);
idr_remove(&rtsx_pci_idr, pcr->id);
spin_unlock(&rtsx_pci_lock);
free_handle:
kfree(handle);
free_pcr:
kfree(pcr);
release_pci:
pci_release_regions(pcidev);
disable:
pci_disable_device(pcidev);
return ret;
}
static void rtsx_pci_remove(struct pci_dev *pcidev)
{
struct pcr_handle *handle = pci_get_drvdata(pcidev);
struct rtsx_pcr *pcr = handle->pcr;
pcr->remove_pci = true;
pm_runtime_get_sync(&pcidev->dev);
pm_runtime_forbid(&pcidev->dev);
/* Disable interrupts at the pcr level */
spin_lock_irq(&pcr->lock);
rtsx_pci_writel(pcr, RTSX_BIER, 0);
pcr->bier = 0;
spin_unlock_irq(&pcr->lock);
cancel_delayed_work_sync(&pcr->carddet_work);
mfd_remove_devices(&pcidev->dev);
dma_free_coherent(&(pcr->pci->dev), RTSX_RESV_BUF_LEN,
pcr->rtsx_resv_buf, pcr->rtsx_resv_buf_addr);
free_irq(pcr->irq, (void *)pcr);
if (pcr->msi_en)
pci_disable_msi(pcr->pci);
iounmap(pcr->remap_addr);
pci_release_regions(pcidev);
pci_disable_device(pcidev);
spin_lock(&rtsx_pci_lock);
idr_remove(&rtsx_pci_idr, pcr->id);
spin_unlock(&rtsx_pci_lock);
kfree(pcr->slots);
kfree(pcr);
kfree(handle);
dev_dbg(&(pcidev->dev),
": Realtek PCI-E Card Reader at %s [%04x:%04x] has been removed\n",
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device);
}
static int __maybe_unused rtsx_pci_suspend(struct device *dev_d)
{
struct pci_dev *pcidev = to_pci_dev(dev_d);
struct pcr_handle *handle = pci_get_drvdata(pcidev);
struct rtsx_pcr *pcr = handle->pcr;
dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
cancel_delayed_work_sync(&pcr->carddet_work);
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_power_off(pcr, HOST_ENTER_S3, false);
mutex_unlock(&pcr->pcr_mutex);
return 0;
}
static int __maybe_unused rtsx_pci_resume(struct device *dev_d)
{
struct pci_dev *pcidev = to_pci_dev(dev_d);
struct pcr_handle *handle = pci_get_drvdata(pcidev);
struct rtsx_pcr *pcr = handle->pcr;
int ret = 0;
dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
mutex_lock(&pcr->pcr_mutex);
ret = rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
if (ret)
goto out;
ret = rtsx_pci_init_hw(pcr);
if (ret)
goto out;
out:
mutex_unlock(&pcr->pcr_mutex);
return ret;
}
#ifdef CONFIG_PM
static void rtsx_enable_aspm(struct rtsx_pcr *pcr)
{
if (pcr->ops->set_aspm)
pcr->ops->set_aspm(pcr, true);
else
rtsx_comm_set_aspm(pcr, true);
}
static void rtsx_comm_pm_power_saving(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &pcr->option;
if (option->ltr_enabled) {
u32 latency = option->ltr_l1off_latency;
if (rtsx_check_dev_flag(pcr, L1_SNOOZE_TEST_EN))
mdelay(option->l1_snooze_delay);
rtsx_set_ltr_latency(pcr, latency);
}
if (rtsx_check_dev_flag(pcr, LTR_L1SS_PWR_GATE_EN))
rtsx_set_l1off_sub_cfg_d0(pcr, 0);
rtsx_enable_aspm(pcr);
}
static void rtsx_pm_power_saving(struct rtsx_pcr *pcr)
{
rtsx_comm_pm_power_saving(pcr);
}
static void rtsx_pci_shutdown(struct pci_dev *pcidev)
{
struct pcr_handle *handle = pci_get_drvdata(pcidev);
struct rtsx_pcr *pcr = handle->pcr;
dev_dbg(&(pcidev->dev), "--> %s\n", __func__);
rtsx_pci_power_off(pcr, HOST_ENTER_S1, false);
pci_disable_device(pcidev);
free_irq(pcr->irq, (void *)pcr);
if (pcr->msi_en)
pci_disable_msi(pcr->pci);
}
static int rtsx_pci_runtime_idle(struct device *device)
{
struct pci_dev *pcidev = to_pci_dev(device);
struct pcr_handle *handle = pci_get_drvdata(pcidev);
struct rtsx_pcr *pcr = handle->pcr;
dev_dbg(device, "--> %s\n", __func__);
mutex_lock(&pcr->pcr_mutex);
pcr->state = PDEV_STAT_IDLE;
if (pcr->ops->disable_auto_blink)
pcr->ops->disable_auto_blink(pcr);
if (pcr->ops->turn_off_led)
pcr->ops->turn_off_led(pcr);
rtsx_pm_power_saving(pcr);
mutex_unlock(&pcr->pcr_mutex);
if (pcr->rtd3_en)
pm_schedule_suspend(device, 10000);
return -EBUSY;
}
static int rtsx_pci_runtime_suspend(struct device *device)
{
struct pci_dev *pcidev = to_pci_dev(device);
struct pcr_handle *handle = pci_get_drvdata(pcidev);
struct rtsx_pcr *pcr = handle->pcr;
dev_dbg(device, "--> %s\n", __func__);
cancel_delayed_work_sync(&pcr->carddet_work);
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_power_off(pcr, HOST_ENTER_S3, true);
mutex_unlock(&pcr->pcr_mutex);
return 0;
}
static int rtsx_pci_runtime_resume(struct device *device)
{
struct pci_dev *pcidev = to_pci_dev(device);
struct pcr_handle *handle = pci_get_drvdata(pcidev);
struct rtsx_pcr *pcr = handle->pcr;
dev_dbg(device, "--> %s\n", __func__);
mutex_lock(&pcr->pcr_mutex);
rtsx_pci_write_register(pcr, HOST_SLEEP_STATE, 0x03, 0x00);
rtsx_pci_init_hw(pcr);
if (pcr->slots[RTSX_SD_CARD].p_dev != NULL) {
pcr->slots[RTSX_SD_CARD].card_event(
pcr->slots[RTSX_SD_CARD].p_dev);
}
mutex_unlock(&pcr->pcr_mutex);
return 0;
}
#else /* CONFIG_PM */
#define rtsx_pci_shutdown NULL
#define rtsx_pci_runtime_suspend NULL
#define rtsx_pci_runtime_resume NULL
#endif /* CONFIG_PM */
static const struct dev_pm_ops rtsx_pci_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(rtsx_pci_suspend, rtsx_pci_resume)
SET_RUNTIME_PM_OPS(rtsx_pci_runtime_suspend, rtsx_pci_runtime_resume, rtsx_pci_runtime_idle)
};
static struct pci_driver rtsx_pci_driver = {
.name = DRV_NAME_RTSX_PCI,
.id_table = rtsx_pci_ids,
.probe = rtsx_pci_probe,
.remove = rtsx_pci_remove,
.driver.pm = &rtsx_pci_pm_ops,
.shutdown = rtsx_pci_shutdown,
};
module_pci_driver(rtsx_pci_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Wei WANG <[email protected]>");
MODULE_DESCRIPTION("Realtek PCI-E Card Reader Driver");
| linux-master | drivers/misc/cardreader/rtsx_pcr.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG <[email protected]>
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
static u8 rts5229_get_ic_version(struct rtsx_pcr *pcr)
{
u8 val;
rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
return val & 0x0F;
}
static void rts5229_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
u32 reg;
pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
return;
pcr->aspm_en = rtsx_reg_to_aspm(reg);
pcr->sd30_drive_sel_1v8 =
map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg));
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
pcr->sd30_drive_sel_3v3 =
map_sd_drive(rtsx_reg_to_sd30_drive_sel_3v3(reg));
}
static void rts5229_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
rtsx_pci_write_register(pcr, FPDCTL, 0x03, 0x03);
}
static int rts5229_extra_init_hw(struct rtsx_pcr *pcr)
{
rtsx_pci_init_cmd(pcr);
/* Configure GPIO as output */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
/* Reset ASPM state to default value */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
/* Force CLKREQ# PIN to drive 0 to request clock */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x08, 0x08);
/* Switch LDO3318 source from DV33 to card_3v3 */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
/* Disable LED blinking and set the initial blink cycle period */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
/* Configure driving */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
0xFF, pcr->sd30_drive_sel_3v3);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rts5229_optimize_phy(struct rtsx_pcr *pcr)
{
/* Optimize RX sensitivity */
return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42);
}
static int rts5229_turn_on_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
}
static int rts5229_turn_off_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
}
static int rts5229_enable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
}
static int rts5229_disable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
}
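/*
 * Card power is applied in two steps (partial power, short delay, then
 * full power) to limit the in-rush current on the card rail.
 */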
static int rts5229_card_power_on(struct rtsx_pcr *pcr, int card)
{
int err;
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
SD_POWER_MASK, SD_PARTIAL_POWER_ON);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x02);
err = rtsx_pci_send_cmd(pcr, 100);
if (err < 0)
return err;
/* Avoid an excessively large in-rush current */
udelay(150);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
SD_POWER_MASK, SD_POWER_ON);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x06);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rts5229_card_power_off(struct rtsx_pcr *pcr, int card)
{
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
SD_POWER_MASK | PMOS_STRG_MASK,
SD_POWER_OFF | PMOS_STRG_400mA);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x00);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rts5229_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
int err;
if (voltage == OUTPUT_3V3) {
err = rtsx_pci_write_register(pcr,
SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
if (err < 0)
return err;
} else if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_register(pcr,
SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C40 | 0x24);
if (err < 0)
return err;
} else {
return -EINVAL;
}
return 0;
}
static const struct pcr_ops rts5229_pcr_ops = {
.fetch_vendor_settings = rts5229_fetch_vendor_settings,
.extra_init_hw = rts5229_extra_init_hw,
.optimize_phy = rts5229_optimize_phy,
.turn_on_led = rts5229_turn_on_led,
.turn_off_led = rts5229_turn_off_led,
.enable_auto_blink = rts5229_enable_auto_blink,
.disable_auto_blink = rts5229_disable_auto_blink,
.card_power_on = rts5229_card_power_on,
.card_power_off = rts5229_card_power_off,
.switch_output_voltage = rts5229_switch_output_voltage,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
.force_power_down = rts5229_force_power_down,
};
/* SD Pull Control Enable:
* SD_DAT[3:0] ==> pull up
* SD_CD ==> pull up
* SD_WP ==> pull up
* SD_CMD ==> pull up
* SD_CLK ==> pull down
*/
static const u32 rts5229_sd_pull_ctl_enable_tbl1[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
0,
};
/* For RTS5229 version C */
static const u32 rts5229_sd_pull_ctl_enable_tbl2[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD9),
0,
};
/* SD Pull Control Disable:
* SD_DAT[3:0] ==> pull down
* SD_CD ==> pull up
* SD_WP ==> pull down
* SD_CMD ==> pull down
* SD_CLK ==> pull down
*/
static const u32 rts5229_sd_pull_ctl_disable_tbl1[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
0,
};
/* For RTS5229 version C */
static const u32 rts5229_sd_pull_ctl_disable_tbl2[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE5),
0,
};
/* MS Pull Control Enable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rts5229_ms_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
0,
};
/* MS Pull Control Disable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rts5229_ms_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
0,
};
void rts5229_init_params(struct rtsx_pcr *pcr)
{
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
pcr->num_slots = 2;
pcr->ops = &rts5229_pcr_ops;
pcr->flags = 0;
pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 6, 6);
pcr->ic_version = rts5229_get_ic_version(pcr);
if (pcr->ic_version == IC_VER_C) {
pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl2;
pcr->sd_pull_ctl_disable_tbl = rts5229_sd_pull_ctl_disable_tbl2;
} else {
pcr->sd_pull_ctl_enable_tbl = rts5229_sd_pull_ctl_enable_tbl1;
pcr->sd_pull_ctl_disable_tbl = rts5229_sd_pull_ctl_disable_tbl1;
}
pcr->ms_pull_ctl_enable_tbl = rts5229_ms_pull_ctl_enable_tbl;
pcr->ms_pull_ctl_disable_tbl = rts5229_ms_pull_ctl_disable_tbl;
}
| linux-master | drivers/misc/cardreader/rts5229.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG <[email protected]>
* Roger Tseng <[email protected]>
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
static u8 rts5227_get_ic_version(struct rtsx_pcr *pcr)
{
u8 val;
rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
return val & 0x0F;
}
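/*
 * Program the SD30 clock/command/data pad driving strength from the
 * per-voltage driving tables, indexed by the vendor-selected drive type.
 */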
static void rts5227_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
{
u8 driving_3v3[4][3] = {
{0x13, 0x13, 0x13},
{0x96, 0x96, 0x96},
{0x7F, 0x7F, 0x7F},
{0x96, 0x96, 0x96},
};
u8 driving_1v8[4][3] = {
{0x99, 0x99, 0x99},
{0xAA, 0xAA, 0xAA},
{0xFE, 0xFE, 0xFE},
{0xB3, 0xB3, 0xB3},
};
u8 (*driving)[3], drive_sel;
if (voltage == OUTPUT_3V3) {
driving = driving_3v3;
drive_sel = pcr->sd30_drive_sel_3v3;
} else {
driving = driving_1v8;
drive_sel = pcr->sd30_drive_sel_1v8;
}
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CLK_DRIVE_SEL,
0xFF, driving[drive_sel][0]);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_CMD_DRIVE_SEL,
0xFF, driving[drive_sel][1]);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DAT_DRIVE_SEL,
0xFF, driving[drive_sel][2]);
}
static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
u32 reg;
pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
return;
pcr->aspm_en = rtsx_reg_to_aspm(reg);
pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
if (CHK_PCI_PID(pcr, 0x522A))
pcr->rtd3_en = rtsx_reg_to_rtd3(reg);
if (rtsx_check_mmc_support(reg))
pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
}
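/*
 * Derive the ASPM L1 substate and LTR flags from the PCIe L1SS and
 * DEVCTL2 configuration space, so the power-saving paths match what the
 * platform actually enabled.
 */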
static void rts5227_init_from_cfg(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
int l1ss;
u32 lval;
struct rtsx_cr_option *option = &pcr->option;
l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
if (!l1ss)
return;
pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
if (CHK_PCI_PID(pcr, 0x522A)) {
if (0 == (lval & 0x0F))
rtsx_pci_enable_oobs_polling(pcr);
else
rtsx_pci_disable_oobs_polling(pcr);
}
if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_1_EN);
if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, ASPM_L1_2_EN);
if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_1_EN);
if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
else
rtsx_clear_dev_flag(pcr, PM_L1_2_EN);
if (option->ltr_en) {
u16 val;
pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
} else {
option->ltr_enabled = false;
}
}
if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN
| PM_L1_1_EN | PM_L1_2_EN))
option->force_clkreq_0 = false;
else
option->force_clkreq_0 = true;
}
static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
{
u16 cap;
struct rtsx_cr_option *option = &pcr->option;
rts5227_init_from_cfg(pcr);
rtsx_pci_init_cmd(pcr);
/* Configure GPIO as output */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, GPIO_CTL, 0x02, 0x02);
/* Reset ASPM state to default value */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, ASPM_FORCE_CTL, 0x3F, 0);
/* Switch LDO3318 source from DV33 to card_3v3 */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x00);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_PWR_SEL, 0x03, 0x01);
/* Disable LED blinking and set the initial blink cycle period */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
/* Configure LTR */
pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cap);
if (cap & PCI_EXP_DEVCTL2_LTR_EN)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LTR_CTL, 0xFF, 0xA3);
/* Configure OBFF */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG, 0x03, 0x03);
/* Configure driving */
rts5227_fill_driving(pcr, OUTPUT_3V3);
/* Configure force_clock_req */
if (pcr->flags & PCR_REVERSE_SOCKET)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x30);
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x00);
if (CHK_PCI_PID(pcr, 0x522A))
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, CD_RESUME_EN_MASK);
if (pcr->rtd3_en) {
if (CHK_PCI_PID(pcr, 0x522A)) {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_PM_CTRL3, 0x01, 0x01);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_PME_FORCE_CTL, 0x30, 0x30);
} else {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CTRL3, 0x01, 0x01);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PME_FORCE_CTL, 0xFF, 0x33);
}
} else {
if (CHK_PCI_PID(pcr, 0x522A)) {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_PM_CTRL3, 0x01, 0x00);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_PME_FORCE_CTL, 0x30, 0x20);
} else {
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PME_FORCE_CTL, 0xFF, 0x30);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CTRL3, 0x01, 0x00);
}
}
if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW);
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG,
FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, pcr->reg_pm_ctrl3, 0x10, 0x00);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rts5227_optimize_phy(struct rtsx_pcr *pcr)
{
int err;
err = rtsx_pci_write_register(pcr, PM_CTRL3, D3_DELINK_MODE_EN, 0x00);
if (err < 0)
return err;
/* Optimize RX sensitivity */
return rtsx_pci_write_phy_register(pcr, 0x00, 0xBA42);
}
static int rts5227_turn_on_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x02);
}
static int rts5227_turn_off_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00);
}
static int rts5227_enable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x08);
}
static int rts5227_disable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x08, 0x00);
}
static int rts5227_card_power_on(struct rtsx_pcr *pcr, int card)
{
int err;
if (pcr->option.ocp_en)
rtsx_pci_enable_ocp(pcr);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
SD_POWER_MASK, SD_PARTIAL_POWER_ON);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x02);
err = rtsx_pci_send_cmd(pcr, 100);
if (err < 0)
return err;
/* Avoid an excessively large in-rush current */
msleep(20);
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
SD_POWER_MASK, SD_POWER_ON);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
LDO3318_PWR_MASK, 0x06);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE,
SD_OUTPUT_EN, SD_OUTPUT_EN);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE,
MS_OUTPUT_EN, MS_OUTPUT_EN);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rts5227_card_power_off(struct rtsx_pcr *pcr, int card)
{
if (pcr->option.ocp_en)
rtsx_pci_disable_ocp(pcr);
rtsx_pci_write_register(pcr, CARD_PWR_CTL, SD_POWER_MASK |
PMOS_STRG_MASK, SD_POWER_OFF | PMOS_STRG_400mA);
rtsx_pci_write_register(pcr, PWR_GATE_CTRL, LDO3318_PWR_MASK, 0x00);
return 0;
}
static int rts5227_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
int err;
if (voltage == OUTPUT_3V3) {
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4FC0 | 0x24);
if (err < 0)
return err;
} else if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x4C80 | 0x24);
if (err < 0)
return err;
} else {
return -EINVAL;
}
/* set pad drive */
rtsx_pci_init_cmd(pcr);
rts5227_fill_driving(pcr, voltage);
return rtsx_pci_send_cmd(pcr, 100);
}
static const struct pcr_ops rts5227_pcr_ops = {
.fetch_vendor_settings = rts5227_fetch_vendor_settings,
.extra_init_hw = rts5227_extra_init_hw,
.optimize_phy = rts5227_optimize_phy,
.turn_on_led = rts5227_turn_on_led,
.turn_off_led = rts5227_turn_off_led,
.enable_auto_blink = rts5227_enable_auto_blink,
.disable_auto_blink = rts5227_disable_auto_blink,
.card_power_on = rts5227_card_power_on,
.card_power_off = rts5227_card_power_off,
.switch_output_voltage = rts5227_switch_output_voltage,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
};
/* SD Pull Control Enable:
* SD_DAT[3:0] ==> pull up
* SD_CD ==> pull up
* SD_WP ==> pull up
* SD_CMD ==> pull up
* SD_CLK ==> pull down
*/
static const u32 rts5227_sd_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
0,
};
/* SD Pull Control Disable:
* SD_DAT[3:0] ==> pull down
* SD_CD ==> pull up
* SD_WP ==> pull down
* SD_CMD ==> pull down
* SD_CLK ==> pull down
*/
static const u32 rts5227_sd_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
0,
};
/* MS Pull Control Enable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rts5227_ms_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
0,
};
/* MS Pull Control Disable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rts5227_ms_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
0,
};
void rts5227_init_params(struct rtsx_pcr *pcr)
{
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
pcr->num_slots = 2;
pcr->ops = &rts5227_pcr_ops;
pcr->flags = 0;
pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 27, 15);
pcr->rx_initial_phase = SET_CLOCK_PHASE(30, 7, 7);
pcr->ic_version = rts5227_get_ic_version(pcr);
pcr->sd_pull_ctl_enable_tbl = rts5227_sd_pull_ctl_enable_tbl;
pcr->sd_pull_ctl_disable_tbl = rts5227_sd_pull_ctl_disable_tbl;
pcr->ms_pull_ctl_enable_tbl = rts5227_ms_pull_ctl_enable_tbl;
pcr->ms_pull_ctl_disable_tbl = rts5227_ms_pull_ctl_disable_tbl;
pcr->reg_pm_ctrl3 = PM_CTRL3;
}
static int rts522a_optimize_phy(struct rtsx_pcr *pcr)
{
int err;
err = rtsx_pci_write_register(pcr, RTS522A_PM_CTRL3, D3_DELINK_MODE_EN,
0x00);
if (err < 0)
return err;
if (is_version(pcr, 0x522A, IC_VER_A)) {
err = rtsx_pci_write_phy_register(pcr, PHY_RCR2,
PHY_RCR2_INIT_27S);
if (err)
return err;
rtsx_pci_write_phy_register(pcr, PHY_RCR1, PHY_RCR1_INIT_27S);
rtsx_pci_write_phy_register(pcr, PHY_FLD0, PHY_FLD0_INIT_27S);
rtsx_pci_write_phy_register(pcr, PHY_FLD3, PHY_FLD3_INIT_27S);
rtsx_pci_write_phy_register(pcr, PHY_FLD4, PHY_FLD4_INIT_27S);
}
return 0;
}
static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
{
rts5227_extra_init_hw(pcr);
/* Power down OCP to reduce power consumption */
if (!pcr->card_exist)
rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
OC_POWER_DOWN);
rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG,
FUNC_FORCE_UPME_XMT_DBG);
rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04);
rtsx_pci_write_register(pcr, PM_EVENT_DEBUG, PME_DEBUG_0, PME_DEBUG_0);
rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 0xFF, 0x11);
return 0;
}
static int rts522a_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
int err;
if (voltage == OUTPUT_3V3) {
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x57E4);
if (err < 0)
return err;
} else if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_phy_register(pcr, 0x11, 0x3C02);
if (err < 0)
return err;
err = rtsx_pci_write_phy_register(pcr, 0x08, 0x54A4);
if (err < 0)
return err;
} else {
return -EINVAL;
}
/* set pad drive */
rtsx_pci_init_cmd(pcr);
rts5227_fill_driving(pcr, voltage);
return rtsx_pci_send_cmd(pcr, 100);
}
static void rts522a_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
/* Set relink_time to 0 */
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0);
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0);
rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3,
RELINK_TIME_MASK, 0);
rtsx_pci_write_register(pcr, RTS522A_PM_CTRL3,
D3_DELINK_MODE_EN, D3_DELINK_MODE_EN);
if (!runtime) {
rtsx_pci_write_register(pcr, RTS522A_AUTOLOAD_CFG1,
CD_RESUME_EN_MASK, 0);
rtsx_pci_write_register(pcr, RTS522A_PM_CTRL3, 0x01, 0x00);
rtsx_pci_write_register(pcr, RTS522A_PME_FORCE_CTL, 0x30, 0x20);
}
rtsx_pci_write_register(pcr, FPDCTL, ALL_POWER_DOWN, ALL_POWER_DOWN);
}
static void rts522a_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
{
struct rtsx_cr_option *option = &pcr->option;
int aspm_L1_1, aspm_L1_2;
u8 val = 0;
aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
if (active) {
/* run, latency: 60us */
if (aspm_L1_1)
val = option->ltr_l1off_snooze_sspwrgate;
} else {
/* l1off, latency: 300us */
if (aspm_L1_2)
val = option->ltr_l1off_sspwrgate;
}
rtsx_set_l1off_sub(pcr, val);
}
/* The rts522a operations are mostly derived from rts5227, except for the
 * PHY/HW init settings.
 */
static const struct pcr_ops rts522a_pcr_ops = {
.fetch_vendor_settings = rts5227_fetch_vendor_settings,
.extra_init_hw = rts522a_extra_init_hw,
.optimize_phy = rts522a_optimize_phy,
.turn_on_led = rts5227_turn_on_led,
.turn_off_led = rts5227_turn_off_led,
.enable_auto_blink = rts5227_enable_auto_blink,
.disable_auto_blink = rts5227_disable_auto_blink,
.card_power_on = rts5227_card_power_on,
.card_power_off = rts5227_card_power_off,
.switch_output_voltage = rts522a_switch_output_voltage,
.force_power_down = rts522a_force_power_down,
.cd_deglitch = NULL,
.conv_clk_and_div_n = NULL,
.set_l1off_cfg_sub_d0 = rts522a_set_l1off_cfg_sub_d0,
};
void rts522a_init_params(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &pcr->option;
rts5227_init_params(pcr);
pcr->ops = &rts522a_pcr_ops;
pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(20, 20, 11);
pcr->reg_pm_ctrl3 = RTS522A_PM_CTRL3;
option->dev_flags = LTR_L1SS_PWR_GATE_EN;
option->ltr_en = true;
/* Init latencies: active 60 us, idle 300 us, L1OFF 3 ms */
option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
option->ltr_l1off_sspwrgate = 0x7F;
option->ltr_l1off_snooze_sspwrgate = 0x78;
pcr->option.ocp_en = 1;
if (pcr->option.ocp_en)
pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
pcr->option.sd_800mA_ocp_thd = RTS522A_OCP_THD_800;
}
| linux-master | drivers/misc/cardreader/rts5227.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Wei WANG <[email protected]>
* Roger Tseng <[email protected]>
*/
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/rtsx_pci.h>
#include "rtsx_pcr.h"
static u8 rtl8411_get_ic_version(struct rtsx_pcr *pcr)
{
u8 val;
rtsx_pci_read_register(pcr, SYS_VER, &val);
return val & 0x0F;
}
static int rtl8411b_is_qfn48(struct rtsx_pcr *pcr)
{
u8 val = 0;
rtsx_pci_read_register(pcr, RTL8411B_PACKAGE_MODE, &val);
if (val & 0x2)
return 1;
else
return 0;
}
static void rtl8411_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
u32 reg1 = 0;
u8 reg3 = 0;
pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg1);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg1);
if (!rtsx_vendor_setting_valid(reg1))
return;
pcr->aspm_en = rtsx_reg_to_aspm(reg1);
pcr->sd30_drive_sel_1v8 =
map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg1));
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg1);
pci_read_config_byte(pdev, PCR_SETTING_REG3, &reg3);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG3, reg3);
pcr->sd30_drive_sel_3v3 = rtl8411_reg_to_sd30_drive_sel_3v3(reg3);
}
static void rtl8411b_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
u32 reg = 0;
pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg))
return;
pcr->aspm_en = rtsx_reg_to_aspm(reg);
pcr->sd30_drive_sel_1v8 =
map_sd_drive(rtsx_reg_to_sd30_drive_sel_1v8(reg));
pcr->sd30_drive_sel_3v3 =
map_sd_drive(rtl8411b_reg_to_sd30_drive_sel_3v3(reg));
}
static void rtl8411_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime)
{
rtsx_pci_write_register(pcr, FPDCTL, 0x07, 0x07);
}
static int rtl8411_extra_init_hw(struct rtsx_pcr *pcr)
{
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
0xFF, pcr->sd30_drive_sel_3v3);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL,
CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rtl8411b_extra_init_hw(struct rtsx_pcr *pcr)
{
rtsx_pci_init_cmd(pcr);
if (rtl8411b_is_qfn48(pcr))
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD,
CARD_PULL_CTL3, 0xFF, 0xF5);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD30_DRIVE_SEL,
0xFF, pcr->sd30_drive_sel_3v3);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CD_PAD_CTL,
CD_DISABLE_MASK | CD_AUTO_DISABLE, CD_ENABLE);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, FUNC_FORCE_CTL,
0x06, 0x00);
return rtsx_pci_send_cmd(pcr, 100);
}
static int rtl8411_turn_on_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x00);
}
static int rtl8411_turn_off_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, CARD_GPIO, 0x01, 0x01);
}
static int rtl8411_enable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0xFF, 0x0D);
}
static int rtl8411_disable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, CARD_AUTO_BLINK, 0x08, 0x00);
}
static int rtl8411_card_power_on(struct rtsx_pcr *pcr, int card)
{
int err;
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_PWR_CTL,
BPP_POWER_MASK, BPP_POWER_5_PERCENT_ON);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LDO_CTL,
BPP_LDO_POWB, BPP_LDO_SUSPEND);
err = rtsx_pci_send_cmd(pcr, 100);
if (err < 0)
return err;
/* To avoid too large in-rush current */
udelay(150);
err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
BPP_POWER_MASK, BPP_POWER_10_PERCENT_ON);
if (err < 0)
return err;
udelay(150);
err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
BPP_POWER_MASK, BPP_POWER_15_PERCENT_ON);
if (err < 0)
return err;
udelay(150);
err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
BPP_POWER_MASK, BPP_POWER_ON);
if (err < 0)
return err;
return rtsx_pci_write_register(pcr, LDO_CTL, BPP_LDO_POWB, BPP_LDO_ON);
}
static int rtl8411_card_power_off(struct rtsx_pcr *pcr, int card)
{
int err;
err = rtsx_pci_write_register(pcr, CARD_PWR_CTL,
BPP_POWER_MASK, BPP_POWER_OFF);
if (err < 0)
return err;
return rtsx_pci_write_register(pcr, LDO_CTL,
BPP_LDO_POWB, BPP_LDO_SUSPEND);
}
static int rtl8411_do_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage,
int bpp_tuned18_shift, int bpp_asic_1v8)
{
u8 mask, val;
int err;
mask = (BPP_REG_TUNED18 << bpp_tuned18_shift) | BPP_PAD_MASK;
if (voltage == OUTPUT_3V3) {
err = rtsx_pci_write_register(pcr,
SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_3v3);
if (err < 0)
return err;
val = (BPP_ASIC_3V3 << bpp_tuned18_shift) | BPP_PAD_3V3;
} else if (voltage == OUTPUT_1V8) {
err = rtsx_pci_write_register(pcr,
SD30_DRIVE_SEL, 0x07, pcr->sd30_drive_sel_1v8);
if (err < 0)
return err;
val = (bpp_asic_1v8 << bpp_tuned18_shift) | BPP_PAD_1V8;
} else {
return -EINVAL;
}
return rtsx_pci_write_register(pcr, LDO_CTL, mask, val);
}
static int rtl8411_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
return rtl8411_do_switch_output_voltage(pcr, voltage,
BPP_TUNED18_SHIFT_8411, BPP_ASIC_1V8);
}
static int rtl8402_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
return rtl8411_do_switch_output_voltage(pcr, voltage,
BPP_TUNED18_SHIFT_8402, BPP_ASIC_2V0);
}
static unsigned int rtl8411_cd_deglitch(struct rtsx_pcr *pcr)
{
unsigned int card_exist;
card_exist = rtsx_pci_readl(pcr, RTSX_BIPR);
card_exist &= CARD_EXIST;
if (!card_exist) {
/* Enable card CD */
rtsx_pci_write_register(pcr, CD_PAD_CTL,
CD_DISABLE_MASK, CD_ENABLE);
/* Enable card interrupt */
rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x00);
return 0;
}
if (hweight32(card_exist) > 1) {
rtsx_pci_write_register(pcr, CARD_PWR_CTL,
BPP_POWER_MASK, BPP_POWER_5_PERCENT_ON);
msleep(100);
card_exist = rtsx_pci_readl(pcr, RTSX_BIPR);
if (card_exist & MS_EXIST)
card_exist = MS_EXIST;
else if (card_exist & SD_EXIST)
card_exist = SD_EXIST;
else
card_exist = 0;
rtsx_pci_write_register(pcr, CARD_PWR_CTL,
BPP_POWER_MASK, BPP_POWER_OFF);
pcr_dbg(pcr, "After CD deglitch, card_exist = 0x%x\n",
card_exist);
}
if (card_exist & MS_EXIST) {
/* Disable SD interrupt */
rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x40);
rtsx_pci_write_register(pcr, CD_PAD_CTL,
CD_DISABLE_MASK, MS_CD_EN_ONLY);
} else if (card_exist & SD_EXIST) {
/* Disable MS interrupt */
rtsx_pci_write_register(pcr, EFUSE_CONTENT, 0xe0, 0x80);
rtsx_pci_write_register(pcr, CD_PAD_CTL,
CD_DISABLE_MASK, SD_CD_EN_ONLY);
}
return card_exist;
}
static int rtl8411_conv_clk_and_div_n(int input, int dir)
{
int output;
if (dir == CLK_TO_DIV_N)
output = input * 4 / 5 - 2;
else
output = (input + 2) * 5 / 4;
return output;
}
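/*
 * Editor's note (illustrative, not from the original driver): the two
 * branches above are approximate integer inverses of each other. Assuming
 * an input clock value of 100, for example:
 *   CLK_TO_DIV_N:  100 * 4 / 5 - 2 = 78
 *   DIV_N_TO_CLK:  (78 + 2) * 5 / 4 = 100
 * Values that are not multiples of 5 may not round-trip exactly because of
 * the truncating integer division.
 */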
static const struct pcr_ops rtl8411_pcr_ops = {
.fetch_vendor_settings = rtl8411_fetch_vendor_settings,
.extra_init_hw = rtl8411_extra_init_hw,
.optimize_phy = NULL,
.turn_on_led = rtl8411_turn_on_led,
.turn_off_led = rtl8411_turn_off_led,
.enable_auto_blink = rtl8411_enable_auto_blink,
.disable_auto_blink = rtl8411_disable_auto_blink,
.card_power_on = rtl8411_card_power_on,
.card_power_off = rtl8411_card_power_off,
.switch_output_voltage = rtl8411_switch_output_voltage,
.cd_deglitch = rtl8411_cd_deglitch,
.conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
.force_power_down = rtl8411_force_power_down,
};
static const struct pcr_ops rtl8402_pcr_ops = {
.fetch_vendor_settings = rtl8411_fetch_vendor_settings,
.extra_init_hw = rtl8411_extra_init_hw,
.optimize_phy = NULL,
.turn_on_led = rtl8411_turn_on_led,
.turn_off_led = rtl8411_turn_off_led,
.enable_auto_blink = rtl8411_enable_auto_blink,
.disable_auto_blink = rtl8411_disable_auto_blink,
.card_power_on = rtl8411_card_power_on,
.card_power_off = rtl8411_card_power_off,
.switch_output_voltage = rtl8402_switch_output_voltage,
.cd_deglitch = rtl8411_cd_deglitch,
.conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
.force_power_down = rtl8411_force_power_down,
};
static const struct pcr_ops rtl8411b_pcr_ops = {
.fetch_vendor_settings = rtl8411b_fetch_vendor_settings,
.extra_init_hw = rtl8411b_extra_init_hw,
.optimize_phy = NULL,
.turn_on_led = rtl8411_turn_on_led,
.turn_off_led = rtl8411_turn_off_led,
.enable_auto_blink = rtl8411_enable_auto_blink,
.disable_auto_blink = rtl8411_disable_auto_blink,
.card_power_on = rtl8411_card_power_on,
.card_power_off = rtl8411_card_power_off,
.switch_output_voltage = rtl8411_switch_output_voltage,
.cd_deglitch = rtl8411_cd_deglitch,
.conv_clk_and_div_n = rtl8411_conv_clk_and_div_n,
.force_power_down = rtl8411_force_power_down,
};
/* SD Pull Control Enable:
* SD_DAT[3:0] ==> pull up
* SD_CD ==> pull up
* SD_WP ==> pull up
* SD_CMD ==> pull up
* SD_CLK ==> pull down
*/
static const u32 rtl8411_sd_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xA9),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x09),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
0,
};
/* SD Pull Control Disable:
* SD_DAT[3:0] ==> pull down
* SD_CD ==> pull up
* SD_WP ==> pull down
* SD_CMD ==> pull down
* SD_CLK ==> pull down
*/
static const u32 rtl8411_sd_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
0,
};
/* MS Pull Control Enable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rtl8411_ms_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x05),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
0,
};
/* MS Pull Control Disable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rtl8411_ms_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0x95),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04),
0,
};
static const u32 rtl8411b_qfn64_sd_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0x09 | 0xD0),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
0,
};
static const u32 rtl8411b_qfn48_sd_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0x69 | 0x90),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x08 | 0x11),
0,
};
static const u32 rtl8411b_qfn64_sd_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
0,
};
static const u32 rtl8411b_qfn48_sd_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
0,
};
static const u32 rtl8411b_qfn64_ms_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x05 | 0x50),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
0,
};
static const u32 rtl8411b_qfn48_ms_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
0,
};
static const u32 rtl8411b_qfn64_ms_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0x65),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0x05 | 0xD0),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x09 | 0x50),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x05 | 0x50),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
0,
};
static const u32 rtl8411b_qfn48_ms_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0x65 | 0x90),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x04 | 0x11),
0,
};
static void rtl8411_init_common_params(struct rtsx_pcr *pcr)
{
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
pcr->num_slots = 2;
pcr->flags = 0;
pcr->card_drive_sel = RTL8411_CARD_DRIVE_DEFAULT;
pcr->sd30_drive_sel_1v8 = DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = DRIVER_TYPE_D;
pcr->aspm_en = ASPM_L1_EN;
pcr->aspm_mode = ASPM_MODE_CFG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(23, 7, 14);
pcr->rx_initial_phase = SET_CLOCK_PHASE(4, 3, 10);
pcr->ic_version = rtl8411_get_ic_version(pcr);
}
void rtl8411_init_params(struct rtsx_pcr *pcr)
{
rtl8411_init_common_params(pcr);
pcr->ops = &rtl8411_pcr_ops;
set_pull_ctrl_tables(pcr, rtl8411);
}
void rtl8411b_init_params(struct rtsx_pcr *pcr)
{
rtl8411_init_common_params(pcr);
pcr->ops = &rtl8411b_pcr_ops;
if (rtl8411b_is_qfn48(pcr))
set_pull_ctrl_tables(pcr, rtl8411b_qfn48);
else
set_pull_ctrl_tables(pcr, rtl8411b_qfn64);
}
void rtl8402_init_params(struct rtsx_pcr *pcr)
{
rtl8411_init_common_params(pcr);
pcr->ops = &rtl8402_pcr_ops;
set_pull_ctrl_tables(pcr, rtl8411);
}
| linux-master | drivers/misc/cardreader/rtl8411.c |
// SPDX-License-Identifier: GPL-2.0-or-later
/* Driver for Realtek PCI-Express card reader
*
* Copyright(c) 2016-2017 Realtek Semiconductor Corp. All rights reserved.
*
* Author:
* Steven FENG <[email protected]>
* Rui FENG <[email protected]>
* Wei WANG <[email protected]>
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/rtsx_pci.h>
#include "rts5260.h"
#include "rtsx_pcr.h"
static u8 rts5260_get_ic_version(struct rtsx_pcr *pcr)
{
u8 val;
rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val);
return val & IC_VERSION_MASK;
}
static void rts5260_fill_driving(struct rtsx_pcr *pcr, u8 voltage)
{
u8 driving_3v3[4][3] = {
{0x11, 0x11, 0x11},
{0x22, 0x22, 0x22},
{0x55, 0x55, 0x55},
{0x33, 0x33, 0x33},
};
u8 driving_1v8[4][3] = {
{0x35, 0x33, 0x33},
{0x8A, 0x88, 0x88},
{0xBD, 0xBB, 0xBB},
{0x9B, 0x99, 0x99},
};
u8 (*driving)[3], drive_sel;
if (voltage == OUTPUT_3V3) {
driving = driving_3v3;
drive_sel = pcr->sd30_drive_sel_3v3;
} else {
driving = driving_1v8;
drive_sel = pcr->sd30_drive_sel_1v8;
}
rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL,
0xFF, driving[drive_sel][0]);
rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL,
0xFF, driving[drive_sel][1]);
rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL,
0xFF, driving[drive_sel][2]);
}
static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
u32 reg;
pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
if (!rtsx_vendor_setting_valid(reg)) {
pcr_dbg(pcr, "skip fetch vendor setting\n");
return;
}
pcr->aspm_en = rtsx_reg_to_aspm(reg);
pcr->sd30_drive_sel_1v8 = rtsx_reg_to_sd30_drive_sel_1v8(reg);
pcr->card_drive_sel &= 0x3F;
pcr->card_drive_sel |= rtsx_reg_to_card_drive_sel(reg);
pci_read_config_dword(pdev, PCR_SETTING_REG2, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, reg);
if (rtsx_check_mmc_support(reg))
pcr->extra_caps |= EXTRA_CAPS_NO_MMC;
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
}
static int rtsx_base_enable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL,
LED_SHINE_MASK, LED_SHINE_EN);
}
static int rtsx_base_disable_auto_blink(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, OLT_LED_CTL,
LED_SHINE_MASK, LED_SHINE_DISABLE);
}
static int rts5260_turn_on_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_ON);
}
static int rts5260_turn_off_led(struct rtsx_pcr *pcr)
{
return rtsx_pci_write_register(pcr, RTS5260_REG_GPIO_CTL0,
RTS5260_REG_GPIO_MASK, RTS5260_REG_GPIO_OFF);
}
/* SD Pull Control Enable:
* SD_DAT[3:0] ==> pull up
* SD_CD ==> pull up
* SD_WP ==> pull up
* SD_CMD ==> pull up
* SD_CLK ==> pull down
*/
static const u32 rts5260_sd_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0xAA),
0,
};
/* SD Pull Control Disable:
* SD_DAT[3:0] ==> pull down
* SD_CD ==> pull up
* SD_WP ==> pull down
* SD_CMD ==> pull down
* SD_CLK ==> pull down
*/
static const u32 rts5260_sd_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL1, 0x66),
RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5),
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
0,
};
/* MS Pull Control Enable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rts5260_ms_pull_ctl_enable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
0,
};
/* MS Pull Control Disable:
* MS CD ==> pull up
* others ==> pull down
*/
static const u32 rts5260_ms_pull_ctl_disable_tbl[] = {
RTSX_REG_PAIR(CARD_PULL_CTL4, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL5, 0x55),
RTSX_REG_PAIR(CARD_PULL_CTL6, 0x15),
0,
};
static int sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr)
{
rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK
| SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST);
rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ);
rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF,
CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1);
rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0);
return 0;
}
static int rts5260_card_power_on(struct rtsx_pcr *pcr, int card)
{
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en)
rtsx_pci_enable_ocp(pcr);
rtsx_pci_write_register(pcr, LDO_CONFIG2, DV331812_VDD1, DV331812_VDD1);
rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
rtsx_pci_write_register(pcr, LDO_VCC_CFG1, LDO_POW_SDVDD1_MASK,
LDO_POW_SDVDD1_ON);
rtsx_pci_write_register(pcr, LDO_CONFIG2,
DV331812_POWERON, DV331812_POWERON);
msleep(20);
if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 ||
pcr->extra_caps & EXTRA_CAPS_SD_SDR104)
sd_set_sample_push_timing_sd30(pcr);
/* Initialize SD_CFG1 register */
rtsx_pci_write_register(pcr, SD_CFG1, 0xFF,
SD_CLK_DIVIDE_128 | SD_20_MODE);
rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL,
0xFF, SD20_RX_POS_EDGE);
rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0);
rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR,
SD_STOP | SD_CLR_ERR);
/* Reset SD_CFG3 register */
rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0);
rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG,
SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 |
SD30_CLK_STOP_CFG0, 0);
rtsx_pci_write_register(pcr, REG_PRE_RW_MODE, EN_INFINITE_MODE, 0);
return 0;
}
static int rts5260_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage)
{
switch (voltage) {
case OUTPUT_3V3:
rtsx_pci_write_register(pcr, LDO_CONFIG2,
DV331812_VDD1, DV331812_VDD1);
rtsx_pci_write_register(pcr, LDO_DV18_CFG,
DV331812_MASK, DV331812_33);
rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8, 0);
break;
case OUTPUT_1V8:
rtsx_pci_write_register(pcr, LDO_CONFIG2,
DV331812_VDD1, DV331812_VDD1);
rtsx_pci_write_register(pcr, LDO_DV18_CFG,
DV331812_MASK, DV331812_17);
rtsx_pci_write_register(pcr, SD_PAD_CTL, SD_IO_USING_1V8,
SD_IO_USING_1V8);
break;
default:
return -EINVAL;
}
/* set pad drive */
rts5260_fill_driving(pcr, voltage);
return 0;
}
static void rts5260_stop_cmd(struct rtsx_pcr *pcr)
{
rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD);
rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA);
rtsx_pci_write_register(pcr, RTS5260_DMA_RST_CTL_0,
RTS5260_DMA_RST | RTS5260_ADMA3_RST,
RTS5260_DMA_RST | RTS5260_ADMA3_RST);
rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH);
}
static void rts5260_card_before_power_off(struct rtsx_pcr *pcr)
{
rts5260_stop_cmd(pcr);
rts5260_switch_output_voltage(pcr, OUTPUT_3V3);
}
static int rts5260_card_power_off(struct rtsx_pcr *pcr, int card)
{
int err = 0;
rts5260_card_before_power_off(pcr);
err = rtsx_pci_write_register(pcr, LDO_VCC_CFG1,
LDO_POW_SDVDD1_MASK, LDO_POW_SDVDD1_OFF);
err = rtsx_pci_write_register(pcr, LDO_CONFIG2,
DV331812_POWERON, DV331812_POWEROFF);
if (pcr->option.ocp_en)
rtsx_pci_disable_ocp(pcr);
return err;
}
static void rts5260_init_ocp(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &pcr->option;
if (option->ocp_en) {
u8 mask, val;
rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
RTS5260_DVCC_OCP_THD_MASK,
option->sd_800mA_ocp_thd);
rtsx_pci_write_register(pcr, RTS5260_DV331812_CFG,
RTS5260_DV331812_OCP_THD_MASK,
RTS5260_DV331812_OCP_THD_270);
mask = SD_OCP_GLITCH_MASK;
val = pcr->hw_param.ocp_glitch;
rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val);
rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
RTS5260_DVCC_OCP_EN |
RTS5260_DVCC_OCP_CL_EN,
RTS5260_DVCC_OCP_EN |
RTS5260_DVCC_OCP_CL_EN);
rtsx_pci_enable_ocp(pcr);
} else {
rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
RTS5260_DVCC_OCP_EN |
RTS5260_DVCC_OCP_CL_EN, 0);
}
}
static void rts5260_enable_ocp(struct rtsx_pcr *pcr)
{
u8 val = 0;
val = SD_OCP_INT_EN | SD_DETECT_EN;
rtsx_pci_write_register(pcr, REG_OCPCTL, 0xFF, val);
}
static void rts5260_disable_ocp(struct rtsx_pcr *pcr)
{
u8 mask = 0;
mask = SD_OCP_INT_EN | SD_DETECT_EN;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
}
static int rts5260_get_ocpstat(struct rtsx_pcr *pcr, u8 *val)
{
return rtsx_pci_read_register(pcr, REG_OCPSTAT, val);
}
static int rts5260_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val)
{
return rtsx_pci_read_register(pcr, REG_DV3318_OCPSTAT, val);
}
static void rts5260_clear_ocpstat(struct rtsx_pcr *pcr)
{
u8 mask = 0;
u8 val = 0;
mask = SD_OCP_INT_CLR | SD_OC_CLR;
val = SD_OCP_INT_CLR | SD_OC_CLR;
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val);
rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
DV3318_OCP_INT_CLR | DV3318_OCP_CLR,
DV3318_OCP_INT_CLR | DV3318_OCP_CLR);
udelay(10);
rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0);
rtsx_pci_write_register(pcr, REG_DV3318_OCPCTL,
DV3318_OCP_INT_CLR | DV3318_OCP_CLR, 0);
}
static void rts5260_process_ocp(struct rtsx_pcr *pcr)
{
if (!pcr->option.ocp_en)
return;
rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat);
rts5260_get_ocpstat2(pcr, &pcr->ocp_stat2);
if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) ||
(pcr->ocp_stat2 & (DV3318_OCP_NOW | DV3318_OCP_EVER))) {
rtsx_pci_card_power_off(pcr, RTSX_SD_CARD);
rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0);
rtsx_pci_clear_ocpstat(pcr);
pcr->ocp_stat = 0;
pcr->ocp_stat2 = 0;
}
}
static int rts5260_init_hw(struct rtsx_pcr *pcr)
{
int err;
rtsx_pci_init_cmd(pcr);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG1,
AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE);
/* Reset L1SUB Config */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, L1SUB_CONFIG3, 0xFF, 0x00);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PM_CLK_FORCE_CTL,
CLK_PM_EN, CLK_PM_EN);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWD_SUSPEND_EN, 0xFF, 0xFF);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PWR_GATE_CTRL,
PWR_GATE_EN, PWR_GATE_EN);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, REG_VREF,
PWD_SUSPND_EN, PWD_SUSPND_EN);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RBCTL,
U_AUTO_DMA_EN_MASK, U_AUTO_DMA_DISABLE);
if (pcr->flags & PCR_REVERSE_SOCKET)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
else
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG,
OBFF_EN_MASK, OBFF_DISABLE);
err = rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
if (err < 0)
return err;
rtsx_pci_init_ocp(pcr);
return 0;
}
static void rts5260_pwr_saving_setting(struct rtsx_pcr *pcr)
{
int lss_l1_1, lss_l1_2;
lss_l1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN)
| rtsx_check_dev_flag(pcr, PM_L1_1_EN);
lss_l1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN)
| rtsx_check_dev_flag(pcr, PM_L1_2_EN);
rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0);
if (lss_l1_2) {
pcr_dbg(pcr, "Set parameters for L1.2.");
rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
0xFF, PCIE_L1_2_EN);
rtsx_pci_write_register(pcr, RTS5260_DVCC_CTRL,
RTS5260_DVCC_OCP_EN |
RTS5260_DVCC_OCP_CL_EN,
RTS5260_DVCC_OCP_EN |
RTS5260_DVCC_OCP_CL_EN);
rtsx_pci_write_register(pcr, PWR_FE_CTL,
0xFF, PCIE_L1_2_PD_FE_EN);
} else if (lss_l1_1) {
pcr_dbg(pcr, "Set parameters for L1.1.");
rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
0xFF, PCIE_L1_1_EN);
rtsx_pci_write_register(pcr, PWR_FE_CTL,
0xFF, PCIE_L1_1_PD_FE_EN);
} else {
pcr_dbg(pcr, "Set parameters for L1.");
rtsx_pci_write_register(pcr, PWR_GLOBAL_CTRL,
0xFF, PCIE_L1_0_EN);
rtsx_pci_write_register(pcr, PWR_FE_CTL,
0xFF, PCIE_L1_0_PD_FE_EN);
}
rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_DPHY_RET_VALUE,
0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
rtsx_pci_write_register(pcr, CFG_L1_0_PCIE_MAC_RET_VALUE,
0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD30_RET_VALUE,
0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
rtsx_pci_write_register(pcr, CFG_L1_0_CRC_SD40_RET_VALUE,
0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
rtsx_pci_write_register(pcr, CFG_L1_0_SYS_RET_VALUE,
0xFF, CFG_L1_0_RET_VALUE_DEFAULT);
/*Option cut APHY*/
rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_0,
0xFF, CFG_PCIE_APHY_OFF_0_DEFAULT);
rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_1,
0xFF, CFG_PCIE_APHY_OFF_1_DEFAULT);
rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_2,
0xFF, CFG_PCIE_APHY_OFF_2_DEFAULT);
rtsx_pci_write_register(pcr, CFG_PCIE_APHY_OFF_3,
0xFF, CFG_PCIE_APHY_OFF_3_DEFAULT);
/*CDR DEC*/
rtsx_pci_write_register(pcr, PWC_CDR, 0xFF, PWC_CDR_DEFAULT);
/*PWMPFM*/
rtsx_pci_write_register(pcr, CFG_LP_FPWM_VALUE,
0xFF, CFG_LP_FPWM_VALUE_DEFAULT);
/*No Power Saving WA*/
rtsx_pci_write_register(pcr, CFG_L1_0_CRC_MISC_RET_VALUE,
0xFF, CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT);
}
static void rts5260_init_from_cfg(struct rtsx_pcr *pcr)
{
struct pci_dev *pdev = pcr->pci;
int l1ss;
struct rtsx_cr_option *option = &pcr->option;
u32 lval;
l1ss = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
if (!l1ss)
return;
pci_read_config_dword(pdev, l1ss + PCI_L1SS_CTL1, &lval);
if (lval & PCI_L1SS_CTL1_ASPM_L1_1)
rtsx_set_dev_flag(pcr, ASPM_L1_1_EN);
if (lval & PCI_L1SS_CTL1_ASPM_L1_2)
rtsx_set_dev_flag(pcr, ASPM_L1_2_EN);
if (lval & PCI_L1SS_CTL1_PCIPM_L1_1)
rtsx_set_dev_flag(pcr, PM_L1_1_EN);
if (lval & PCI_L1SS_CTL1_PCIPM_L1_2)
rtsx_set_dev_flag(pcr, PM_L1_2_EN);
rts5260_pwr_saving_setting(pcr);
if (option->ltr_en) {
u16 val;
pcie_capability_read_word(pdev, PCI_EXP_DEVCTL2, &val);
if (val & PCI_EXP_DEVCTL2_LTR_EN) {
option->ltr_enabled = true;
option->ltr_active = true;
rtsx_set_ltr_latency(pcr, option->ltr_active_latency);
} else {
option->ltr_enabled = false;
}
}
}
static int rts5260_extra_init_hw(struct rtsx_pcr *pcr)
{
/* Set mcu_cnt to 7 to ensure data can be sampled properly */
rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07);
rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D);
rts5260_init_from_cfg(pcr);
/* force no MDIO*/
rtsx_pci_write_register(pcr, RTS5260_AUTOLOAD_CFG4,
0xFF, RTS5260_MIMO_DISABLE);
/*Modify SDVCC Tune Default Parameters!*/
rtsx_pci_write_register(pcr, LDO_VCC_CFG0,
RTS5260_DVCC_TUNE_MASK, RTS5260_DVCC_33);
rtsx_pci_write_register(pcr, PCLK_CTL, PCLK_MODE_SEL, PCLK_MODE_SEL);
rts5260_init_hw(pcr);
rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00);
return 0;
}
static void rts5260_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active)
{
struct rtsx_cr_option *option = &pcr->option;
u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR);
int card_exist = (interrupt & SD_EXIST) | (interrupt & MS_EXIST);
int aspm_L1_1, aspm_L1_2;
u8 val = 0;
aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN);
aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN);
if (active) {
/* run, latency: 60us */
if (aspm_L1_1)
val = option->ltr_l1off_snooze_sspwrgate;
} else {
/* l1off, latency: 300us */
if (aspm_L1_2)
val = option->ltr_l1off_sspwrgate;
}
if (aspm_L1_1 || aspm_L1_2) {
if (rtsx_check_dev_flag(pcr,
LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) {
if (card_exist)
val &= ~L1OFF_MBIAS2_EN_5250;
else
val |= L1OFF_MBIAS2_EN_5250;
}
}
rtsx_set_l1off_sub(pcr, val);
}
static const struct pcr_ops rts5260_pcr_ops = {
.fetch_vendor_settings = rtsx_base_fetch_vendor_settings,
.turn_on_led = rts5260_turn_on_led,
.turn_off_led = rts5260_turn_off_led,
.extra_init_hw = rts5260_extra_init_hw,
.enable_auto_blink = rtsx_base_enable_auto_blink,
.disable_auto_blink = rtsx_base_disable_auto_blink,
.card_power_on = rts5260_card_power_on,
.card_power_off = rts5260_card_power_off,
.switch_output_voltage = rts5260_switch_output_voltage,
.stop_cmd = rts5260_stop_cmd,
.set_l1off_cfg_sub_d0 = rts5260_set_l1off_cfg_sub_d0,
.enable_ocp = rts5260_enable_ocp,
.disable_ocp = rts5260_disable_ocp,
.init_ocp = rts5260_init_ocp,
.process_ocp = rts5260_process_ocp,
.get_ocpstat = rts5260_get_ocpstat,
.clear_ocpstat = rts5260_clear_ocpstat,
};
void rts5260_init_params(struct rtsx_pcr *pcr)
{
struct rtsx_cr_option *option = &pcr->option;
struct rtsx_hw_param *hw_param = &pcr->hw_param;
pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104;
pcr->num_slots = 2;
pcr->flags = 0;
pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT;
pcr->sd30_drive_sel_1v8 = CFG_DRIVER_TYPE_B;
pcr->sd30_drive_sel_3v3 = CFG_DRIVER_TYPE_B;
pcr->aspm_en = ASPM_L1_EN;
pcr->aspm_mode = ASPM_MODE_REG;
pcr->tx_initial_phase = SET_CLOCK_PHASE(27, 29, 11);
pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5);
pcr->ic_version = rts5260_get_ic_version(pcr);
pcr->sd_pull_ctl_enable_tbl = rts5260_sd_pull_ctl_enable_tbl;
pcr->sd_pull_ctl_disable_tbl = rts5260_sd_pull_ctl_disable_tbl;
pcr->ms_pull_ctl_enable_tbl = rts5260_ms_pull_ctl_enable_tbl;
pcr->ms_pull_ctl_disable_tbl = rts5260_ms_pull_ctl_disable_tbl;
pcr->reg_pm_ctrl3 = RTS524A_PM_CTRL3;
pcr->ops = &rts5260_pcr_ops;
option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN
| LTR_L1SS_PWR_GATE_EN);
option->ltr_en = true;
/* init latency of active, idle, L1OFF to 60us, 300us, 3ms */
option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF;
option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF;
option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF;
option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF;
option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5250_DEF;
option->ltr_l1off_snooze_sspwrgate =
LTR_L1OFF_SNOOZE_SSPWRGATE_5250_DEF;
option->ocp_en = 1;
if (option->ocp_en)
hw_param->interrupt_en |= SD_OC_INT_EN;
hw_param->ocp_glitch = SD_OCP_GLITCH_100U | SDVIO_OCP_GLITCH_800U;
option->sd_400mA_ocp_thd = RTS5260_DVCC_OCP_THD_550;
option->sd_800mA_ocp_thd = RTS5260_DVCC_OCP_THD_970;
}
| linux-master | drivers/misc/cardreader/rts5260.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* SpanDSP - a series of DSP components for telephony
*
* echo.c - A line echo canceller. This code is being developed
* against and partially complies with G168.
*
* Written by Steve Underwood <[email protected]>
* and David Rowe <david_at_rowetel_dot_com>
*
* Copyright (C) 2001, 2003 Steve Underwood, 2007 David Rowe
*
* Based on a bit from here, a bit from there, eye of toad, ear of
* bat, 15 years of failed attempts by David and a few fried brain
* cells.
*
* All rights reserved.
*/
/*! \file */
/* Implementation Notes
David Rowe
April 2007
This code started life as Steve's NLMS algorithm with a tap
rotation algorithm to handle divergence during double talk. I
added a Geigel Double Talk Detector (DTD) [2] and performed some
G168 tests. However I had trouble meeting the G168 requirements,
especially for double talk - there were always cases where my DTD
failed, for example where near end speech was under the 6dB
threshold required for declaring double talk.
So I tried a two path algorithm [1], which has so far given better
results. The original tap rotation/Geigel algorithm is available
in SVN http://svn.rowetel.com/software/oslec/tags/before_16bit.
It's probably possible to make it work if some one wants to put some
serious work into it.
At present no special treatment is provided for tones, which
generally cause NLMS algorithms to diverge. Initial runs of a
subset of the G168 tests for tones (e.g. ./echo_test 6) show the
current algorithm is passing OK, which is kind of surprising. The
full set of tests needs to be performed to confirm this result.
One other interesting change is that I have managed to get the NLMS
code to work with 16 bit coefficients, rather than the original 32
bit coefficients. This reduces the MIPS and storage required.
I evaluated the 16 bit port using g168_tests.sh and listening tests
on 4 real-world samples.
I also attempted the implementation of a block based NLMS update
[2] but although this passes g168_tests.sh it didn't converge well
on the real-world samples. I have no idea why, perhaps a scaling
problem. The block based code is also available in SVN
http://svn.rowetel.com/software/oslec/tags/before_16bit. If this
code can be debugged, it will lead to further reduction in MIPS, as
the block update code maps nicely onto DSP instruction sets (it's a
dot product) compared to the current sample-by-sample update.
Steve also has some nice notes on echo cancellers in echo.h
References:
[1] Ochiai, Areseki, and Ogihara, "Echo Canceller with Two Echo
Path Models", IEEE Transactions on communications, COM-25,
No. 6, June
1977.
https://www.rowetel.com/images/echo/dual_path_paper.pdf
[2] The classic, very useful paper that tells you how to
actually build a real world echo canceller:
Messerschmitt, Hedberg, Cole, Haoui, Winship, "Digital Voice
Echo Canceller with a TMS320020,
https://www.rowetel.com/images/echo/spra129.pdf
[3] I have written a series of blog posts on this work, here is
Part 1: http://www.rowetel.com/blog/?p=18
[4] The source code http://svn.rowetel.com/software/oslec/
[5] A nice reference on LMS filters:
https://en.wikipedia.org/wiki/Least_mean_squares_filter
Credits:
Thanks to Steve Underwood, Jean-Marc Valin, and Ramakrishnan
Muthukrishnan for their suggestions and email discussions. Thanks
also to those people who collected echo samples for me such as
Mark, Pawel, and Pavel.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "echo.h"
#define MIN_TX_POWER_FOR_ADAPTION 64
#define MIN_RX_POWER_FOR_ADAPTION 64
#define DTD_HANGOVER 600 /* 600 samples, or 75ms */
#define DC_LOG2BETA 3 /* log2() of DC filter Beta */
/* adapting coeffs using the traditional stochastic descent (N)LMS algorithm */
static inline void lms_adapt_bg(struct oslec_state *ec, int clean, int shift)
{
int i;
int offset1;
int offset2;
int factor;
int exp;
if (shift > 0)
factor = clean << shift;
else
factor = clean >> -shift;
/* Update the FIR taps */
offset2 = ec->curr_pos;
offset1 = ec->taps - offset2;
for (i = ec->taps - 1; i >= offset1; i--) {
exp = (ec->fir_state_bg.history[i - offset1] * factor);
ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
}
for (; i >= 0; i--) {
exp = (ec->fir_state_bg.history[i + offset2] * factor);
ec->fir_taps16[1][i] += (int16_t) ((exp + (1 << 14)) >> 15);
}
}
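/*
 * Editor's note on lms_adapt_bg() above (added commentary, not part of the
 * original source): each product history[i] * factor is reduced with
 * (exp + (1 << 14)) >> 15, i.e. a rounding right shift by 15 (division by
 * 2^15, rounded to nearest), so the correction lands back in the 16 bit
 * fixed-point format of fir_taps16[].
 */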
static inline int top_bit(unsigned int bits)
{
if (bits == 0)
return -1;
else
return (int)fls((int32_t) bits) - 1;
}
struct oslec_state *oslec_create(int len, int adaption_mode)
{
struct oslec_state *ec;
int i;
const int16_t *history;
ec = kzalloc(sizeof(*ec), GFP_KERNEL);
if (!ec)
return NULL;
ec->taps = len;
ec->log2taps = top_bit(len);
ec->curr_pos = ec->taps - 1;
ec->fir_taps16[0] =
kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
if (!ec->fir_taps16[0])
goto error_oom_0;
ec->fir_taps16[1] =
kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
if (!ec->fir_taps16[1])
goto error_oom_1;
history = fir16_create(&ec->fir_state, ec->fir_taps16[0], ec->taps);
if (!history)
goto error_state;
history = fir16_create(&ec->fir_state_bg, ec->fir_taps16[1], ec->taps);
if (!history)
goto error_state_bg;
for (i = 0; i < 5; i++)
ec->xvtx[i] = ec->yvtx[i] = ec->xvrx[i] = ec->yvrx[i] = 0;
ec->cng_level = 1000;
oslec_adaption_mode(ec, adaption_mode);
ec->snapshot = kcalloc(ec->taps, sizeof(int16_t), GFP_KERNEL);
if (!ec->snapshot)
goto error_snap;
ec->cond_met = 0;
ec->pstates = 0;
ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0;
ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0;
ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
ec->lbgn = ec->lbgn_acc = 0;
ec->lbgn_upper = 200;
ec->lbgn_upper_acc = ec->lbgn_upper << 13;
return ec;
error_snap:
fir16_free(&ec->fir_state_bg);
error_state_bg:
fir16_free(&ec->fir_state);
error_state:
kfree(ec->fir_taps16[1]);
error_oom_1:
kfree(ec->fir_taps16[0]);
error_oom_0:
kfree(ec);
return NULL;
}
EXPORT_SYMBOL_GPL(oslec_create);
void oslec_free(struct oslec_state *ec)
{
int i;
fir16_free(&ec->fir_state);
fir16_free(&ec->fir_state_bg);
for (i = 0; i < 2; i++)
kfree(ec->fir_taps16[i]);
kfree(ec->snapshot);
kfree(ec);
}
EXPORT_SYMBOL_GPL(oslec_free);
void oslec_adaption_mode(struct oslec_state *ec, int adaption_mode)
{
ec->adaption_mode = adaption_mode;
}
EXPORT_SYMBOL_GPL(oslec_adaption_mode);
void oslec_flush(struct oslec_state *ec)
{
int i;
ec->ltxacc = ec->lrxacc = ec->lcleanacc = ec->lclean_bgacc = 0;
ec->ltx = ec->lrx = ec->lclean = ec->lclean_bg = 0;
ec->tx_1 = ec->tx_2 = ec->rx_1 = ec->rx_2 = 0;
ec->lbgn = ec->lbgn_acc = 0;
ec->lbgn_upper = 200;
ec->lbgn_upper_acc = ec->lbgn_upper << 13;
ec->nonupdate_dwell = 0;
fir16_flush(&ec->fir_state);
fir16_flush(&ec->fir_state_bg);
ec->fir_state.curr_pos = ec->taps - 1;
ec->fir_state_bg.curr_pos = ec->taps - 1;
for (i = 0; i < 2; i++)
memset(ec->fir_taps16[i], 0, ec->taps * sizeof(int16_t));
ec->curr_pos = ec->taps - 1;
ec->pstates = 0;
}
EXPORT_SYMBOL_GPL(oslec_flush);
void oslec_snapshot(struct oslec_state *ec)
{
memcpy(ec->snapshot, ec->fir_taps16[0], ec->taps * sizeof(int16_t));
}
EXPORT_SYMBOL_GPL(oslec_snapshot);
/* Dual Path Echo Canceller */
int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx)
{
int32_t echo_value;
int clean_bg;
int tmp;
int tmp1;
/*
* Input scaling was found to be required to prevent problems when tx
* starts clipping. Another possible way to handle this would be
* filter coefficient scaling.
*/
ec->tx = tx;
ec->rx = rx;
tx >>= 1;
rx >>= 1;
/*
* Filter DC, 3dB point is 160Hz (I think), note 32 bit precision
* required otherwise values do not track down to 0. Zero at DC, Pole
* at (1-Beta) on real axis. Some chip sets (like Si labs) don't
* need this, but something like a $10 X100P card does. Any DC really
* slows down convergence.
*
* Note: removes some low frequency from the signal, this reduces the
* speech quality when listening to samples through headphones but may
* not be obvious through a telephone handset.
*
* Note that the 3dB frequency in radians is approx Beta, e.g. for Beta
* = 2^(-3) = 0.125, 3dB freq is 0.125 rads = 159Hz.
*/
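/*
 * Worked example (editor's addition, assuming the usual 8 kHz telephony
 * sample rate): with DC_LOG2BETA = 3, Beta = 2^-3 = 0.125 rad/sample, so
 * the 3 dB corner sits at roughly 0.125 * 8000 / (2 * pi) ~= 159 Hz,
 * matching the "160Hz" figure quoted above.
 */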
if (ec->adaption_mode & ECHO_CAN_USE_RX_HPF) {
tmp = rx << 15;
/*
* Make sure the gain of the HPF is 1.0. This can still
* saturate a little under impulse conditions, and it might
* roll to 32768 and need clipping on sustained peak level
* signals. However, the scale of such clipping is small, and
* the error due to any saturation should not markedly affect
* the downstream processing.
*/
tmp -= (tmp >> 4);
ec->rx_1 += -(ec->rx_1 >> DC_LOG2BETA) + tmp - ec->rx_2;
/*
* hard limit filter to prevent clipping. Note that at this
* stage rx should be limited to +/- 16383 due to right shift
* above
*/
tmp1 = ec->rx_1 >> 15;
if (tmp1 > 16383)
tmp1 = 16383;
if (tmp1 < -16383)
tmp1 = -16383;
rx = tmp1;
ec->rx_2 = tmp;
}
/* Block average of power in the filter states. Used for
adaption power calculation. */
{
int new, old;
/* efficient "out with the old and in with the new" algorithm so
we don't have to recalculate over the whole block of
samples. */
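/*
 * Editor's note (illustrative, not in the original): pstates tracks the
 * mean of history[i]^2 over the taps as a running average: the newest
 * tx^2 enters, the sample about to be overwritten leaves, and the
 * (1 << (log2taps - 1)) term rounds the division by 2^log2taps (= taps).
 */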
new = (int)tx * (int)tx;
old = (int)ec->fir_state.history[ec->fir_state.curr_pos] *
(int)ec->fir_state.history[ec->fir_state.curr_pos];
ec->pstates +=
((new - old) + (1 << (ec->log2taps - 1))) >> ec->log2taps;
if (ec->pstates < 0)
ec->pstates = 0;
}
/* Calculate short term average levels using simple single pole IIRs */
ec->ltxacc += abs(tx) - ec->ltx;
ec->ltx = (ec->ltxacc + (1 << 4)) >> 5;
ec->lrxacc += abs(rx) - ec->lrx;
ec->lrx = (ec->lrxacc + (1 << 4)) >> 5;
/* Foreground filter */
ec->fir_state.coeffs = ec->fir_taps16[0];
echo_value = fir16(&ec->fir_state, tx);
ec->clean = rx - echo_value;
ec->lcleanacc += abs(ec->clean) - ec->lclean;
ec->lclean = (ec->lcleanacc + (1 << 4)) >> 5;
/* Background filter */
echo_value = fir16(&ec->fir_state_bg, tx);
clean_bg = rx - echo_value;
ec->lclean_bgacc += abs(clean_bg) - ec->lclean_bg;
ec->lclean_bg = (ec->lclean_bgacc + (1 << 4)) >> 5;
/* Background Filter adaption */
/* Almost always adapt the bg filter; just simple DT and energy
detection to minimise adaption in cases of strong double talk.
However this is not critical for the dual path algorithm.
*/
ec->factor = 0;
ec->shift = 0;
if (!ec->nonupdate_dwell) {
int p, logp, shift;
/* Determine:
f = Beta * clean_bg_rx/P ------ (1)
where P is the total power in the filter states.
The Boffins have shown that if we obey (1) we converge
quickly and avoid instability.
The correct factor f must be in Q30, as this is the fixed
point format required by the lms_adapt_bg() function,
therefore the scaled version of (1) is:
(2^30) * f = (2^30) * Beta * clean_bg_rx/P
factor = (2^30) * Beta * clean_bg_rx/P ----- (2)
We have chosen Beta = 0.25 by experiment, so:
factor = (2^30) * (2^-2) * clean_bg_rx/P
i.e. factor = clean_bg_rx * 2^(30 - 2 - log2(P)) ----- (3)
To avoid a divide we approximate log2(P) as top_bit(P),
which returns the position of the highest non-zero bit in
P. This approximation introduces an error as large as a
factor of 2, but the algorithm seems to handle it OK.
Come to think of it a divide may not be a big deal on a
modern DSP, so it's probably worth checking out the cycles
for a divide versus a top_bit() implementation.
*/
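/*
 * Worked example (editor's addition, numbers chosen purely for
 * illustration): with 128 taps (log2taps = 7) and pstates = 960,
 * p = 64 + 960 = 1024, top_bit(1024) = 10, logp = 10 + 7 = 17 and
 * shift = 30 - 2 - 17 = 11, so lms_adapt_bg() forms the factor as
 * clean_bg << 11.
 */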
p = MIN_TX_POWER_FOR_ADAPTION + ec->pstates;
logp = top_bit(p) + ec->log2taps;
shift = 30 - 2 - logp;
ec->shift = shift;
lms_adapt_bg(ec, clean_bg, shift);
}
/* very simple DTD to make sure we don't try to adapt with strong
near end speech */
ec->adapt = 0;
if ((ec->lrx > MIN_RX_POWER_FOR_ADAPTION) && (ec->lrx > ec->ltx))
ec->nonupdate_dwell = DTD_HANGOVER;
if (ec->nonupdate_dwell)
ec->nonupdate_dwell--;
/* Transfer logic */
/* These conditions are from the dual path paper [1], I messed with
them a bit to improve performance. */
if ((ec->adaption_mode & ECHO_CAN_USE_ADAPTION) &&
(ec->nonupdate_dwell == 0) &&
/* (ec->Lclean_bg < 0.875*ec->Lclean) */
(8 * ec->lclean_bg < 7 * ec->lclean) &&
/* (ec->Lclean_bg < 0.125*ec->Ltx) */
(8 * ec->lclean_bg < ec->ltx)) {
if (ec->cond_met == 6) {
/*
* BG filter has had better results for 6 consecutive
* samples
*/
ec->adapt = 1;
memcpy(ec->fir_taps16[0], ec->fir_taps16[1],
ec->taps * sizeof(int16_t));
} else
ec->cond_met++;
} else
ec->cond_met = 0;
/* Non-Linear Processing */
ec->clean_nlp = ec->clean;
if (ec->adaption_mode & ECHO_CAN_USE_NLP) {
/*
* Non-linear processor - a fancy way to say "zap small
* signals, to avoid residual echo due to (uLaw/ALaw)
* non-linearity in the channel.".
*/
if ((16 * ec->lclean < ec->ltx)) {
/*
* Our e/c has improved echo by at least 24 dB (each
* factor of 2 is 6dB, so 2*2*2*2=16 is the same as
* 6+6+6+6=24dB)
*/
if (ec->adaption_mode & ECHO_CAN_USE_CNG) {
ec->cng_level = ec->lbgn;
/*
* Very elementary comfort noise generation.
* Just random numbers rolled off very vaguely
* Hoth-like. DR: This noise doesn't sound
* quite right to me - I suspect there are some
* overflow issues in the filtering as it's too
* "crackly".
* TODO: debug this, maybe just play noise at
* high level or look at spectrum.
*/
ec->cng_rndnum =
1664525U * ec->cng_rndnum + 1013904223U;
ec->cng_filter =
((ec->cng_rndnum & 0xFFFF) - 32768 +
5 * ec->cng_filter) >> 3;
ec->clean_nlp =
(ec->cng_filter * ec->cng_level * 8) >> 14;
} else if (ec->adaption_mode & ECHO_CAN_USE_CLIP) {
/* This sounds much better than CNG */
if (ec->clean_nlp > ec->lbgn)
ec->clean_nlp = ec->lbgn;
if (ec->clean_nlp < -ec->lbgn)
ec->clean_nlp = -ec->lbgn;
} else {
/*
* just mute the residual, doesn't sound very
* good, used mainly in G168 tests
*/
ec->clean_nlp = 0;
}
} else {
/*
* Background noise estimator. I tried a few
* algorithms here without much luck. This very simple
* one seems to work best, we just average the level
* using a slow (1 sec time const) filter if the
current level is less than an (experimentally
derived) constant. This means we don't include high
* level signals like near end speech. When combined
* with CNG or especially CLIP seems to work OK.
*/
if (ec->lclean < 40) {
ec->lbgn_acc += abs(ec->clean) - ec->lbgn;
ec->lbgn = (ec->lbgn_acc + (1 << 11)) >> 12;
}
}
}
/* Roll around the taps buffer */
if (ec->curr_pos <= 0)
ec->curr_pos = ec->taps;
ec->curr_pos--;
if (ec->adaption_mode & ECHO_CAN_DISABLE)
ec->clean_nlp = rx;
/* Output scaled back up again to match input scaling */
return (int16_t) ec->clean_nlp << 1;
}
EXPORT_SYMBOL_GPL(oslec_update);
/* This function is separated from the echo canceller as it is usually called
as part of the tx process. See rx HP (DC blocking) filter above, it's
the same design.
Some soft phones send speech signals with a lot of low frequency
energy, e.g. down to 20Hz. This can make the hybrid non-linear
which causes the echo canceller to fall over. This filter can help
by removing any low frequency before it gets to the tx port of the
hybrid.
It can also help by removing any DC in the tx signal. DC is bad
for LMS algorithms.
This is one of the classic DC removal filters, adjusted to provide
sufficient bass rolloff to meet the above requirement to protect hybrids
from things that upset them. The difference between successive samples
produces a lousy HPF, and then a suitably placed pole flattens things out.
The final result is a nicely rolled off bass end. The filtering is
implemented with extended fractional precision, which noise shapes things,
giving very clean DC removal.
*/
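/*
 * Editor's sketch of the filter described above (added commentary, ignoring
 * the fixed-point scaling and the 15/16 gain trim): the update below
 * implements
 *   y[n] = (1 - Beta) * y[n-1] + x[n] - x[n-1],  Beta = 2^-DC_LOG2BETA
 * i.e. H(z) = (1 - z^-1) / (1 - (1 - Beta) * z^-1), a zero at DC and a real
 * pole at (1 - Beta), which is the "difference plus pole" structure the
 * comment refers to.
 */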
int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx)
{
int tmp;
int tmp1;
if (ec->adaption_mode & ECHO_CAN_USE_TX_HPF) {
tmp = tx << 15;
/*
* Make sure the gain of the HPF is 1.0. The first can still
* saturate a little under impulse conditions, and it might
* roll to 32768 and need clipping on sustained peak level
* signals. However, the scale of such clipping is small, and
* the error due to any saturation should not markedly affect
* the downstream processing.
*/
tmp -= (tmp >> 4);
ec->tx_1 += -(ec->tx_1 >> DC_LOG2BETA) + tmp - ec->tx_2;
tmp1 = ec->tx_1 >> 15;
if (tmp1 > 32767)
tmp1 = 32767;
if (tmp1 < -32767)
tmp1 = -32767;
tx = tmp1;
ec->tx_2 = tmp;
}
return tx;
}
EXPORT_SYMBOL_GPL(oslec_hpf_tx);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Rowe");
MODULE_DESCRIPTION("Open Source Line Echo Canceller");
MODULE_VERSION("0.3.0");
| linux-master | drivers/misc/echo/echo.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#ifndef __CHECKER__
#define CREATE_TRACE_POINTS
#include "trace.h"
#endif
| linux-master | drivers/misc/ocxl/trace.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sysfs.h>
#include "ocxl_internal.h"
static inline struct ocxl_afu *to_afu(struct device *device)
{
struct ocxl_file_info *info = container_of(device, struct ocxl_file_info, dev);
return info->afu;
}
static ssize_t global_mmio_size_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct ocxl_afu *afu = to_afu(device);
return scnprintf(buf, PAGE_SIZE, "%d\n",
afu->config.global_mmio_size);
}
static ssize_t pp_mmio_size_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct ocxl_afu *afu = to_afu(device);
return scnprintf(buf, PAGE_SIZE, "%d\n",
afu->config.pp_mmio_stride);
}
static ssize_t afu_version_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct ocxl_afu *afu = to_afu(device);
return scnprintf(buf, PAGE_SIZE, "%hhu:%hhu\n",
afu->config.version_major,
afu->config.version_minor);
}
static ssize_t contexts_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct ocxl_afu *afu = to_afu(device);
return scnprintf(buf, PAGE_SIZE, "%d/%d\n",
afu->pasid_count, afu->pasid_max);
}
static ssize_t reload_on_reset_show(struct device *device,
struct device_attribute *attr,
char *buf)
{
struct ocxl_afu *afu = to_afu(device);
struct ocxl_fn *fn = afu->fn;
struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);
int val;
if (ocxl_config_get_reset_reload(pci_dev, &val))
return scnprintf(buf, PAGE_SIZE, "unavailable\n");
return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}
static ssize_t reload_on_reset_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct ocxl_afu *afu = to_afu(device);
struct ocxl_fn *fn = afu->fn;
struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent);
int rc, val;
rc = kstrtoint(buf, 0, &val);
if (rc || (val != 0 && val != 1))
return -EINVAL;
if (ocxl_config_set_reset_reload(pci_dev, val))
return -ENODEV;
return count;
}
static struct device_attribute afu_attrs[] = {
__ATTR_RO(global_mmio_size),
__ATTR_RO(pp_mmio_size),
__ATTR_RO(afu_version),
__ATTR_RO(contexts),
__ATTR_RW(reload_on_reset),
};
static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t off, size_t count)
{
struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj));
if (count == 0 || off < 0 ||
off >= afu->config.global_mmio_size)
return 0;
memcpy_fromio(buf, afu->global_mmio_ptr + off, count);
return count;
}
static vm_fault_t global_mmio_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct ocxl_afu *afu = vma->vm_private_data;
unsigned long offset;
if (vmf->pgoff >= (afu->config.global_mmio_size >> PAGE_SHIFT))
return VM_FAULT_SIGBUS;
offset = vmf->pgoff;
offset += (afu->global_mmio_start >> PAGE_SHIFT);
return vmf_insert_pfn(vma, vmf->address, offset);
}
static const struct vm_operations_struct global_mmio_vmops = {
.fault = global_mmio_fault,
};
static int global_mmio_mmap(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
struct vm_area_struct *vma)
{
struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj));
if ((vma_pages(vma) + vma->vm_pgoff) >
(afu->config.global_mmio_size >> PAGE_SHIFT))
return -EINVAL;
vm_flags_set(vma, VM_IO | VM_PFNMAP);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_ops = &global_mmio_vmops;
vma->vm_private_data = afu;
return 0;
}
int ocxl_sysfs_register_afu(struct ocxl_file_info *info)
{
int i, rc;
for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) {
rc = device_create_file(&info->dev, &afu_attrs[i]);
if (rc)
goto err;
}
sysfs_attr_init(&info->attr_global_mmio.attr);
info->attr_global_mmio.attr.name = "global_mmio_area";
info->attr_global_mmio.attr.mode = 0600;
info->attr_global_mmio.size = info->afu->config.global_mmio_size;
info->attr_global_mmio.read = global_mmio_read;
info->attr_global_mmio.mmap = global_mmio_mmap;
rc = device_create_bin_file(&info->dev, &info->attr_global_mmio);
if (rc) {
dev_err(&info->dev, "Unable to create global mmio attr for afu: %d\n", rc);
goto err;
}
return 0;
err:
for (i--; i >= 0; i--)
device_remove_file(&info->dev, &afu_attrs[i]);
return rc;
}
void ocxl_sysfs_unregister_afu(struct ocxl_file_info *info)
{
int i;
/*
* device_remove_bin_file() is safe to call even if the file was never added,
* as files are removed by name and removal exits early when none is found
*/
for (i = 0; i < ARRAY_SIZE(afu_attrs); i++)
device_remove_file(&info->dev, &afu_attrs[i]);
device_remove_bin_file(&info->dev, &info->attr_global_mmio);
}
| linux-master | drivers/misc/ocxl/sysfs.c |
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/irqdomain.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include <asm/xive.h>
#include <misc/ocxl.h>
#include "ocxl_internal.h"
#include "trace.h"
#define SPA_PASID_BITS 15
#define SPA_PASID_MAX ((1 << SPA_PASID_BITS) - 1)
#define SPA_PE_MASK SPA_PASID_MAX
#define SPA_SPA_SIZE_LOG 22 /* Each SPA is 4 MB */
#define SPA_CFG_SF (1ull << (63-0))
#define SPA_CFG_TA (1ull << (63-1))
#define SPA_CFG_HV (1ull << (63-3))
#define SPA_CFG_UV (1ull << (63-4))
#define SPA_CFG_XLAT_hpt (0ull << (63-6)) /* Hashed page table (HPT) mode */
#define SPA_CFG_XLAT_roh (2ull << (63-6)) /* Radix on HPT mode */
#define SPA_CFG_XLAT_ror (3ull << (63-6)) /* Radix on Radix mode */
#define SPA_CFG_PR (1ull << (63-49))
#define SPA_CFG_TC (1ull << (63-54))
#define SPA_CFG_DR (1ull << (63-59))
#define SPA_XSL_TF (1ull << (63-3)) /* Translation fault */
#define SPA_XSL_S (1ull << (63-38)) /* Store operation */
#define SPA_PE_VALID 0x80000000
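/*
 * Editor's note on the sizes above (illustrative, assuming the 128-byte
 * process element layout used by OpenCAPI): 2^SPA_PASID_BITS = 32768
 * entries of 128 bytes each comes to 4 MiB, which is why SPA_SPA_SIZE_LOG
 * is 22 (2^22 bytes per SPA).
 */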
struct ocxl_link;
struct pe_data {
struct mm_struct *mm;
/* callback to trigger when a translation fault occurs */
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr);
/* opaque pointer to be passed to the above callback */
void *xsl_err_data;
struct rcu_head rcu;
struct ocxl_link *link;
struct mmu_notifier mmu_notifier;
};
struct spa {
struct ocxl_process_element *spa_mem;
int spa_order;
struct mutex spa_lock;
struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */
char *irq_name;
int virq;
void __iomem *reg_dsisr;
void __iomem *reg_dar;
void __iomem *reg_tfc;
void __iomem *reg_pe_handle;
/*
* The following fields are used by the memory fault
* interrupt handler. We can only have one interrupt at a
* time. The NPU won't raise another interrupt until the
* previous one has been ack'd by writing to the TFC register
*/
struct xsl_fault {
struct work_struct fault_work;
u64 pe;
u64 dsisr;
u64 dar;
struct pe_data pe_data;
} xsl_fault;
};
/*
* An opencapi link can be used by several PCI functions. We have
* one link per device slot.
*
* A linked list of opencapi links should suffice, as there's a
* limited number of opencapi slots on a system and lookup is only
* done when the device is probed
*/
struct ocxl_link {
struct list_head list;
struct kref ref;
int domain;
int bus;
int dev;
void __iomem *arva; /* ATSD register virtual address */
spinlock_t atsd_lock; /* to serialize shootdowns */
atomic_t irq_available;
struct spa *spa;
void *platform_data;
};
static LIST_HEAD(links_list);
static DEFINE_MUTEX(links_list_lock);
enum xsl_response {
CONTINUE,
ADDRESS_ERROR,
RESTART,
};
static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe)
{
u64 reg;
*dsisr = in_be64(spa->reg_dsisr);
*dar = in_be64(spa->reg_dar);
reg = in_be64(spa->reg_pe_handle);
*pe = reg & SPA_PE_MASK;
}
static void ack_irq(struct spa *spa, enum xsl_response r)
{
u64 reg = 0;
/* continue is not supported */
if (r == RESTART)
reg = PPC_BIT(31);
else if (r == ADDRESS_ERROR)
reg = PPC_BIT(30);
else
WARN(1, "Invalid irq response %d\n", r);
if (reg) {
trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe,
spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg);
out_be64(spa->reg_tfc, reg);
}
}
static void xsl_fault_handler_bh(struct work_struct *fault_work)
{
vm_fault_t flt = 0;
unsigned long access, flags, inv_flags = 0;
enum xsl_response r;
struct xsl_fault *fault = container_of(fault_work, struct xsl_fault,
fault_work);
struct spa *spa = container_of(fault, struct spa, xsl_fault);
int rc;
/*
* We must release a reference on mm_users whenever exiting this
* function (taken in the memory fault interrupt handler)
*/
rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr,
&flt);
if (rc) {
pr_debug("copro_handle_mm_fault failed: %d\n", rc);
if (fault->pe_data.xsl_err_cb) {
fault->pe_data.xsl_err_cb(
fault->pe_data.xsl_err_data,
fault->dar, fault->dsisr);
}
r = ADDRESS_ERROR;
goto ack;
}
if (!radix_enabled()) {
/*
* update_mmu_cache() will not have loaded the hash
* since current->trap is not a 0x400 or 0x300, so
* just call hash_page_mm() here.
*/
access = _PAGE_PRESENT | _PAGE_READ;
if (fault->dsisr & SPA_XSL_S)
access |= _PAGE_WRITE;
if (get_region_id(fault->dar) != USER_REGION_ID)
access |= _PAGE_PRIVILEGED;
local_irq_save(flags);
hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300,
inv_flags);
local_irq_restore(flags);
}
r = RESTART;
ack:
mmput(fault->pe_data.mm);
ack_irq(spa, r);
}
static irqreturn_t xsl_fault_handler(int irq, void *data)
{
struct ocxl_link *link = (struct ocxl_link *) data;
struct spa *spa = link->spa;
u64 dsisr, dar, pe_handle;
struct pe_data *pe_data;
struct ocxl_process_element *pe;
int pid;
bool schedule = false;
read_irq(spa, &dsisr, &dar, &pe_handle);
trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1);
WARN_ON(pe_handle > SPA_PE_MASK);
pe = spa->spa_mem + pe_handle;
pid = be32_to_cpu(pe->pid);
	/* We could be reading all null values here if the PE is being
	 * removed while an interrupt kicks in. It's not supposed to
	 * happen if the driver notified the AFU to terminate the
	 * PASID, and the AFU waited for pending operations before
	 * acknowledging. But even if it happens, we won't find a
	 * memory context below and will fail silently, so it should be ok.
	 */
if (!(dsisr & SPA_XSL_TF)) {
WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr);
ack_irq(spa, ADDRESS_ERROR);
return IRQ_HANDLED;
}
rcu_read_lock();
pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle);
if (!pe_data) {
/*
* Could only happen if the driver didn't notify the
* AFU about PASID termination before removing the PE,
* or the AFU didn't wait for all memory access to
* have completed.
*
* Either way, we fail early, but we shouldn't log an
* error message, as it is a valid (if unexpected)
* scenario
*/
rcu_read_unlock();
pr_debug("Unknown mm context for xsl interrupt\n");
ack_irq(spa, ADDRESS_ERROR);
return IRQ_HANDLED;
}
if (!pe_data->mm) {
/*
* translation fault from a kernel context - an OpenCAPI
* device tried to access a bad kernel address
*/
rcu_read_unlock();
pr_warn("Unresolved OpenCAPI xsl fault in kernel context\n");
ack_irq(spa, ADDRESS_ERROR);
return IRQ_HANDLED;
}
WARN_ON(pe_data->mm->context.id != pid);
if (mmget_not_zero(pe_data->mm)) {
spa->xsl_fault.pe = pe_handle;
spa->xsl_fault.dar = dar;
spa->xsl_fault.dsisr = dsisr;
spa->xsl_fault.pe_data = *pe_data;
schedule = true;
/* mm_users count released by bottom half */
}
rcu_read_unlock();
if (schedule)
schedule_work(&spa->xsl_fault.fault_work);
else
ack_irq(spa, ADDRESS_ERROR);
return IRQ_HANDLED;
}
static void unmap_irq_registers(struct spa *spa)
{
pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc,
spa->reg_pe_handle);
}
static int map_irq_registers(struct pci_dev *dev, struct spa *spa)
{
return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar,
&spa->reg_tfc, &spa->reg_pe_handle);
}
static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link)
{
struct spa *spa = link->spa;
int rc;
int hwirq;
rc = pnv_ocxl_get_xsl_irq(dev, &hwirq);
if (rc)
return rc;
rc = map_irq_registers(dev, spa);
if (rc)
return rc;
spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x",
link->domain, link->bus, link->dev);
if (!spa->irq_name) {
dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n");
rc = -ENOMEM;
goto err_xsl;
}
/*
* At some point, we'll need to look into allowing a higher
* number of interrupts. Could we have an IRQ domain per link?
*/
spa->virq = irq_create_mapping(NULL, hwirq);
if (!spa->virq) {
dev_err(&dev->dev,
"irq_create_mapping failed for translation interrupt\n");
rc = -EINVAL;
goto err_name;
}
dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq);
rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name,
link);
if (rc) {
dev_err(&dev->dev,
"request_irq failed for translation interrupt: %d\n",
rc);
rc = -EINVAL;
goto err_mapping;
}
return 0;
err_mapping:
irq_dispose_mapping(spa->virq);
err_name:
kfree(spa->irq_name);
err_xsl:
unmap_irq_registers(spa);
return rc;
}
static void release_xsl_irq(struct ocxl_link *link)
{
struct spa *spa = link->spa;
if (spa->virq) {
free_irq(spa->virq, link);
irq_dispose_mapping(spa->virq);
}
kfree(spa->irq_name);
unmap_irq_registers(spa);
}
static int alloc_spa(struct pci_dev *dev, struct ocxl_link *link)
{
struct spa *spa;
spa = kzalloc(sizeof(struct spa), GFP_KERNEL);
if (!spa)
return -ENOMEM;
mutex_init(&spa->spa_lock);
INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL);
INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh);
spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT;
spa->spa_mem = (struct ocxl_process_element *)
__get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order);
if (!spa->spa_mem) {
dev_err(&dev->dev, "Can't allocate Shared Process Area\n");
kfree(spa);
return -ENOMEM;
}
pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus,
link->dev, spa->spa_mem);
link->spa = spa;
return 0;
}
static void free_spa(struct ocxl_link *link)
{
struct spa *spa = link->spa;
pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus,
link->dev);
if (spa && spa->spa_mem) {
free_pages((unsigned long) spa->spa_mem, spa->spa_order);
kfree(spa);
link->spa = NULL;
}
}
static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_link)
{
struct ocxl_link *link;
int rc;
link = kzalloc(sizeof(struct ocxl_link), GFP_KERNEL);
if (!link)
return -ENOMEM;
kref_init(&link->ref);
link->domain = pci_domain_nr(dev->bus);
link->bus = dev->bus->number;
link->dev = PCI_SLOT(dev->devfn);
atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);
spin_lock_init(&link->atsd_lock);
rc = alloc_spa(dev, link);
if (rc)
goto err_free;
rc = setup_xsl_irq(dev, link);
if (rc)
goto err_spa;
/* platform specific hook */
rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask,
&link->platform_data);
if (rc)
goto err_xsl_irq;
	/* If link->arva is not defined, MMIO registers are not used to
	 * generate TLB invalidates. PowerBus snooping is enabled.
	 * Otherwise, PowerBus snooping is disabled. TLB invalidates are
	 * initiated using MMIO registers.
	 */
pnv_ocxl_map_lpar(dev, mfspr(SPRN_LPID), 0, &link->arva);
*out_link = link;
return 0;
err_xsl_irq:
release_xsl_irq(link);
err_spa:
free_spa(link);
err_free:
kfree(link);
return rc;
}
static void free_link(struct ocxl_link *link)
{
release_xsl_irq(link);
free_spa(link);
kfree(link);
}
int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle)
{
int rc = 0;
struct ocxl_link *link;
mutex_lock(&links_list_lock);
list_for_each_entry(link, &links_list, list) {
/* The functions of a device all share the same link */
if (link->domain == pci_domain_nr(dev->bus) &&
link->bus == dev->bus->number &&
link->dev == PCI_SLOT(dev->devfn)) {
kref_get(&link->ref);
*link_handle = link;
goto unlock;
}
}
rc = alloc_link(dev, PE_mask, &link);
if (rc)
goto unlock;
list_add(&link->list, &links_list);
*link_handle = link;
unlock:
mutex_unlock(&links_list_lock);
return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_setup);
static void release_xsl(struct kref *ref)
{
struct ocxl_link *link = container_of(ref, struct ocxl_link, ref);
if (link->arva) {
pnv_ocxl_unmap_lpar(link->arva);
link->arva = NULL;
}
list_del(&link->list);
/* call platform code before releasing data */
pnv_ocxl_spa_release(link->platform_data);
free_link(link);
}
void ocxl_link_release(struct pci_dev *dev, void *link_handle)
{
struct ocxl_link *link = (struct ocxl_link *) link_handle;
mutex_lock(&links_list_lock);
kref_put(&link->ref, release_xsl);
mutex_unlock(&links_list_lock);
}
EXPORT_SYMBOL_GPL(ocxl_link_release);
static void arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct pe_data *pe_data = container_of(mn, struct pe_data, mmu_notifier);
struct ocxl_link *link = pe_data->link;
unsigned long addr, pid, page_size = PAGE_SIZE;
pid = mm->context.id;
trace_ocxl_mmu_notifier_range(start, end, pid);
spin_lock(&link->atsd_lock);
for (addr = start; addr < end; addr += page_size)
pnv_ocxl_tlb_invalidate(link->arva, pid, addr, page_size);
spin_unlock(&link->atsd_lock);
}
static const struct mmu_notifier_ops ocxl_mmu_notifier_ops = {
.arch_invalidate_secondary_tlbs = arch_invalidate_secondary_tlbs,
};
static u64 calculate_cfg_state(bool kernel)
{
u64 state;
state = SPA_CFG_DR;
if (mfspr(SPRN_LPCR) & LPCR_TC)
state |= SPA_CFG_TC;
if (radix_enabled())
state |= SPA_CFG_XLAT_ror;
else
state |= SPA_CFG_XLAT_hpt;
state |= SPA_CFG_HV;
if (kernel) {
if (mfmsr() & MSR_SF)
state |= SPA_CFG_SF;
} else {
state |= SPA_CFG_PR;
if (!test_tsk_thread_flag(current, TIF_32BIT))
state |= SPA_CFG_SF;
}
return state;
}
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
u64 amr, u16 bdf, struct mm_struct *mm,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data)
{
struct ocxl_link *link = (struct ocxl_link *) link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
int pe_handle, rc = 0;
struct pe_data *pe_data;
BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128);
if (pasid > SPA_PASID_MAX)
return -EINVAL;
mutex_lock(&spa->spa_lock);
pe_handle = pasid & SPA_PE_MASK;
pe = spa->spa_mem + pe_handle;
if (pe->software_state) {
rc = -EBUSY;
goto unlock;
}
pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL);
if (!pe_data) {
rc = -ENOMEM;
goto unlock;
}
pe_data->mm = mm;
pe_data->xsl_err_cb = xsl_err_cb;
pe_data->xsl_err_data = xsl_err_data;
pe_data->link = link;
pe_data->mmu_notifier.ops = &ocxl_mmu_notifier_ops;
memset(pe, 0, sizeof(struct ocxl_process_element));
pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
pe->pasid = cpu_to_be32(pasid << (31 - 19));
pe->bdf = cpu_to_be16(bdf);
pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
pe->pid = cpu_to_be32(pidr);
pe->tid = cpu_to_be32(tidr);
pe->amr = cpu_to_be64(amr);
pe->software_state = cpu_to_be32(SPA_PE_VALID);
/*
* For user contexts, register a copro so that TLBIs are seen
* by the nest MMU. If we have a kernel context, TLBIs are
* already global.
*/
if (mm) {
mm_context_add_copro(mm);
if (link->arva) {
/* Use MMIO registers for the TLB Invalidate
* operations.
*/
trace_ocxl_init_mmu_notifier(pasid, mm->context.id);
mmu_notifier_register(&pe_data->mmu_notifier, mm);
}
}
	/*
	 * The barrier makes sure the PE is visible in the SPA before it
	 * is used by the device. It also helps with the global TLB
	 * invalidation.
	 */
mb();
radix_tree_insert(&spa->pe_tree, pe_handle, pe_data);
/*
* The mm must stay valid for as long as the device uses it. We
* lower the count when the context is removed from the SPA.
*
* We grab mm_count (and not mm_users), as we don't want to
* end up in a circular dependency if a process mmaps its
* mmio, therefore incrementing the file ref count when
* calling mmap(), and forgets to unmap before exiting. In
* that scenario, when the kernel handles the death of the
* process, the file is not cleaned because unmap was not
* called, and the mm wouldn't be freed because we would still
* have a reference on mm_users. Incrementing mm_count solves
* the problem.
*/
if (mm)
mmgrab(mm);
trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr);
unlock:
mutex_unlock(&spa->spa_lock);
return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_add_pe);
int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
{
struct ocxl_link *link = (struct ocxl_link *) link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
int pe_handle, rc;
if (pasid > SPA_PASID_MAX)
return -EINVAL;
pe_handle = pasid & SPA_PE_MASK;
pe = spa->spa_mem + pe_handle;
mutex_lock(&spa->spa_lock);
pe->tid = cpu_to_be32(tid);
/*
* The barrier makes sure the PE is updated
* before we clear the NPU context cache below, so that the
* old PE cannot be reloaded erroneously.
*/
mb();
/*
* hook to platform code
* On powerpc, the entry needs to be cleared from the context
* cache of the NPU.
*/
rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
WARN_ON(rc);
mutex_unlock(&spa->spa_lock);
return rc;
}
int ocxl_link_remove_pe(void *link_handle, int pasid)
{
struct ocxl_link *link = (struct ocxl_link *) link_handle;
struct spa *spa = link->spa;
struct ocxl_process_element *pe;
struct pe_data *pe_data;
int pe_handle, rc;
if (pasid > SPA_PASID_MAX)
return -EINVAL;
/*
* About synchronization with our memory fault handler:
*
* Before removing the PE, the driver is supposed to have
	 * notified the AFU, which should have cleaned up and made
* sure the PASID is no longer in use, including pending
* interrupts. However, there's no way to be sure...
*
* We clear the PE and remove the context from our radix
* tree. From that point on, any new interrupt for that
* context will fail silently, which is ok. As mentioned
* above, that's not expected, but it could happen if the
* driver or AFU didn't do the right thing.
*
* There could still be a bottom half running, but we don't
* need to wait/flush, as it is managing a reference count on
* the mm it reads from the radix tree.
*/
pe_handle = pasid & SPA_PE_MASK;
pe = spa->spa_mem + pe_handle;
mutex_lock(&spa->spa_lock);
if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID)) {
rc = -EINVAL;
goto unlock;
}
trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid,
be32_to_cpu(pe->pid), be32_to_cpu(pe->tid));
memset(pe, 0, sizeof(struct ocxl_process_element));
/*
* The barrier makes sure the PE is removed from the SPA
* before we clear the NPU context cache below, so that the
* old PE cannot be reloaded erroneously.
*/
mb();
/*
* hook to platform code
* On powerpc, the entry needs to be cleared from the context
* cache of the NPU.
*/
rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle);
WARN_ON(rc);
pe_data = radix_tree_delete(&spa->pe_tree, pe_handle);
if (!pe_data) {
WARN(1, "Couldn't find pe data when removing PE\n");
} else {
if (pe_data->mm) {
if (link->arva) {
trace_ocxl_release_mmu_notifier(pasid,
pe_data->mm->context.id);
mmu_notifier_unregister(&pe_data->mmu_notifier,
pe_data->mm);
spin_lock(&link->atsd_lock);
pnv_ocxl_tlb_invalidate(link->arva,
pe_data->mm->context.id,
0ull,
PAGE_SIZE);
spin_unlock(&link->atsd_lock);
}
mm_context_remove_copro(pe_data->mm);
mmdrop(pe_data->mm);
}
kfree_rcu(pe_data, rcu);
}
unlock:
mutex_unlock(&spa->spa_lock);
return rc;
}
EXPORT_SYMBOL_GPL(ocxl_link_remove_pe);
int ocxl_link_irq_alloc(void *link_handle, int *hw_irq)
{
struct ocxl_link *link = (struct ocxl_link *) link_handle;
int irq;
if (atomic_dec_if_positive(&link->irq_available) < 0)
return -ENOSPC;
irq = xive_native_alloc_irq();
if (!irq) {
atomic_inc(&link->irq_available);
return -ENXIO;
}
*hw_irq = irq;
return 0;
}
EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc);
void ocxl_link_free_irq(void *link_handle, int hw_irq)
{
struct ocxl_link *link = (struct ocxl_link *) link_handle;
xive_native_free_irq(hw_irq);
atomic_inc(&link->irq_available);
}
EXPORT_SYMBOL_GPL(ocxl_link_free_irq);
| linux-master | drivers/misc/ocxl/link.c |
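link.c above exports a small link-management API (ocxl_link_setup(), ocxl_link_add_pe(), ocxl_link_irq_alloc() and their release counterparts). The snippet below is a minimal, hypothetical sketch of how a client driver might call it, not code from the kernel tree: my_afu_attach(), my_xsl_err_cb() and the zero PE_mask/tidr/amr values are illustrative assumptions, and the function declarations are assumed to be visible through the ocxl public header.
/*
 * Hypothetical usage sketch for the ocxl_link_* API exported above.
 * Error handling is reduced to the minimum; all "my_" names and the
 * zero PE_mask/tidr/amr values are illustrative assumptions.
 */
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <misc/ocxl.h>

static void my_xsl_err_cb(void *data, u64 addr, u64 dsisr)
{
	/* Called from the fault bottom half for unresolved translations */
	pr_err("unresolved xsl fault at %#llx (dsisr %#llx)\n", addr, dsisr);
}

static int my_afu_attach(struct pci_dev *dev, int pasid, u32 pidr,
			 struct mm_struct *mm)
{
	void *link;
	int hw_irq, rc;

	/* One link per device slot, shared by all functions of the device */
	rc = ocxl_link_setup(dev, 0 /* PE_mask, platform dependent */, &link);
	if (rc)
		return rc;

	/* Add a process element so the device can translate for this mm */
	rc = ocxl_link_add_pe(link, pasid, pidr, 0 /* tidr */, 0 /* amr */,
			      pci_dev_id(dev), mm, my_xsl_err_cb, NULL);
	if (rc)
		goto out_release;

	/* Optionally reserve a hardware interrupt for AFU notifications */
	rc = ocxl_link_irq_alloc(link, &hw_irq);
	if (rc)
		goto out_remove_pe;

	return 0;

out_remove_pe:
	ocxl_link_remove_pe(link, pasid);
out_release:
	ocxl_link_release(dev, link);
	return rc;
}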
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2019 IBM Corp.
#include <linux/module.h>
#include "ocxl_internal.h"
/*
* Any opencapi device which wants to use this 'generic' driver should
* use the 0x062B device ID. Vendors should define the subsystem
* vendor/device ID to help differentiate devices.
*/
static const struct pci_device_id ocxl_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x062B), },
{ }
};
MODULE_DEVICE_TABLE(pci, ocxl_pci_tbl);
static int ocxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
int rc;
struct ocxl_afu *afu, *tmp;
struct ocxl_fn *fn;
struct list_head *afu_list;
fn = ocxl_function_open(dev);
if (IS_ERR(fn))
return PTR_ERR(fn);
pci_set_drvdata(dev, fn);
afu_list = ocxl_function_afu_list(fn);
list_for_each_entry_safe(afu, tmp, afu_list, list) {
// Cleanup handled within ocxl_file_register_afu()
rc = ocxl_file_register_afu(afu);
if (rc) {
			dev_err(&dev->dev, "Failed to register AFU '%s' index %d\n",
afu->config.name, afu->config.idx);
}
}
return 0;
}
static void ocxl_remove(struct pci_dev *dev)
{
struct ocxl_fn *fn;
struct ocxl_afu *afu;
struct list_head *afu_list;
fn = pci_get_drvdata(dev);
afu_list = ocxl_function_afu_list(fn);
list_for_each_entry(afu, afu_list, list) {
ocxl_file_unregister_afu(afu);
}
ocxl_function_close(fn);
}
struct pci_driver ocxl_pci_driver = {
.name = "ocxl",
.id_table = ocxl_pci_tbl,
.probe = ocxl_probe,
.remove = ocxl_remove,
.shutdown = ocxl_remove,
};
| linux-master | drivers/misc/ocxl/pci.c |
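pci.c above declares ocxl_pci_driver but does not register it; in the mainline tree the registration lives elsewhere in the driver. The following is a minimal sketch of the usual registration pattern, assuming a hypothetical standalone module; ocxl_example_init() and ocxl_example_exit() are illustrative names, not part of the source.
/*
 * Hypothetical sketch: wiring the pci_driver declared above into
 * module init/exit. Only meant to illustrate the pattern.
 */
#include <linux/module.h>
#include <linux/pci.h>

/* Declared in pci.c above; normally exposed through the driver's internal header */
extern struct pci_driver ocxl_pci_driver;

static int __init ocxl_example_init(void)
{
	/* Hook the driver into the PCI core so ocxl_probe() runs on a match */
	return pci_register_driver(&ocxl_pci_driver);
}

static void __exit ocxl_example_exit(void)
{
	pci_unregister_driver(&ocxl_pci_driver);
}

module_init(ocxl_example_init);
module_exit(ocxl_example_exit);
MODULE_LICENSE("GPL");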